1
0
Fork 0
mirror of https://github.com/moby/moby.git synced 2022-11-09 12:21:53 -05:00

Merge pull request #6559 from vieux/bump_v1.0.1

Bump v1.0.1
This commit is contained in:
Victor Vieux 2014-06-19 18:03:54 -07:00
commit 7030cdce59
261 changed files with 2982 additions and 4706 deletions

View file

@ -1,5 +1,28 @@
# Changelog
## 1.0.1 (2014-06-19)
#### Notable features since 1.0.0
* Enhance security for the LXC driver
#### Builder
* Fix `ONBUILD` instruction passed to grandchildren
#### Runtime
* Fix events subscription
* Fix /etc/hostname file with host networking
* Allow `-h` and `--net=none`
* Fix issue with hotplug devices in `--privileged`
#### Client
* Fix artifacts with events
* Fix a panic with empty flags
* Fix `docker cp` on Mac OS X
#### Miscellaneous
* Fix compilation on Mac OS X
* Fix several races
## 1.0.0 (2014-06-09)
#### Notable features since 0.12.0

View file

@ -1,5 +1,4 @@
Solomon Hykes <solomon@docker.com> (@shykes)
Guillaume J. Charmes <guillaume@docker.com> (@creack)
Victor Vieux <vieux@docker.com> (@vieux)
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
.mailmap: Tianon Gravi <admwiggin@gmail.com> (@tianon)

View file

@ -18,7 +18,13 @@ It benefits directly from the experience accumulated over several years
of large-scale operation and support of hundreds of thousands of
applications and databases.
![Docker L](docs/theme/mkdocs/img/logo_compressed.png "Docker")
![Docker L](docs/theme/mkdocs/images/docker-logo-compressed.png "Docker")
## Security Disclosure
Security is very important to us. If you have any issue regarding security,
please disclose the information responsibly by sending an email to
security@docker.com and not by creating a github issue.
## Better than VMs
@ -142,11 +148,10 @@ Docker can be installed on your local machine as well as servers - both
bare metal and virtualized. It is available as a binary on most modern
Linux systems, or as a VM on Windows, Mac and other systems.
We also offer an interactive tutorial for quickly learning the basics of
using Docker.
We also offer an [interactive tutorial](http://www.docker.com/tryit/)
for quickly learning the basics of using Docker.
For up-to-date install instructions and online tutorials, see the
[Getting Started page](http://www.docker.io/gettingstarted/).
For up-to-date install instructions, see the [Docs](http://docs.docker.com).
Usage examples
==============

View file

@ -1 +1 @@
1.0.0
1.0.1

View file

@ -89,25 +89,6 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
return nil
}
// FIXME: 'insert' is deprecated.
func (cli *DockerCli) CmdInsert(args ...string) error {
fmt.Fprintf(os.Stderr, "Warning: '%s' is deprecated and will be removed in a future version. Please use 'docker build' and 'ADD' instead.\n")
cmd := cli.Subcmd("insert", "IMAGE URL PATH", "Insert a file from URL in the IMAGE at PATH")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 3 {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("url", cmd.Arg(1))
v.Set("path", cmd.Arg(2))
return cli.stream("POST", "/images/"+cmd.Arg(0)+"/insert?"+v.Encode(), nil, cli.out, nil)
}
func (cli *DockerCli) CmdBuild(args ...string) error {
cmd := cli.Subcmd("build", "[OPTIONS] PATH | URL | -", "Build a new image from the source code at PATH")
tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success")

View file

@ -105,7 +105,7 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b
if len(body) == 0 {
return nil, resp.StatusCode, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(resp.StatusCode), req.URL)
}
return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body))
return nil, resp.StatusCode, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(body))
}
return resp.Body, resp.StatusCode, nil
}

View file

@ -398,7 +398,7 @@ func getContainersLogs(eng *engine.Engine, version version.Version, w http.Respo
job.Stdout.Add(outStream)
job.Stderr.Set(errStream)
if err := job.Run(); err != nil {
fmt.Fprintf(outStream, "Error: %s\n", err)
fmt.Fprintf(outStream, "Error running logs job: %s\n", err)
}
return nil
}
@ -811,7 +811,7 @@ func postContainersAttach(eng *engine.Engine, version version.Version, w http.Re
job.Stdout.Add(outStream)
job.Stderr.Set(errStream)
if err := job.Run(); err != nil {
fmt.Fprintf(outStream, "Error: %s\n", err)
fmt.Fprintf(outStream, "Error attaching: %s\n", err)
}
return nil
@ -841,7 +841,7 @@ func wsContainersAttach(eng *engine.Engine, version version.Version, w http.Resp
job.Stdout.Add(ws)
job.Stderr.Set(ws)
if err := job.Run(); err != nil {
utils.Errorf("Error: %s", err)
utils.Errorf("Error attaching websocket: %s", err)
}
})
h.ServeHTTP(w, r)
@ -1022,7 +1022,7 @@ func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, local
}
if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil {
utils.Errorf("Error: %s", err)
utils.Errorf("Error making handler: %s", err)
httpError(w, err)
}
}

View file

@ -114,6 +114,43 @@ func TestGetInfo(t *testing.T) {
}
}
func TestGetContainersByName(t *testing.T) {
eng := engine.New()
name := "container_name"
var called bool
eng.Register("container_inspect", func(job *engine.Job) engine.Status {
called = true
if job.Args[0] != name {
t.Fatalf("name != '%s': %#v", name, job.Args[0])
}
if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") {
t.Fatal("dirty env variable not set")
} else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") {
t.Fatal("dirty env variable set when it shouldn't")
}
v := &engine.Env{}
v.SetBool("dirty", true)
if _, err := v.WriteTo(job.Stdout); err != nil {
return job.Error(err)
}
return engine.StatusOK
})
r := serveRequest("GET", "/containers/"+name+"/json", nil, eng, t)
if !called {
t.Fatal("handler was not called")
}
if r.HeaderMap.Get("Content-Type") != "application/json" {
t.Fatalf("%#v\n", r)
}
var stdoutJson interface{}
if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil {
t.Fatalf("%#v", err)
}
if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 {
t.Fatalf("%#v", stdoutJson)
}
}
func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder {
r := httptest.NewRecorder()
req, err := http.NewRequest(method, target, body)

View file

@ -262,11 +262,11 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
// syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and
if hdr.Typeflag != tar.TypeSymlink {
if err := system.UtimesNano(path, ts); err != nil {
if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
return err
}
} else {
if err := system.LUtimesNano(path, ts); err != nil {
if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
return err
}
}

View file

@ -164,6 +164,6 @@ func TestUntarUstarGnuConflict(t *testing.T) {
}
}
if !found {
t.Fatal("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm")
t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm")
}
}

View file

@ -100,7 +100,7 @@ echo
echo 'Generally Necessary:'
echo -n '- '
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)([, ]|$)/ && $8 == "cgroup" { print $5 }' /proc/$$/mountinfo | head -n1)"
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" -o -d "$cgroupDir/cpuacct" -o -d "$cgroupDir/cpuset" -o -d "$cgroupDir/devices" -o -d "$cgroupDir/freezer" -o -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
@ -116,7 +116,7 @@ fi
flags=(
NAMESPACES {NET,PID,IPC,UTS}_NS
DEVPTS_MULTIPLE_INSTANCES
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_SCHED
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED
MACVLAN VETH BRIDGE
NF_NAT_IPV4 IP_NF_TARGET_MASQUERADE
NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK}

View file

@ -309,14 +309,6 @@ _docker_info()
return
}
_docker_insert()
{
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags_and_ids
fi
}
_docker_inspect()
{
case "$prev" in
@ -393,7 +385,7 @@ _docker_ps()
{
case "$prev" in
--since|--before)
__docker_containers_all
__docker_containers_all
;;
-n)
return
@ -438,9 +430,7 @@ _docker_push()
{
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_image_repos
# TODO replace this with __docker_image_repos_and_tags
# see https://github.com/dotcloud/docker/issues/3411
__docker_image_repos_and_tags
fi
}

View file

@ -120,10 +120,6 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -a import -d 'Create a ne
# info
complete -c docker -f -n '__fish_docker_no_subcommand' -a info -d 'Display system-wide information'
# insert
complete -c docker -f -n '__fish_docker_no_subcommand' -a insert -d 'Insert a file in an image'
complete -c docker -A -f -n '__fish_seen_subcommand_from insert' -a '(__fish_print_docker_images)' -d "Image"
# inspect
complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container'
complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.'

5
contrib/completion/zsh/_docker Executable file → Normal file
View file

@ -139,11 +139,6 @@ __docker_subcommand () {
(history)
_arguments ':images:__docker_images'
;;
(insert)
_arguments '1:containers:__docker_containers' \
'2:URL:(http:// file://)' \
'3:file:_files'
;;
(kill)
_arguments '*:containers:__docker_runningcontainers'
;;

View file

@ -38,7 +38,7 @@ A Dockerfile is similar to a Makefile.
**sudo docker build -t repository/tag .**
-- specifies a repository and tag at which to save the new image if the build
succeeds. The Docker daemon runs the steps one-by-one, commiting the result
succeeds. The Docker daemon runs the steps one-by-one, committing the result
to a new image if necessary before finally outputting the ID of the new
image. The Docker daemon automatically cleans up the context it is given.

View file

@ -20,7 +20,7 @@ seconds since epoch, or date string.
## Listening for Docker events
After running docker events a container 786d698004576 is started and stopped
(The container name has been shortened in the ouput below):
(The container name has been shortened in the output below):
# docker events
[2014-04-12 18:23:04 -0400 EDT] 786d69800457: (from whenry/testimage:latest) start
@ -43,4 +43,4 @@ Again the output container IDs have been shortened for the purposes of this docu
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
based on docker.io source material and internal work.
based on docker.io source material and internal work.

View file

@ -5,17 +5,18 @@
docker-pull - Pull an image or a repository from the registry
# SYNOPSIS
**docker pull** NAME[:TAG]
**docker pull** [REGISTRY_PATH/]NAME[:TAG]
# DESCRIPTION
This command pulls down an image or a repository from the registry. If
there is more than one image for a repository (e.g. fedora) then all
images for that repository name are pulled down including any tags.
It is also possible to specify a non-default registry to pull from.
# EXAMPLE
# EXAMPLES
# Pull a reposiotry with multiple images
# Pull a repository with multiple images
$ sudo docker pull fedora
Pulling repository fedora
@ -31,6 +32,19 @@ images for that repository name are pulled down including any tags.
fedora heisenbug 105182bb5e8b 5 days ago 372.7 MB
fedora latest 105182bb5e8b 5 days ago 372.7 MB
# Pull an image, manually specifying path to the registry and tag
$ sudo docker pull registry.hub.docker.com/fedora:20
Pulling repository fedora
3f2fed40e4b0: Download complete
511136ea3c5a: Download complete
fd241224e9cf: Download complete
$ sudo docker images
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
fedora 20 3f2fed40e4b0 4 days ago 372.7 MB
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
based on docker.io source material and internal work.

View file

@ -10,15 +10,15 @@ docker \- Docker image and container command line interface
# DESCRIPTION
**docker** has two distinct functions. It is used for starting the Docker
daemon and to run the CLI (i.e., to command the daemon to manage images,
containers etc.) So **docker** is both a server, as a deamon, and a client
containers etc.) So **docker** is both a server, as a daemon, and a client
to the daemon, through the CLI.
To run the Docker deamon you do not specify any of the commands listed below but
To run the Docker daemon you do not specify any of the commands listed below but
must specify the **-d** option. The other options listed below are for the
daemon only.
The Docker CLI has over 30 commands. The commands are listed below and each has
its own man page which explain usage and arguements.
its own man page which explain usage and arguments.
To see the man page for a command run **man docker <command>**.

View file

@ -13,7 +13,7 @@ for FILE in *.md; do
base="$(basename "$FILE")"
name="${base%.md}"
num="${name##*.}"
if [ -z "$num" -o "$base" = "$num" ]; then
if [ -z "$num" -o "$name" = "$num" ]; then
# skip files that aren't of the format xxxx.N.md (like README.md)
continue
fi

View file

@ -12,7 +12,7 @@
<array>
<dict>
<key>match</key>
<string>^\s*(ONBUILD\s+)?(FROM|MAINTAINER|RUN|EXPOSE|ENV|ADD|VOLUME|USER|WORKDIR)\s</string>
<string>^\s*(ONBUILD\s+)?(FROM|MAINTAINER|RUN|EXPOSE|ENV|ADD|VOLUME|USER|WORKDIR|COPY)\s</string>
<key>captures</key>
<dict>
<key>0</key>

View file

@ -11,7 +11,7 @@ let b:current_syntax = "dockerfile"
syntax case ignore
syntax match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|CMD|ENTRYPOINT|ENV|EXPOSE|FROM|MAINTAINER|RUN|USER|VOLUME|WORKDIR)\s/
syntax match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|CMD|ENTRYPOINT|ENV|EXPOSE|FROM|MAINTAINER|RUN|USER|VOLUME|WORKDIR|COPY)\s/
highlight link dockerfileKeyword Keyword
syntax region dockerfileString start=/\v"/ skip=/\v\\./ end=/\v"/

View file

@ -15,6 +15,8 @@ import (
"syscall"
"time"
"github.com/docker/libcontainer/devices"
"github.com/docker/libcontainer/label"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/daemon/graphdriver"
@ -22,8 +24,6 @@ import (
"github.com/dotcloud/docker/image"
"github.com/dotcloud/docker/links"
"github.com/dotcloud/docker/nat"
"github.com/dotcloud/docker/pkg/label"
"github.com/dotcloud/docker/pkg/libcontainer/devices"
"github.com/dotcloud/docker/pkg/networkfs/etchosts"
"github.com/dotcloud/docker/pkg/networkfs/resolvconf"
"github.com/dotcloud/docker/pkg/symlink"
@ -85,7 +85,12 @@ type Container struct {
}
func (container *Container) FromDisk() error {
data, err := ioutil.ReadFile(container.jsonPath())
pth, err := container.jsonPath()
if err != nil {
return err
}
data, err := ioutil.ReadFile(pth)
if err != nil {
return err
}
@ -101,15 +106,22 @@ func (container *Container) FromDisk() error {
return container.readHostConfig()
}
func (container *Container) ToDisk() (err error) {
func (container *Container) ToDisk() error {
data, err := json.Marshal(container)
if err != nil {
return
return err
}
err = ioutil.WriteFile(container.jsonPath(), data, 0666)
pth, err := container.jsonPath()
if err != nil {
return
return err
}
err = ioutil.WriteFile(pth, data, 0666)
if err != nil {
return err
}
return container.WriteHostConfig()
}
@ -118,33 +130,45 @@ func (container *Container) readHostConfig() error {
// If the hostconfig file does not exist, do not read it.
// (We still have to initialize container.hostConfig,
// but that's OK, since we just did that above.)
_, err := os.Stat(container.hostConfigPath())
pth, err := container.hostConfigPath()
if err != nil {
return err
}
_, err = os.Stat(pth)
if os.IsNotExist(err) {
return nil
}
data, err := ioutil.ReadFile(container.hostConfigPath())
data, err := ioutil.ReadFile(pth)
if err != nil {
return err
}
return json.Unmarshal(data, container.hostConfig)
}
func (container *Container) WriteHostConfig() (err error) {
func (container *Container) WriteHostConfig() error {
data, err := json.Marshal(container.hostConfig)
if err != nil {
return
return err
}
return ioutil.WriteFile(container.hostConfigPath(), data, 0666)
pth, err := container.hostConfigPath()
if err != nil {
return err
}
return ioutil.WriteFile(pth, data, 0666)
}
func (container *Container) getResourcePath(path string) string {
func (container *Container) getResourcePath(path string) (string, error) {
cleanPath := filepath.Join("/", path)
return filepath.Join(container.basefs, cleanPath)
return symlink.FollowSymlinkInScope(filepath.Join(container.basefs, cleanPath), container.basefs)
}
func (container *Container) getRootResourcePath(path string) string {
func (container *Container) getRootResourcePath(path string) (string, error) {
cleanPath := filepath.Join("/", path)
return filepath.Join(container.root, cleanPath)
return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root)
}
func populateCommand(c *Container, env []string) error {
@ -324,7 +348,12 @@ func (container *Container) StderrLogPipe() io.ReadCloser {
}
func (container *Container) buildHostnameFile() error {
container.HostnamePath = container.getRootResourcePath("hostname")
hostnamePath, err := container.getRootResourcePath("hostname")
if err != nil {
return err
}
container.HostnamePath = hostnamePath
if container.Config.Domainname != "" {
return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644)
}
@ -336,7 +365,11 @@ func (container *Container) buildHostnameAndHostsFiles(IP string) error {
return err
}
container.HostsPath = container.getRootResourcePath("hosts")
hostsPath, err := container.getRootResourcePath("hosts")
if err != nil {
return err
}
container.HostsPath = hostsPath
extraContent := make(map[string]string)
@ -681,19 +714,23 @@ func (container *Container) Unmount() error {
return container.daemon.Unmount(container)
}
func (container *Container) logPath(name string) string {
func (container *Container) logPath(name string) (string, error) {
return container.getRootResourcePath(fmt.Sprintf("%s-%s.log", container.ID, name))
}
func (container *Container) ReadLog(name string) (io.Reader, error) {
return os.Open(container.logPath(name))
pth, err := container.logPath(name)
if err != nil {
return nil, err
}
return os.Open(pth)
}
func (container *Container) hostConfigPath() string {
func (container *Container) hostConfigPath() (string, error) {
return container.getRootResourcePath("hostconfig.json")
}
func (container *Container) jsonPath() string {
func (container *Container) jsonPath() (string, error) {
return container.getRootResourcePath("config.json")
}
@ -756,8 +793,7 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
var filter []string
resPath := container.getResourcePath(resource)
basePath, err := symlink.FollowSymlinkInScope(resPath, container.basefs)
basePath, err := container.getResourcePath(resource)
if err != nil {
container.Unmount()
return nil, err
@ -808,11 +844,16 @@ func (container *Container) GetPtyMaster() (*os.File, error) {
}
func (container *Container) HostConfig() *runconfig.HostConfig {
return container.hostConfig
container.Lock()
res := container.hostConfig
container.Unlock()
return res
}
func (container *Container) SetHostConfig(hostConfig *runconfig.HostConfig) {
container.Lock()
container.hostConfig = hostConfig
container.Unlock()
}
func (container *Container) DisableLink(name string) {
@ -861,7 +902,13 @@ func (container *Container) setupContainerDns() error {
} else if len(daemon.config.DnsSearch) > 0 {
dnsSearch = daemon.config.DnsSearch
}
container.ResolvConfPath = container.getRootResourcePath("resolv.conf")
resolvConfPath, err := container.getRootResourcePath("resolv.conf")
if err != nil {
return err
}
container.ResolvConfPath = resolvConfPath
return resolvconf.Build(container.ResolvConfPath, dns, dnsSearch)
} else {
container.ResolvConfPath = "/etc/resolv.conf"
@ -886,12 +933,20 @@ func (container *Container) initializeNetworking() error {
content, err := ioutil.ReadFile("/etc/hosts")
if os.IsNotExist(err) {
return container.buildHostnameAndHostsFiles("")
}
if err != nil {
} else if err != nil {
return err
}
container.HostsPath = container.getRootResourcePath("hosts")
if err := container.buildHostnameFile(); err != nil {
return err
}
hostsPath, err := container.getRootResourcePath("hosts")
if err != nil {
return err
}
container.HostsPath = hostsPath
return ioutil.WriteFile(container.HostsPath, content, 0644)
} else if container.hostConfig.NetworkMode.IsContainer() {
// we need to get the hosts files from the container to join
@ -1007,12 +1062,18 @@ func (container *Container) setupWorkingDirectory() error {
if container.Config.WorkingDir != "" {
container.Config.WorkingDir = path.Clean(container.Config.WorkingDir)
pthInfo, err := os.Stat(container.getResourcePath(container.Config.WorkingDir))
pth, err := container.getResourcePath(container.Config.WorkingDir)
if err != nil {
return err
}
pthInfo, err := os.Stat(pth)
if err != nil {
if !os.IsNotExist(err) {
return err
}
if err := os.MkdirAll(container.getResourcePath(container.Config.WorkingDir), 0755); err != nil {
if err := os.MkdirAll(pth, 0755); err != nil {
return err
}
}
@ -1025,12 +1086,19 @@ func (container *Container) setupWorkingDirectory() error {
func (container *Container) startLoggingToDisk() error {
// Setup logging of stdout and stderr to disk
if err := container.daemon.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
pth, err := container.logPath("json")
if err != nil {
return err
}
if err := container.daemon.LogToDisk(container.stderr, container.logPath("json"), "stderr"); err != nil {
if err := container.daemon.LogToDisk(container.stdout, pth, "stdout"); err != nil {
return err
}
if err := container.daemon.LogToDisk(container.stderr, pth, "stderr"); err != nil {
return err
}
return nil
}

View file

@ -12,6 +12,8 @@ import (
"sync"
"time"
"github.com/docker/libcontainer/label"
"github.com/docker/libcontainer/selinux"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/daemon/execdriver/execdrivers"
@ -26,10 +28,8 @@ import (
"github.com/dotcloud/docker/graph"
"github.com/dotcloud/docker/image"
"github.com/dotcloud/docker/pkg/graphdb"
"github.com/dotcloud/docker/pkg/label"
"github.com/dotcloud/docker/pkg/namesgenerator"
"github.com/dotcloud/docker/pkg/networkfs/resolvconf"
"github.com/dotcloud/docker/pkg/selinux"
"github.com/dotcloud/docker/pkg/sysinfo"
"github.com/dotcloud/docker/runconfig"
"github.com/dotcloud/docker/utils"
@ -72,9 +72,11 @@ func (c *contStore) Delete(id string) {
func (c *contStore) List() []*Container {
containers := new(History)
c.Lock()
for _, cont := range c.s {
containers.Add(cont)
}
c.Unlock()
containers.Sort()
return *containers
}

View file

@ -1,2 +1 @@
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
Guillaume J. Charmes <guillaume@docker.com> (@creack)

View file

@ -6,7 +6,7 @@ import (
"os"
"os/exec"
"github.com/dotcloud/docker/pkg/libcontainer/devices"
"github.com/docker/libcontainer/devices"
)
// Context is a generic key value pair that allows

View file

@ -0,0 +1 @@
Dinesh Subhraveti <dineshs@altiscale.com> (@dineshs-altiscale)

View file

@ -15,11 +15,10 @@ import (
"syscall"
"time"
"github.com/docker/libcontainer/cgroups"
"github.com/docker/libcontainer/label"
"github.com/docker/libcontainer/mount/nodes"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/pkg/label"
"github.com/dotcloud/docker/pkg/libcontainer/cgroups"
"github.com/dotcloud/docker/pkg/libcontainer/mount/nodes"
"github.com/dotcloud/docker/pkg/system"
"github.com/dotcloud/docker/utils"
)
@ -37,16 +36,7 @@ func init() {
if err := setupNetworking(args); err != nil {
return err
}
if err := setupCapabilities(args); err != nil {
return err
}
if err := setupWorkingDirectory(args); err != nil {
return err
}
if err := system.CloseFdsFrom(3); err != nil {
return err
}
if err := changeUser(args); err != nil {
if err := finalizeNamespace(args); err != nil {
return err
}

View file

@ -9,10 +9,8 @@ import (
"strings"
"syscall"
"github.com/docker/libcontainer/netlink"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/pkg/netlink"
"github.com/dotcloud/docker/pkg/user"
"github.com/syndtr/gocapability/capability"
)
// Clear environment pollution introduced by lxc-start
@ -107,65 +105,6 @@ func setupWorkingDirectory(args *execdriver.InitArgs) error {
return nil
}
// Takes care of dropping privileges to the desired user
func changeUser(args *execdriver.InitArgs) error {
uid, gid, suppGids, err := user.GetUserGroupSupplementary(
args.User,
syscall.Getuid(), syscall.Getgid(),
)
if err != nil {
return err
}
if err := syscall.Setgroups(suppGids); err != nil {
return fmt.Errorf("Setgroups failed: %v", err)
}
if err := syscall.Setgid(gid); err != nil {
return fmt.Errorf("Setgid failed: %v", err)
}
if err := syscall.Setuid(uid); err != nil {
return fmt.Errorf("Setuid failed: %v", err)
}
return nil
}
func setupCapabilities(args *execdriver.InitArgs) error {
if args.Privileged {
return nil
}
drop := []capability.Cap{
capability.CAP_SETPCAP,
capability.CAP_SYS_MODULE,
capability.CAP_SYS_RAWIO,
capability.CAP_SYS_PACCT,
capability.CAP_SYS_ADMIN,
capability.CAP_SYS_NICE,
capability.CAP_SYS_RESOURCE,
capability.CAP_SYS_TIME,
capability.CAP_SYS_TTY_CONFIG,
capability.CAP_AUDIT_WRITE,
capability.CAP_AUDIT_CONTROL,
capability.CAP_MAC_OVERRIDE,
capability.CAP_MAC_ADMIN,
capability.CAP_NET_ADMIN,
capability.CAP_SYSLOG,
}
c, err := capability.NewPid(os.Getpid())
if err != nil {
return err
}
c.Unset(capability.CAPS|capability.BOUNDS, drop...)
if err := c.Apply(capability.CAPS | capability.BOUNDS); err != nil {
return err
}
return nil
}
func getEnv(args *execdriver.InitArgs, key string) string {
for _, kv := range args.Env {
parts := strings.SplitN(kv, "=", 2)

View file

@ -3,9 +3,60 @@
package lxc
import (
"fmt"
"syscall"
"github.com/docker/libcontainer/namespaces"
"github.com/docker/libcontainer/security/capabilities"
"github.com/docker/libcontainer/utils"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/daemon/execdriver/native/template"
"github.com/dotcloud/docker/pkg/system"
)
func setHostname(hostname string) error {
return syscall.Sethostname([]byte(hostname))
}
func finalizeNamespace(args *execdriver.InitArgs) error {
if err := utils.CloseExecFrom(3); err != nil {
return err
}
// We use the native drivers default template so that things like caps are consistent
// across both drivers
container := template.New()
if !args.Privileged {
// drop capabilities in bounding set before changing user
if err := capabilities.DropBoundingSet(container); err != nil {
return fmt.Errorf("drop bounding set %s", err)
}
// preserve existing capabilities while we change users
if err := system.SetKeepCaps(); err != nil {
return fmt.Errorf("set keep caps %s", err)
}
}
if err := namespaces.SetupUser(args.User); err != nil {
return fmt.Errorf("setup user %s", err)
}
if !args.Privileged {
if err := system.ClearKeepCaps(); err != nil {
return fmt.Errorf("clear keep caps %s", err)
}
// drop all other capabilities
if err := capabilities.DropCapabilities(container); err != nil {
return fmt.Errorf("drop capabilities %s", err)
}
}
if err := setupWorkingDirectory(args); err != nil {
return err
}
return nil
}

View file

@ -2,6 +2,12 @@
package lxc
import "github.com/dotcloud/docker/daemon/execdriver"
func setHostname(hostname string) error {
panic("Not supported on darwin")
}
func finalizeNamespace(args *execdriver.InitArgs) error {
panic("Not supported on darwin")
}

View file

@ -4,8 +4,8 @@ import (
"strings"
"text/template"
"github.com/docker/libcontainer/label"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/pkg/label"
)
const LxcTemplate = `

View file

@ -11,8 +11,8 @@ import (
"testing"
"time"
"github.com/docker/libcontainer/devices"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/pkg/libcontainer/devices"
)
func TestLXCConfig(t *testing.T) {

View file

@ -7,7 +7,7 @@ import (
"strconv"
"strings"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/docker/libcontainer"
"github.com/dotcloud/docker/pkg/units"
)

View file

@ -3,8 +3,8 @@ package configuration
import (
"testing"
"github.com/docker/libcontainer"
"github.com/dotcloud/docker/daemon/execdriver/native/template"
"github.com/dotcloud/docker/pkg/libcontainer"
)
// Checks whether the expected capability is specified in the capabilities.

View file

@ -6,12 +6,12 @@ import (
"os/exec"
"path/filepath"
"github.com/docker/libcontainer"
"github.com/docker/libcontainer/apparmor"
"github.com/docker/libcontainer/devices"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/daemon/execdriver/native/configuration"
"github.com/dotcloud/docker/daemon/execdriver/native/template"
"github.com/dotcloud/docker/pkg/apparmor"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/pkg/libcontainer/devices"
)
// createContainer populates and configures the container type with the

View file

@ -11,12 +11,12 @@ import (
"sync"
"syscall"
"github.com/docker/libcontainer"
"github.com/docker/libcontainer/apparmor"
"github.com/docker/libcontainer/cgroups/fs"
"github.com/docker/libcontainer/cgroups/systemd"
"github.com/docker/libcontainer/namespaces"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/pkg/apparmor"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/pkg/libcontainer/cgroups/fs"
"github.com/dotcloud/docker/pkg/libcontainer/cgroups/systemd"
"github.com/dotcloud/docker/pkg/libcontainer/namespaces"
"github.com/dotcloud/docker/pkg/system"
)

View file

@ -1,9 +1,9 @@
package template
import (
"github.com/dotcloud/docker/pkg/apparmor"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/pkg/libcontainer/cgroups"
"github.com/docker/libcontainer"
"github.com/docker/libcontainer/apparmor"
"github.com/docker/libcontainer/cgroups"
)
// New returns the docker default configuration for libcontainer

View file

@ -30,9 +30,9 @@ import (
"sync"
"syscall"
"github.com/docker/libcontainer/label"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/daemon/graphdriver"
"github.com/dotcloud/docker/pkg/label"
mountpk "github.com/dotcloud/docker/pkg/mount"
"github.com/dotcloud/docker/utils"
)

View file

@ -39,7 +39,7 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil
fi, err := os.Stat(target)
if err != nil {
if os.IsNotExist(err) {
utils.Errorf("There are no more loopback device available.")
utils.Errorf("There are no more loopback devices available.")
}
return nil, ErrAttachLoopbackDevice
}

View file

@ -18,8 +18,8 @@ import (
"syscall"
"time"
"github.com/docker/libcontainer/label"
"github.com/dotcloud/docker/daemon/graphdriver"
"github.com/dotcloud/docker/pkg/label"
"github.com/dotcloud/docker/pkg/units"
"github.com/dotcloud/docker/utils"
)
@ -55,7 +55,7 @@ type DevInfo struct {
}
type MetaData struct {
Devices map[string]*DevInfo `json:devices`
Devices map[string]*DevInfo `json:"Devices"`
devicesLock sync.Mutex `json:"-"` // Protects all read/writes to Devices map
}

View file

@ -1,12 +1,13 @@
package graphtest
import (
"github.com/dotcloud/docker/daemon/graphdriver"
"io/ioutil"
"os"
"path"
"syscall"
"testing"
"github.com/dotcloud/docker/daemon/graphdriver"
)
var (
@ -94,10 +95,10 @@ func verifyFile(t *testing.T, path string, mode os.FileMode, uid, gid uint32) {
if stat, ok := fi.Sys().(*syscall.Stat_t); ok {
if stat.Uid != uid {
t.Fatal("%s no owned by uid %d", path, uid)
t.Fatalf("%s no owned by uid %d", path, uid)
}
if stat.Gid != gid {
t.Fatal("%s not owned by gid %d", path, gid)
t.Fatalf("%s not owned by gid %d", path, gid)
}
}

View file

@ -13,11 +13,13 @@ func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
}
name := job.Args[0]
if container := daemon.Get(name); container != nil {
container.Lock()
defer container.Unlock()
if job.GetenvBool("dirty") {
b, err := json.Marshal(&struct {
*Container
HostConfig *runconfig.HostConfig
}{container, container.HostConfig()})
}{container, container.hostConfig})
if err != nil {
return job.Error(err)
}

View file

@ -8,13 +8,13 @@ import (
"strings"
"sync"
"github.com/docker/libcontainer/netlink"
"github.com/dotcloud/docker/daemon/networkdriver"
"github.com/dotcloud/docker/daemon/networkdriver/ipallocator"
"github.com/dotcloud/docker/daemon/networkdriver/portallocator"
"github.com/dotcloud/docker/daemon/networkdriver/portmapper"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/pkg/iptables"
"github.com/dotcloud/docker/pkg/netlink"
"github.com/dotcloud/docker/pkg/networkfs/resolvconf"
"github.com/dotcloud/docker/utils"
)

View file

@ -1,7 +1,7 @@
package networkdriver
import (
"github.com/dotcloud/docker/pkg/netlink"
"github.com/docker/libcontainer/netlink"
"net"
"testing"
)

View file

@ -6,7 +6,7 @@ import (
"fmt"
"net"
"github.com/dotcloud/docker/pkg/netlink"
"github.com/docker/libcontainer/netlink"
)
var (

View file

@ -98,12 +98,17 @@ func applyVolumesFrom(container *Container) error {
continue
}
stat, err := os.Stat(c.getResourcePath(volPath))
pth, err := c.getResourcePath(volPath)
if err != nil {
return err
}
if err := createIfNotExists(container.getResourcePath(volPath), stat.IsDir()); err != nil {
stat, err := os.Stat(pth)
if err != nil {
return err
}
if err := createIfNotExists(pth, stat.IsDir()); err != nil {
return err
}
@ -280,8 +285,8 @@ func initializeVolume(container *Container, volPath string, binds map[string]Bin
delete(container.VolumesRW, volPath)
}
container.Volumes[newVolPath] = destination
container.VolumesRW[newVolPath] = srcRW
container.Volumes[volPath] = destination
container.VolumesRW[volPath] = srcRW
if err := createIfNotExists(source, volIsDir); err != nil {
return err

3
docker/README.md Normal file
View file

@ -0,0 +1,3 @@
docker.go contains Docker's main function.
This file provides first line CLI argument parsing and environment variable setting.

View file

@ -13,10 +13,8 @@ RUN pip install mkdocs
#RUN easy_install -U setuptools
#RUN pip install MarkdownTools2
# this week I seem to need the latest dev release of awscli too
# awscli 1.3.6 does --error-document correctly
# https://github.com/aws/aws-cli/commit/edc2290e173dfaedc70b48cfa3624d58c533c6c3
RUN pip install awscli
# this version works, the current versions fail in different ways
RUN pip install awscli==1.3.9
# get my sitemap.xml branch of mkdocs and use that for now
RUN git clone https://github.com/SvenDowideit/mkdocs &&\

View file

@ -53,12 +53,12 @@ run `mkdocs serve`
## Style guide
The documentation is written with paragraphs wrapped at 80 colum lines to make
The documentation is written with paragraphs wrapped at 80 column lines to make
it easier for terminal use.
### Examples
When writing examples give the user hints by making them resemble what they see
When writing examples, give the user hints by making them resemble what they see
in their shell:
- Indent shell examples by 4 spaces so they get rendered as code.
@ -76,7 +76,7 @@ references them, or in a subdirectory if one already exists.
## Working using GitHub's file editor
Alternatively, for small changes and typos you might want to use GitHub's built
Alternatively, for small changes and typos you might want to use GitHub's built-
in file editor. It allows you to preview your changes right on-line (though
there can be some differences between GitHub Markdown and [MkDocs
Markdown](http://www.mkdocs.org/user-guide/writing-your-docs/)). Just be
@ -85,8 +85,8 @@ work!](../CONTRIBUTING.md#sign-your-work)
## Publishing Documentation
To publish a copy of the documentation you need a `docs/awsconfig` To make life
easier for file containing AWS settings to deploy to. The release script will
To publish a copy of the documentation you need a `docs/awsconfig`
file containing AWS settings to deploy to. The release script will
create an s3 if needed, and will then push the files to it.
[profile dowideit-docs] aws_access_key_id = IHOIUAHSIDH234rwf....

View file

@ -101,10 +101,11 @@ pages:
- ['reference/api/index.md', '**HIDDEN**']
- ['reference/api/docker-io_api.md', 'Reference', 'Docker Hub API']
- ['reference/api/registry_api.md', 'Reference', 'Docker Registry API']
- ['reference/api/registry_index_spec.md', 'Reference', 'Registry & Index Spec']
- ['reference/api/hub_registry_spec.md', 'Reference', 'Docker Hub and Registry Spec']
- ['reference/api/docker_remote_api.md', 'Reference', 'Docker Remote API']
- ['reference/api/docker_remote_api_v1.12.md', 'Reference', 'Docker Remote API v1.12']
- ['reference/api/docker_remote_api_v1.11.md', 'Reference', 'Docker Remote API v1.11']
- ['reference/api/docker_remote_api_v1.10.md', 'Reference', 'Docker Remote API v1.10']
- ['reference/api/docker_remote_api_v1.10.md', '**HIDDEN**']
- ['reference/api/docker_remote_api_v1.9.md', '**HIDDEN**']
- ['reference/api/docker_remote_api_v1.8.md', '**HIDDEN**']
- ['reference/api/docker_remote_api_v1.7.md', '**HIDDEN**']
@ -116,8 +117,8 @@ pages:
- ['reference/api/docker_remote_api_v1.1.md', '**HIDDEN**']
- ['reference/api/docker_remote_api_v1.0.md', '**HIDDEN**']
- ['reference/api/remote_api_client_libraries.md', 'Reference', 'Docker Remote API Client Libraries']
- ['reference/api/docker_io_oauth_api.md', 'Reference', 'Docker IO OAuth API']
- ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker IO Accounts API']
- ['reference/api/docker_io_oauth_api.md', 'Reference', 'Docker Hub OAuth API']
- ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker Hub Accounts API']
- ['jsearch.md', '**HIDDEN**']

View file

@ -30,10 +30,10 @@ echo "cfg file: $AWS_CONFIG_FILE ; profile: $AWS_DEFAULT_PROFILE"
setup_s3() {
echo "Create $BUCKET"
# Try creating the bucket. Ignore errors (it might already exist).
aws s3 mb s3://$BUCKET 2>/dev/null || true
aws s3 mb --profile $BUCKET s3://$BUCKET 2>/dev/null || true
# Check access to the bucket.
echo "test $BUCKET exists"
aws s3 ls s3://$BUCKET
aws s3 --profile $BUCKET ls s3://$BUCKET
# Make the bucket accessible through website endpoints.
echo "make $BUCKET accessible as a website"
#aws s3 website s3://$BUCKET --index-document index.html --error-document jsearch/index.html
@ -41,7 +41,7 @@ setup_s3() {
echo
echo $s3conf
echo
aws s3api put-bucket-website --bucket $BUCKET --website-configuration "$s3conf"
aws s3api --profile $BUCKET put-bucket-website --bucket $BUCKET --website-configuration "$s3conf"
}
build_current_documentation() {
@ -57,7 +57,42 @@ upload_current_documentation() {
echo " to $dst"
echo
#s3cmd --recursive --follow-symlinks --preserve --acl-public sync "$src" "$dst"
aws s3 sync --cache-control "max-age=3600" --acl public-read --exclude "*.rej" --exclude "*.rst" --exclude "*.orig" --exclude "*.py" "$src" "$dst"
#aws s3 cp --profile $BUCKET --cache-control "max-age=3600" --acl public-read "site/search_content.json" "$dst"
# a really complicated way to send only the files we want
# if there are too many in any one set, aws s3 sync seems to fall over with 2 files to go
endings=( json html xml css js gif png JPG )
for i in ${endings[@]}; do
include=""
for j in ${endings[@]}; do
if [ "$i" != "$j" ];then
include="$include --exclude *.$j"
fi
done
echo "uploading *.$i"
run="aws s3 sync --profile $BUCKET --cache-control \"max-age=3600\" --acl public-read \
$include \
--exclude *.txt \
--exclude *.text* \
--exclude *Dockerfile \
--exclude *.DS_Store \
--exclude *.psd \
--exclude *.ai \
--exclude *.svg \
--exclude *.eot \
--exclude *.otf \
--exclude *.ttf \
--exclude *.woff \
--exclude *.rej \
--exclude *.rst \
--exclude *.orig \
--exclude *.py \
$src $dst"
echo "======================="
#echo "$run"
#echo "======================="
$run
done
}
setup_s3

View file

@ -18,9 +18,18 @@
{ "Condition": { "KeyPrefixEquals": "use/working_with_links_names/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerlinks/" } },
{ "Condition": { "KeyPrefixEquals": "use/workingwithrepository/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerrepos/" } },
{ "Condition": { "KeyPrefixEquals": "use/port_redirection" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerlinks/" } },
{ "Condition": { "KeyPrefixEquals": "use/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "examples/" } },
{ "Condition": { "KeyPrefixEquals": "use/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "examples/" } },
{ "Condition": { "KeyPrefixEquals": "docker-io/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "docker-hub/" } }
{ "Condition": { "KeyPrefixEquals": "use/networking/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/networking/" } },
{ "Condition": { "KeyPrefixEquals": "use/puppet/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/puppet/" } },
{ "Condition": { "KeyPrefixEquals": "use/ambassador_pattern_linking/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/ambassador_pattern_linking/" } },
{ "Condition": { "KeyPrefixEquals": "use/basics/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/basics/" } },
{ "Condition": { "KeyPrefixEquals": "use/chef/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/chef/" } },
{ "Condition": { "KeyPrefixEquals": "use/host_integration/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/host_integration/" } },
{ "Condition": { "KeyPrefixEquals": "docker-io/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "docker-hub/" } },
{ "Condition": { "KeyPrefixEquals": "examples/cfengine_process_management/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/cfengine_process_management/" } },
{ "Condition": { "KeyPrefixEquals": "examples/https/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/https/" } },
{ "Condition": { "KeyPrefixEquals": "examples/using_supervisord/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/using_supervisord/" } },
{ "Condition": { "KeyPrefixEquals": "reference/api/registry_index_spec/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "reference/api/hub_registry_spec/" } },
{ "Condition": { "KeyPrefixEquals": "use/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "examples/" } }
]
}

View file

@ -110,7 +110,9 @@ Finally, several networking options can only be provided when calling
The following sections tackle all of the above topics in an order that
moves roughly from simplest to most complex.
## <a name="dns"></a>Configuring DNS
## Configuring DNS
<a name="dns"></a>
How can Docker supply each container with a hostname and DNS
configuration, without having to build a custom image with the hostname
@ -136,14 +138,14 @@ Four different options affect container domain name services.
* `-h HOSTNAME` or `--hostname=HOSTNAME` — sets the hostname by which
the container knows itself. This is written into `/etc/hostname`,
into `/etc/hosts` as the name of the containers host-facing IP
into `/etc/hosts` as the name of the container's host-facing IP
address, and is the name that `/bin/bash` inside the container will
display inside its prompt. But the hostname is not easy to see from
outside the container. It will not appear in `docker ps` nor in the
`/etc/hosts` file of any other container.
* `--link=CONTAINER_NAME:ALIAS` — using this option as you `run` a
container gives the new containers `/etc/hosts` an extra entry
container gives the new container's `/etc/hosts` an extra entry
named `ALIAS` that points to the IP address of the container named
`CONTAINER_NAME`. This lets processes inside the new container
connect to the hostname `ALIAS` without having to know its IP. The
@ -158,9 +160,9 @@ Four different options affect container domain name services.
* `--dns-search=DOMAIN...` — sets the domain names that are searched
when a bare unqualified hostname is used inside of the container, by
writing `search` lines into the containers `/etc/resolv.conf`.
writing `search` lines into the container's `/etc/resolv.conf`.
When a container process attempts to access `host` and the search
domain `exmaple.com` is set, for instance, the DNS logic will not
domain `example.com` is set, for instance, the DNS logic will not
only look up `host` but also `host.example.com`.
Note that Docker, in the absence of either of the last two options
@ -168,12 +170,14 @@ above, will make `/etc/resolv.conf` inside of each container look like
the `/etc/resolv.conf` of the host machine where the `docker` daemon is
running. The options then modify this default configuration.
## <a name="between-containers"></a>Communication between containers
## Communication between containers
<a name="between-containers"></a>
Whether two containers can communicate is governed, at the operating
system level, by three factors.
1. Does the network topology even connect the containers network
1. Does the network topology even connect the containers' network
interfaces? By default Docker will attach all containers to a
single `docker0` bridge, providing a path for packets to travel
between them. See the later sections of this document for other
@ -259,15 +263,17 @@ the `FORWARD` chain has a default policy of `ACCEPT` or `DROP`:
> **Note**:
> Docker is careful that its host-wide `iptables` rules fully expose
> containers to each others raw IP addresses, so connections from one
> containers to each other's raw IP addresses, so connections from one
> container to another should always appear to be originating from the
> first containers own IP address.
> first container's own IP address.
## <a name="binding-ports"></a>Binding container ports to the host
## Binding container ports to the host
<a name="binding-ports"></a>
By default Docker containers can make connections to the outside world,
but the outside world cannot connect to containers. Each outgoing
connection will appear to originate from one of the host machines own
connection will appear to originate from one of the host machine's own
IP addresses thanks to an `iptables` masquerading rule on the host
machine that the Docker server creates when it starts:
@ -289,7 +295,7 @@ page. There are two approaches.
First, you can supply `-P` or `--publish-all=true|false` to `docker run`
which is a blanket operation that identifies every port with an `EXPOSE`
line in the images `Dockerfile` and maps it to a host port somewhere in
line in the image's `Dockerfile` and maps it to a host port somewhere in
the range 4900049900. This tends to be a bit inconvenient, since you
then have to run other `docker` sub-commands to learn which external
port a given service was mapped to.
@ -336,9 +342,11 @@ Again, this topic is covered without all of these low-level networking
details in the [Docker User Guide](/userguide/dockerlinks/) document if you
would like to use that as your port redirection reference instead.
## <a name="docker0"></a>Customizing docker0
## Customizing docker0
By default, the Docker server creates and configures the host systems
<a name="docker0"></a>
By default, the Docker server creates and configures the host system's
`docker0` interface as an *Ethernet bridge* inside the Linux kernel that
can pass packets back and forth between other physical or virtual
network interfaces so that they behave as a single Ethernet network.
@ -347,7 +355,7 @@ Docker configures `docker0` with an IP address and netmask so the host
machine can both receive and send packets to containers connected to the
bridge, and gives it an MTU — the *maximum transmission unit* or largest
packet length that the interface will allow — of either 1,500 bytes or
else a more specific value copied from the Docker hosts interface that
else a more specific value copied from the Docker host's interface that
supports its default route. Both are configurable at server startup:
* `--bip=CIDR` — supply a specific IP address and netmask for the
@ -380,8 +388,8 @@ install it.
Finally, the `docker0` Ethernet bridge settings are used every time you
create a new container. Docker selects a free IP address from the range
available on the bridge each time you `docker run` a new container, and
configures the containers `eth0` interface with that IP address and the
bridges netmask. The Docker hosts own IP address on the bridge is
configures the container's `eth0` interface with that IP address and the
bridge's netmask. The Docker host's own IP address on the bridge is
used as the default gateway by which each container reaches the rest of
the Internet.
@ -408,7 +416,9 @@ packets out on to the Internet unless its `ip_forward` system setting is
`1` — see the section above on [Communication between
containers](#between-containers) for details.
## <a name="bridge-building"></a>Building your own bridge
## Building your own bridge
<a name="bridge-building"></a>
If you want to take Docker out of the business of creating its own
Ethernet bridge entirely, you can set up your own bridge before starting
@ -450,25 +460,27 @@ illustrate the technique.
The result should be that the Docker server starts successfully and is
now prepared to bind containers to the new bridge. After pausing to
verify the bridges configuration, try creating a container — you will
verify the bridge's configuration, try creating a container — you will
see that its IP address is in your new IP address range, which Docker
will have auto-detected.
Just as we learned in the previous section, you can use the `brctl show`
command to see Docker add and remove interfaces from the bridge as you
start and stop containers, and can run `ip addr` and `ip route` inside a
container to see that it has been given an address in the bridges IP
address range and has been told to use the Docker hosts IP address on
container to see that it has been given an address in the bridge's IP
address range and has been told to use the Docker host's IP address on
the bridge as its default gateway to the rest of the Internet.
## <a name="container-networking"></a>How Docker networks a container
## How Docker networks a container
<a name="container-networking"></a>
While Docker is under active development and continues to tweak and
improve its network configuration logic, the shell commands in this
section are rough equivalents to the steps that Docker takes when
configuring networking for each new container.
Lets review a few basics.
Let's review a few basics.
To communicate using the Internet Protocol (IP), a machine needs access
to at least one network interface at which packets can be sent and
@ -477,11 +489,11 @@ reachable through that interface. Network interfaces do not have to be
physical devices. In fact, the `lo` loopback interface available on
every Linux machine (and inside each Docker container) is entirely
virtual — the Linux kernel simply copies loopback packets directly from
the senders memory into the receivers memory.
the sender's memory into the receiver's memory.
Docker uses special virtual interfaces to let containers communicate
with the host machine — pairs of virtual interfaces called “peers” that
are linked inside of the host machines kernel so that packets can
are linked inside of the host machine's kernel so that packets can
travel between them. They are simple to create, as we will see in a
moment.
@ -495,12 +507,12 @@ The steps with which Docker configures a container are:
3. Toss the other interface over the wall into the new container (which
will already have been provided with an `lo` interface) and rename
it to the much prettier name `eth0` since, inside of the containers
it to the much prettier name `eth0` since, inside of the container's
separate and unique network interface namespace, there are no
physical interfaces with which this name could collide.
4. Give the containers `eth0` a new IP address from within the
bridges range of network addresses, and set its default route to
4. Give the container's `eth0` a new IP address from within the
bridge's range of network addresses, and set its default route to
the IP address that the Docker host owns on the bridge.
With these steps complete, the container now possesses an `eth0`
@ -516,7 +528,7 @@ values.
* `--net=host` — Tells Docker to skip placing the container inside of
a separate network stack. In essence, this choice tells Docker to
**not containerize the containers networking**! While container
**not containerize the container's networking**! While container
processes will still be confined to their own filesystem and process
list and resource limits, a quick `ip addr` command will show you
that, network-wise, they live “outside” in the main Docker host and
@ -524,10 +536,15 @@ values.
**not** let the container reconfigure the host network stack — that
would require `--privileged=true` — but it does let container
processes open low-numbered ports like any other root process.
It also allows the container to access local network services
like D-bus. This can lead to processes in the container being
able to do unexpected things like
[restart your computer](https://github.com/dotcloud/docker/issues/6401).
You should use this option with caution.
* `--net=container:NAME_or_ID` — Tells Docker to put this containers
* `--net=container:NAME_or_ID` — Tells Docker to put this container's
processes inside of the network stack that has already been created
inside of another container. The new containers processes will be
inside of another container. The new container's processes will be
confined to their own filesystem and process list and resource
limits, but will share the same IP address and port numbers as the
first container, and processes on the two containers will be able to
@ -559,7 +576,7 @@ Docker do all of the configuration:
$ sudo mkdir -p /var/run/netns
$ sudo ln -s /proc/$pid/ns/net /var/run/netns/$pid
# Check the bridges IP address and netmask
# Check the bridge's IP address and netmask
$ ip addr show docker0
21: docker0: ...
@ -621,14 +638,16 @@ of the same kinds of configuration. Here are two:
* Brandon Rhodes has created a whole network topology of Docker
containers for the next edition of Foundations of Python Network
Programming that includes routing, NATd firewalls, and servers that
Programming that includes routing, NAT'd firewalls, and servers that
offer HTTP, SMTP, POP, IMAP, Telnet, SSH, and FTP:
<https://github.com/brandon-rhodes/fopnp/tree/m/playground>
Both tools use networking commands very much like the ones you saw in
the previous section, and will see in the following sections.
## <a name="point-to-point"></a>Building a point-to-point connection
## Building a point-to-point connection
<a name="point-to-point"></a>
By default, Docker attaches all containers to the virtual subnet
implemented by `docker0`. You can create containers that are each
@ -646,7 +665,7 @@ The solution is simple: when you create your pair of peer interfaces,
simply throw *both* of them into containers, and configure them as
classic point-to-point links. The two containers will then be able to
communicate directly (provided you manage to tell each container the
others IP address, of course). You might adjust the instructions of
other's IP address, of course). You might adjust the instructions of
the previous section to go something like this:
# Start up two containers in two terminal windows
@ -683,7 +702,7 @@ the previous section to go something like this:
$ sudo ip netns exec 3004 ip route add 10.1.1.1/32 dev B
The two containers should now be able to ping each other and make
connections sucessfully. Point-to-point links like this do not depend
connections successfully. Point-to-point links like this do not depend
on a subnet nor a netmask, but on the bare assertion made by `ip route`
that some other single IP address is connected to a particular network
interface.
@ -691,7 +710,7 @@ interface.
Note that point-to-point links can be safely combined with other kinds
of network connectivity — there is no need to start the containers with
`--net=none` if you want point-to-point links to be an addition to the
containers normal networking instead of a replacement.
container's normal networking instead of a replacement.
A final permutation of this pattern is to create the point-to-point link
between the Docker host and one container, which would allow the host to

View file

@ -112,7 +112,7 @@ use traditional UNIX permission checks to limit access to the control
socket.
You can also expose the REST API over HTTP if you explicitly decide so.
However, if you do that, being aware of the abovementioned security
However, if you do that, being aware of the above mentioned security
implication, you should ensure that it will be reachable only from a
trusted network or VPN; or protected with e.g. `stunnel`
and client SSL certificates.

View file

@ -50,6 +50,13 @@ This command will take some time to complete when you first execute it.
If the build is successful, congratulations! You have produced a clean
build of docker, neatly encapsulated in a standard build environment.
> **Note**:
> On Mac OS X, make targets such as `build`, `binary`, and `test`
> must **not** be built under root. So, for example, instead of the above
> command, issue:
>
> $ make build
## Build the Docker Binary
To create the Docker binary, run this command:

View file

@ -6,9 +6,11 @@ page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub
## Docker Hub Accounts
You can `search` for Docker images and `pull` them from [Docker Hub](https://hub.docker.com)
without signing in or even having an account. However, in order to `push` images,
leave comments or to *star* a repository, you are going to need a [Docker Hub](https://hub.docker.com) account.
You can `search` for Docker images and `pull` them from [Docker
Hub](https://hub.docker.com) without signing in or even having an
account. However, in order to `push` images, leave comments or to *star*
a repository, you are going to need a [Docker
Hub](https://hub.docker.com) account.
### Registration for a Docker Hub Account
@ -29,3 +31,19 @@ https://hub.docker.com/account/resend-email-confirmation/) page.
If you can't access your account for some reason, you can reset your password
from the [*Password Reset*](https://hub.docker.com/account/forgot-password/)
page.
## Organizations & Groups
Also available on the Docker Hub are organizations and groups that allow
you to collaborate across your organization or team. You can see what
organizations you belong to and add new organizations from the Account
tab.
![organizations](/docker-hub/orgs.png)
From within your organizations you can create groups that allow you to
further manage who can interact with your repositories.
![groups](/docker-hub/groups.png)

Binary file not shown.

After

Width:  |  Height:  |  Size: 106 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 91 KiB

View file

@ -1,8 +1,23 @@
page_title: The Docker Hub Help
page_description: The Docker Help documentation home
page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, docs, documentation, accounts, organizations, repositories, groups
# Docker Hub
## Contents:
![DockerHub](/docker-hub/hub.png)
- [Accounts](accounts/)
- [Repositories](repos/)
- [Automated Builds](builds/)
## [Accounts](accounts/)
[Learn how to create](accounts/) a [Docker Hub](https://hub.docker.com)
account and manage your organizations and groups.
## [Repositories](repos/)
Find out how to share your Docker images in [Docker Hub
repositories](repos/) and how to store and manage private images.
## [Automated Builds](builds/)
Learn how to automate your build and deploy pipeline with [Automated
Builds](builds/)

Binary file not shown.

After

Width:  |  Height:  |  Size: 71 KiB

View file

@ -4,63 +4,106 @@ page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub
# Repositories and Images on Docker Hub
![repositories](/docker-hub/repos.png)
## Searching for repositories and images
You can `search` for all the publicly available repositories and images using
Docker. If a repository is not public (i.e., private), it won't be listed on
the repository search results. To see repository statuses, you can look at your
[profile page](https://hub.docker.com) on [Docker Hub](https://hub.docker.com).
Docker.
$ docker search ubuntu
This will show you a list of the currently available repositories on the
Docker Hub which match the provided keyword.
If a repository is private it won't be listed on the repository search
results. To see repository statuses, you can look at your [profile
page](https://hub.docker.com) on [Docker Hub](https://hub.docker.com).
## Repositories
Your Docker Hub repositories have a number of useful features.
### Stars
Stars are a way to show that you like a repository. They are also an easy way
of bookmark your favorites.
Your repositories can be starred and you can star repositories in
return. Stars are a way to show that you like a repository. They are
also an easy way of bookmarking your favorites.
### Comments
You can interact with other members of the Docker community and maintainers by
leaving comments on repositories. If you find any comments that are not
appropriate, you can flag them for the admins' review.
### Private Docker Repositories
To work with a private repository on [Docker Hub](https://hub.docker.com), you
will need to add one via the [Add Repository](https://registry.hub.docker.com/account/repositories/add/)
link. Once the private repository is created, you can `push` and `pull` images
to and from it using Docker.
> *Note:* You need to be signed in and have access to work with a private
> repository.
Private repositories are just like public ones. However, it isn't possible to
browse them or search their content on the public registry. They do not get cached
the same way as a public repository either.
It is possible to give access to a private repository to those whom you
designate (i.e., collaborators) from its settings page.
From there, you can also switch repository status (*public* to *private*, or
viceversa). You will need to have an available private repository slot open
before you can do such a switch. If you don't have any, you can always upgrade
your [Docker Hub](https://registry.hub.docker.com/plans/) plan.
appropriate, you can flag them for review.
### Collaborators and their role
A collaborator is someone you want to give access to a private repository. Once
designated, they can `push` and `pull`. Although, they will not be allowed to
perform any administrative tasks such as deleting the repository or changing its
status from private to public.
A collaborator is someone you want to give access to a private
repository. Once designated, they can `push` and `pull` to your
repositories. They will not be allowed to perform any administrative
tasks such as deleting the repository or changing its status from
private to public.
> **Note:** A collaborator cannot add other collaborators. Only the owner of
> **Note:**
> A collaborator cannot add other collaborators. Only the owner of
> the repository has administrative access.
### Webhooks
You can also collaborate on Docker Hub with organizations and groups.
You can read more about that [here](accounts/).
You can configure webhooks on the repository settings page. A webhook is called
only after a successful `push` is made. The webhook calls are HTTP POST requests
with a JSON payload similar to the example shown below.
## Official Repositories
The Docker Hub contains a number of [official
repositories](http://registry.hub.docker.com/official). These are
certified repositories from vendors and contributors to Docker. They
contain Docker images from vendors like Canonical, Oracle, and Red Hat
that you can use to build applications and services.
If you use Official Repositories you know you're using a supported,
optimized and up-to-date image to power your applications.
> **Note:**
> If you would like to contribute an official repository for your
> organization, product or team you can see more information
> [here](https://github.com/dotcloud/stackbrew).
## Private Docker Repositories
Private repositories allow you to have repositories that contain images
that you want to keep private, either to your own account or within an
organization or group.
To work with a private repository on [Docker
Hub](https://hub.docker.com), you will need to add one via the [Add
Repository](https://registry.hub.docker.com/account/repositories/add/)
link. You get one private repository for free with your Docker Hub
account. If you need more accounts you can upgrade your [Docker
Hub](https://registry.hub.docker.com/plans/) plan.
Once the private repository is created, you can `push` and `pull` images
to and from it using Docker.
> *Note:* You need to be signed in and have access to work with a
> private repository.
Private repositories are just like public ones. However, it isn't
possible to browse them or search their content on the public registry.
They do not get cached the same way as a public repository either.
It is possible to give access to a private repository to those whom you
designate (i.e., collaborators) from its Settings page. From there, you
can also switch repository status (*public* to *private*, or
vice-versa). You will need to have an available private repository slot
open before you can do such a switch. If you don't have any available,
you can always upgrade your [Docker
Hub](https://registry.hub.docker.com/plans/) plan.
## Webhooks
You can configure webhooks for your repositories on the Repository
Settings page. A webhook is called only after a successful `push` is
made. The webhook calls are HTTP POST requests with a JSON payload
similar to the example shown below.
> **Note:** For testing, you can try an HTTP request tool like
> [requestb.in](http://requestb.in/).
@ -95,3 +138,7 @@ with a JSON payload similar to the example shown below.
"repo_name":"username/reponame"
}
}
Webhooks allow you to notify people, services and other applications of
new updates to your images and repositories.

Binary file not shown.

After

Width:  |  Height:  |  Size: 112 KiB

View file

@ -122,8 +122,7 @@ functionalities:
### What is different between a Docker container and a VM?
There's a great StackOverflow answer [showing the differences](
http://stackoverflow.com/questions/16047306/
how-is-docker-io-different-from-a-normal-virtual-machine).
http://stackoverflow.com/questions/16047306/how-is-docker-io-different-from-a-normal-virtual-machine).
### Do I lose my data when the container exits?
@ -185,8 +184,7 @@ this [mailbox](mailto:security@docker.com).
### Why do I need to sign my commits to Docker with the DCO?
Please read [our blog post](
http://blog.docker.io/2014/01/
docker-code-contributions-require-developer-certificate-of-origin/)
http://blog.docker.io/2014/01/docker-code-contributions-require-developer-certificate-of-origin/)
on the introduction of the DCO.
### Can I help by adding some questions and answers?

View file

@ -17,12 +17,12 @@ Docker consists of:
* The Docker Engine - our lightweight and powerful open source container
virtualization technology combined with a work flow for building
and containerizing your applications.
* [Docker Hub](https://hub.docker.com) - our SAAS service for
* [Docker Hub](https://hub.docker.com) - our SaaS service for
sharing and managing your application stacks.
## Why Docker?
- **Faster delivery of your applications**
- **Faster delivery of your applications**
* We want your environment to work better. Docker containers,
and the work flow that comes with them, help your developers,
sysadmins, QA folks, and release engineers work together to get your code
@ -39,7 +39,7 @@ Docker consists of:
sub-second launch times, reducing the cycle
time of development, testing, and deployment.
- **Deploy and scale more easily**
- **Deploy and scale more easily**
* Docker containers run (almost) everywhere. You can deploy
containers on desktops, physical servers, virtual machines, into
data centers, and up to public and private clouds.
@ -50,13 +50,13 @@ Docker consists of:
down fast and easy. You can quickly launch more containers when
needed and then shut them down easily when they're no longer needed.
- **Get higher density and run more workloads**
- **Get higher density and run more workloads**
* Docker containers don't need a hypervisor, so you can pack more of
them onto your hosts. This means you get more value out of every
server and can potentially reduce what you spend on equipment and
licenses.
- **Faster deployment makes for easier management**
- **Faster deployment makes for easier management**
* As Docker speeds up your work flow, it gets easier to make lots
of small changes instead of huge, big bang updates. Smaller
changes mean reduced risk and more uptime.

View file

@ -0,0 +1 @@
google.md: Johan Euphrosine <proppy@google.com> (@proppy)

View file

@ -7,13 +7,13 @@ page_keywords: Docker, Docker documentation, requirements, boot2docker, VirtualB
> **Note:**
> Docker is supported on Mac OS X 10.6 "Snow Leopard" or newer.
The Docker Engine uses Linux-specific kernel features, so we run it on OS X
using a lightweight virtual machine. You can use the OS X Docker client to
control the virtualized engine to build, run and manage Docker containers.
The Docker Engine uses Linux-specific kernel features, so to run it on OS X
we need to use a lightweight virtual machine (vm). You use the OS X Docker client to
control the virtualized Docker Engine to build, run, and manage Docker containers.
To make this process easier we designed a helper application called
[Boot2Docker](https://github.com/boot2docker/boot2docker) to install the
virtual machine and run the Docker daemon.
To make this process easier, we've designed a helper application called
[Boot2Docker](https://github.com/boot2docker/boot2docker) that installs the
virtual machine and runs the Docker daemon.
## Demonstration
@ -31,7 +31,7 @@ virtual machine and run the Docker daemon.
3. Run the `Boot2Docker` app in the `Applications` folder:
![](/installation/images/osx-Boot2Docker-Start-app.png)
Or to initiate Boot2Docker manually, open a terminal and run:
Or, to initiate Boot2Docker manually, open a terminal and run:
$ boot2docker init
$ boot2docker start
@ -41,8 +41,8 @@ virtual machine and run the Docker daemon.
(but least secure) is to just hit [Enter]. This passphrase is used by the
`boot2docker ssh` command.
Once you have an initialized virtual machine, you can `boot2docker stop`
and `boot2docker start` it.
Once you have an initialized virtual machine, you can control it with `boot2docker stop`
and `boot2docker start`.
## Upgrading
@ -60,36 +60,34 @@ and `boot2docker start` it.
## Running Docker
From your terminal, you can try the “hello world” example. Run:
From your terminal, you can test that Docker is running with a “hello world” example.
Start the vm and then run:
$ docker run ubuntu echo hello world
This will download the `ubuntu` image and print `hello world`.
This should download the `ubuntu` image and print `hello world`.
## Container port redirection
The latest version of `boot2docker` sets up two network adapters: one using NAT
to allow the VM to download images and files from the Internet, and one host only
network adapter to which the container's ports will be exposed on.
The latest version of `boot2docker` sets up a host only network adaptor which provides
access to the container's ports.
If you run a container with an exposed port:
If you run a container with an exposed port,
$ docker run --rm -i -t -p 80:80 apache
$ docker run --rm -i -t -p 80:80 nginx
Then you should be able to access that Apache server using the IP address reported
to you using:
then you should be able to access that Nginx server using the IP address reported by:
$ boot2docker ssh ip addr show dev eth1
Typically, it is 192.168.59.103, but at this point it can change.
If you want to share container ports with other computers on your LAN, you will
need to set up [NAT adaptor based port forwarding](
https://github.com/boot2docker/boot2docker/blob/master/doc/WORKAROUNDS.md)
Typically, it is 192.168.59.103, but it could get changed by Virtualbox's DHCP
implementation.
# Further details
The Boot2Docker management tool provides some commands:
If you are curious, the username for the boot2docker default user is `docker` and the password is `tcuser`.
The Boot2Docker management tool provides several commands:
$ ./boot2docker
Usage: ./boot2docker [<options>]
@ -97,4 +95,4 @@ The Boot2Docker management tool provides some commands:
Continue with the [User Guide](/userguide/).
For further information or to report issues, please see the [Boot2Docker site](http://boot2docker.io).
For further information or to report issues, please visit the [Boot2Docker site](http://boot2docker.io).

View file

@ -17,7 +17,7 @@ Please read [*Docker and UFW*](#docker-and-ufw), if you plan to use [UFW
## Ubuntu Trusty 14.04 (LTS) (64-bit)
Ubuntu Trusty comes with a 3.13.0 Linux kernel, and a `docker.io` package which
installs all its prerequisites from Ubuntu's repository.
installs Docker 0.9.1 and all its prerequisites from Ubuntu's repository.
> **Note**:
> Ubuntu (and Debian) contain a much older KDE3/GNOME2 package called ``docker``, so the
@ -32,13 +32,45 @@ To install the latest Ubuntu package (may not be the latest Docker release):
$ sudo ln -sf /usr/bin/docker.io /usr/local/bin/docker
$ sudo sed -i '$acomplete -F _docker docker' /etc/bash_completion.d/docker.io
If you'd like to try the latest version of Docker:
First, check that your APT system can deal with `https`
URLs: the file `/usr/lib/apt/methods/https`
should exist. If it doesn't, you need to install the package
`apt-transport-https`.
[ -e /usr/lib/apt/methods/https ] || {
apt-get update
apt-get install apt-transport-https
}
Then, add the Docker repository key to your local keychain.
$ sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
Add the Docker repository to your apt sources list, update and install
the `lxc-docker` package.
*You may receive a warning that the package isn't trusted. Answer yes to
continue installation.*
$ sudo sh -c "echo deb https://get.docker.io/ubuntu docker main\
> /etc/apt/sources.list.d/docker.list"
$ sudo apt-get update
$ sudo apt-get install lxc-docker
> **Note**:
>
> There is also a simple `curl` script available to help with this process.
>
> $ curl -s https://get.docker.io/ubuntu/ | sudo sh
To verify that everything has worked as expected:
$ sudo docker run -i -t ubuntu /bin/bash
Which should download the `ubuntu` image, and then start `bash` in a container.
## Ubuntu Precise 12.04 (LTS) (64-bit)
This installation path should work at all times.
@ -284,7 +316,7 @@ Docker daemon for the containers:
$ sudo nano /etc/default/docker
---
# Add:
$ docker_OPTS="--dns 8.8.8.8"
$ DOCKER_OPTS="--dns 8.8.8.8"
# 8.8.8.8 could be replaced with a local DNS server, such as 192.168.1.1
# multiple DNS servers can be specified: --dns 8.8.8.8 --dns 192.168.1.1

View file

@ -3,14 +3,17 @@ page_description: Docker installation on Microsoft Windows
page_keywords: Docker, Docker documentation, Windows, requirements, virtualbox, boot2docker
# Windows
> **Note:**
> Docker has been tested on Windows 7.1 and 8; it may also run on older versions.
Docker Engine runs on Windows using a lightweight virtual machine. There
is no native Windows Docker client yet, so everything is done inside the virtual
machine.
To make this process easier we designed a helper application called
[Boot2Docker](https://github.com/boot2docker/boot2docker) to install the
virtual machine and run the Docker daemon.
The Docker Engine uses Linux-specific kernel features, so to run it on Windows
we need to use a lightweight virtual machine (vm). You use the Windows Docker client to
control the virtualized Docker Engine to build, run, and manage Docker containers.
To make this process easier, we've designed a helper application called
[Boot2Docker](https://github.com/boot2docker/boot2docker) that installs the
virtual machine and runs the Docker daemon.
## Demonstration
@ -19,8 +22,8 @@ virtual machine and run the Docker daemon.
## Installation
1. Download the latest release of the [Docker for Windows Installer](https://github.com/boot2docker/windows-installer/releases)
2. Run the installer, which will install VirtualBox, MSYS-git, the boot2docker Linux ISO and the
Boot2Docker management tool.
2. Run the installer, which will install VirtualBox, MSYS-git, the boot2docker Linux ISO,
and the Boot2Docker management tool.
![](/installation/images/windows-installer.png)
3. Run the `Boot2Docker Start` shell script from your Desktop or Program Files > Docker.
The Start script will ask you to enter an ssh key passphrase - the simplest
@ -46,19 +49,18 @@ virtual machine and run the Docker daemon.
## Running Docker
Boot2Docker will log you in automatically so you can start using Docker
right away.
Boot2Docker will log you in automatically so you can start using Docker right away.
Let's try the “hello world” example. Run
$ docker run busybox echo hello world
This will download the small busybox image and print hello world.
This will download the small busybox image and print "hello world".
# Further Details
The Boot2Docker management tool provides some commands:
The Boot2Docker management tool provides several commands:
$ ./boot2docker
Usage: ./boot2docker [<options>] {help|init|up|ssh|save|down|poweroff|reset|restart|config|status|info|delete|download|version} [<args>]
@ -66,18 +68,20 @@ The Boot2Docker management tool provides some commands:
## Container port redirection
The latest version of `boot2docker` sets up a host only
network adaptor on which the container's ports will be exposed.
If you are curious, the username for the boot2docker default user is `docker` and the password is `tcuser`.
The latest version of `boot2docker` sets up a host only network adaptor which provides access to the container's ports.
If you run a container with an exposed port:
docker run --rm -i -t -p 80:80 apache
docker run --rm -i -t -p 80:80 nginx
Then you should be able to access that Apache server using the IP address reported
Then you should be able to access that nginx server using the IP address reported
to you using:
boot2docker ip
Typically, it is `192.168.59.103`, but it can change.
Typically, it is 192.168.59.103, but it could get changed by Virtualbox's DHCP
implementation.
For further information or to report issues, please see the [Boot2Docker site](http://boot2docker.io)

View file

@ -3,140 +3,129 @@ page_description: Docker explained in depth
page_keywords: docker, introduction, documentation, about, technology, understanding
# Understanding Docker
**What is Docker?**
Docker is a platform for developing, shipping, and running applications.
Docker is designed to deliver your applications faster. With Docker you
can separate your applications from your infrastructure AND treat your
infrastructure like a managed application. We want to help you ship code
faster, test faster, deploy faster and shorten the cycle between writing
code and running code.
Docker is an open platform for developing, shipping, and running applications.
Docker is designed to deliver your applications faster. With Docker you can
separate your applications from your infrastructure AND treat your
infrastructure like a managed application. Docker helps you ship code faster,
test faster, deploy faster, and shorten the cycle between writing code and
running code.
Docker does this by combining a lightweight container virtualization
platform with workflow and tooling that helps you manage and deploy your
applications.
Docker does this by combining a lightweight container virtualization platform
with workflows and tooling that help you manage and deploy your applications.
At its core Docker provides a way to run almost any application securely
isolated into a container. The isolation and security allows you to run
many containers simultaneously on your host. The lightweight nature of
containers, which run without the extra overload of a hypervisor, means
you can get more out of your hardware.
At its core, Docker provides a way to run almost any application securely
isolated in a container. The isolation and security allow you to run many
containers simultaneously on your host. The lightweight nature of containers,
which run without the extra load of a hypervisor, means you can get more out of
your hardware.
Surrounding the container virtualization, we provide tooling and a
platform to help you get your applications (and its supporting
components) into Docker containers, to distribute and ship those
containers to your teams to develop and test on them and then to deploy
those applications to your production environment whether it be in a
local data center or the Cloud.
Surrounding the container virtualization are tooling and a platform which can
help you in several ways:
* getting your applications (and supporting components) into Docker containers
* distributing and shipping those containers to your teams for further development
and testing
* deploying those applications to your production environment,
whether it be in a local data center or the Cloud.
## What can I use Docker for?
* Faster delivery of your applications
*Faster delivery of your applications*
Docker is perfect for helping you with the development lifecycle. Docker
can allow your developers to develop on local containers that contain
your applications and services. It can integrate into a continuous
integration and deployment workflow.
allows your developers to develop on local containers that contain your
applications and services. It can then integrate into a continuous integration and
deployment workflow.
Your developers write code locally and share their development stack via
Docker with their colleagues. When they are ready they can push their
code and the stack they are developing on to a test environment and
execute any required tests. From the testing environment you can then
push your Docker images into production and deploy your code.
For example, your developers write code locally and share their development stack via
Docker with their colleagues. When they are ready, they push their code and the
stack they are developing onto a test environment and execute any required
tests. From the testing environment, you can then push the Docker images into
production and deploy your code.
* Deploy and scale more easily
*Deploying and scaling more easily*
Docker's container platform allows you to have highly portable
workloads. Docker containers can run on a developer's local host, on
physical or virtual machines in a data center or in the Cloud.
Docker's container-based platform allows for highly portable workloads. Docker
containers can run on a developer's local host, on physical or virtual machines
in a data center, or in the Cloud.
Docker's portability and lightweight nature also makes managing
workloads dynamically easy. You can use Docker to build and scale out
applications and services. Docker's speed means that scaling can be near
real time.
Docker's portability and lightweight nature also make dynamically managing
workloads easy. You can use Docker to quickly scale up or tear down applications
and services. Docker's speed means that scaling can be near real time.
* Get higher density and run more workloads
*Achieving higher density and running more workloads*
Docker is lightweight and fast. It provides a viable (and
cost-effective!) alternative to hypervisor-based virtual machines. This
is especially useful in high density environments, for example building
your own Cloud or Platform-as-a-Service. But it is also useful
for small and medium deployments where you want to get more out of the
resources you have.
Docker is lightweight and fast. It provides a viable, cost-effective alternative
to hypervisor-based virtual machines. This is especially useful in high density
environments: for example, building your own Cloud or Platform-as-a-Service. But
it is also useful for small and medium deployments where you want to get more
out of the resources you have.
## What are the major Docker components?
Docker has two major components:
* Docker: the open source container virtualization platform.
* [Docker Hub](https://hub.docker.com): our Software-as-a-Service
platform for sharing and managing Docker containers.
**Note:** Docker is licensed with the open source Apache 2.0 license.
## What is the architecture of Docker?
**Note:** Docker is licensed under the open source Apache 2.0 license.
Docker has a client-server architecture. The Docker *client* talks to
the Docker *daemon* which does the heavy lifting of building, running
and distributing your Docker containers. Both the Docker client and the
daemon *can* run on the same system, or you can connect a Docker client
with a remote Docker daemon. The Docker client and service can
communicate via sockets or through a RESTful API.
## What is Docker's architecture?
Docker uses a client-server architecture. The Docker *client* talks to the
Docker *daemon*, which does the heavy lifting of building, running, and
distributing your Docker containers. Both the Docker client and the daemon *can*
run on the same system, or you can connect a Docker client to a remote Docker
daemon. The Docker client and service communicate via sockets or through a
RESTful API.
![Docker Architecture Diagram](/article-img/architecture.svg)
### The Docker daemon
As shown in the diagram above, the Docker daemon runs on a host machine. The
user does not directly interact with the daemon, but instead through the Docker
client.
As shown on the diagram above, the Docker daemon runs on a host machine.
The user does not directly interact with the daemon, but instead through
the Docker client.
### The Docker client
### The Docker client
The Docker client, in the form of the `docker` binary, is the primary user
interface to Docker. It is tasked with accepting commands from the user
and communicating back and forth with a Docker daemon.
interface to Docker. It accepts commands from the user and communicates back and
forth with a Docker daemon.
### Inside Docker
### Inside Docker
To understand Docker's internals, you need to know about three components:
Inside Docker there are three concepts we'll need to understand:
* Docker images.
* Docker registries.
* Docker images.
* Docker registries.
* Docker containers.
#### Docker images
The Docker image is a read-only template, for example an Ubuntu operating system
with Apache and your web application installed. Docker containers are
created from images. You can download Docker images that other people
have created or Docker provides a simple way to build new images or
update existing images. You can consider Docker images to be the **build**
portion of Docker.
A Docker image is a read-only template. For example, an image could contain an Ubuntu
operating system with Apache and your web application installed. Images are used to create
Docker containers. Docker provides a simple way to build new images or update existing
images, or you can download Docker images that other people have already created.
Docker images are the **build** component of Docker.
#### Docker Registries
Docker registries hold images. These are public or private stores from which you upload
or download images. The public Docker registry is called
[Docker Hub](http://index.docker.io). It provides a huge collection of existing
images for your use. These can be images you create yourself or you
can use images that others have previously created. Docker registries are the
**distribution** component of Docker.
Docker registries hold images. These are public (or private!) stores
that you can upload or download images to and from. The public Docker
registry is called [Docker Hub](https://hub.docker.com). It provides a
huge collection of existing images that you can use. These images can be
images you create yourself or you can make use of images that others
have previously created. You can consider Docker registries the
**distribution** portion of Docker.
####Docker containers
Docker containers are similar to a directory. A Docker container holds everything that
is needed for an application to run. Each container is created from a Docker
image. Docker containers can be run, started, stopped, moved, and deleted. Each
container is an isolated and secure application platform. Docker containers are the
**run** component of Docker.
#### Docker containers
Docker containers are like a directory. A Docker container holds
everything that is needed for an application to run. Each container is
created from a Docker image. Docker containers can be run, started,
stopped, moved and deleted. Each container is an isolated and secure
application platform. You can consider Docker containers the **run**
portion of Docker.
## So how does Docker work?
We've learned so far that:
##So how does Docker work?
So far, we've learned that:
1. You can build Docker images that hold your applications.
2. You can create Docker containers from those Docker images to run your
@ -146,183 +135,152 @@ We've learned so far that:
Let's look at how these elements combine together to make Docker work.
### How does a Docker Image work?
### How does a Docker Image work?
We've already seen that Docker images are read-only templates from which Docker
containers are launched. Each image consists of a series of layers. Docker
makes use of [union file systems](http://en.wikipedia.org/wiki/UnionFS) to
combine these layers into a single image. Union file systems allow files and
directories of separate file systems, known as branches, to be transparently
overlaid, forming a single coherent file system.
We've already seen that Docker images are read-only templates that
Docker containers are launched from. Each image consists of a series of
layers. Docker makes use of [union file
systems](http://en.wikipedia.org/wiki/UnionFS) to combine these layers
into a single image. Union file systems allow files and directories of
separate file systems, known as branches, to be transparently overlaid,
forming a single coherent file system.
One of the reasons Docker is so lightweight is because of these layers. When you
change a Docker image—for example, update an application to a new version— a new layer
gets built. Thus, rather than replacing the whole image or entirely
rebuilding, as you may do with a virtual machine, only that layer is added or
updated. Now you don't need to distribute a whole new image, just the update,
making distributing Docker images faster and simpler.
One of the reasons Docker is so lightweight is because of these layers.
When you change a Docker image, for example update an application to a
new version, this builds a new layer. Hence, rather than replacing the whole
image or entirely rebuilding, as you may do with a virtual machine, only
that layer is added or updated. Now you don't need to distribute a whole new image,
just the update, making distributing Docker images fast and simple.
Every image starts from a base image, for example `ubuntu`, a base Ubuntu image,
or `fedora`, a base Fedora image. You can also use images of your own as the
basis for a new image, for example if you have a base Apache image you could use
this as the base of all your web application images.
Every image starts from a base image, for example `ubuntu`, a base Ubuntu
image, or `fedora`, a base Fedora image. You can also use images of your
own as the basis for a new image, for example if you have a base Apache
image you could use this as the base of all your web application images.
> **Note:** Docker usually gets these base images from
> [Docker Hub](https://index.docker.io).
>
Docker images are then built from these base images using a simple, descriptive
set of steps we call *instructions*. Each instruction creates a new layer in our
image. Instructions include actions like:
> **Note:**
> Docker usually gets these base images from [Docker Hub](https://hub.docker.com).
Docker images are then built from these base images using a simple
descriptive set of steps we call *instructions*. Each instruction
creates a new layer in our image. Instructions include steps like:
* Run a command.
* Add a file or directory.
* Run a command.
* Add a file or directory.
* Create an environment variable.
* What process to run when launching a container from this image.
These instructions are stored in a file called a `Dockerfile`. Docker
reads this `Dockerfile` when you request an image be built, executes the
instructions and returns a final image.
These instructions are stored in a file called a `Dockerfile`. Docker reads this
`Dockerfile` when you request a build of an image, executes the instructions, and
returns a final image.
### How does a Docker registry work?
The Docker registry is the store for your Docker images. Once you build a Docker
image you can *push* it to a public registry [Docker Hub](https://index.docker.io) or to
your own registry running behind your firewall.
The Docker registry is the store for your Docker images. Once you build
a Docker image you can *push* it to a public registry [Docker
Hub](https://hub.docker.com) or to your own registry running behind your
firewall.
Using the Docker client, you can search for already published images and then
pull them down to your Docker host to build containers from them.
Using the Docker client, you can search for already published images and
then pull them down to your Docker host to build containers from them.
[Docker Hub](https://hub.docker.com) provides both public and
private storage for images. Public storage is searchable and can be
downloaded by anyone. Private storage is excluded from search
results and only you and your users can pull them down and use them to
build containers. You can [sign up for a plan
here](https://registry.hub.docker.com/plans/).
[Docker Hub](https://index.docker.io) provides both public and private storage
for images. Public storage is searchable and can be downloaded by anyone.
Private storage is excluded from search results and only you and your users can
pull images down and use them to build containers. You can [sign up for a storage plan
here](https://index.docker.io/plans).
### How does a container work?
A container consists of an operating system, user added files and
meta-data. As we've discovered each container is built from an image. That image tells
Docker what the container holds, what process to run when the container
is launched and a variety of other configuration data. The Docker image
is read-only. When Docker runs a container from an image it adds a
read-write layer on top of the image (using a union file system as we
saw earlier) in which your application is then run.
A container consists of an operating system, user-added files, and meta-data. As
we've seen, each container is built from an image. That image tells Docker
what the container holds, what process to run when the container is launched, and
a variety of other configuration data. The Docker image is read-only. When
Docker runs a container from an image, it adds a read-write layer on top of the
image (using a union file system as we saw earlier) in which your application can
then run.
### What happens when you run a container?
The Docker client using the `docker` binary, or via the API, tells the
Docker daemon to run a container. Let's take a look at what happens
next.
Either by using the `docker` binary or via the API, the Docker client tells the Docker
daemon to run a container.
$ docker run -i -t ubuntu /bin/bash
Let's break down this command. The Docker client is launched using the
`docker` binary with the `run` option telling it to launch a new
container. The bare minimum the Docker client needs to tell the
Docker daemon to run the container is:
Let's break down this command. The Docker client is launched using the `docker`
binary with the `run` option telling it to launch a new container. The bare
minimum the Docker client needs to tell the Docker daemon to run the container
is:
* What Docker image to build the container from, here `ubuntu`, a base
Ubuntu image;
* What Docker image to build the container from, here `ubuntu`, a base Ubuntu
image;
* The command you want to run inside the container when it is launched,
here `bin/bash` to shell the Bash shell inside the new container.
here `/bin/bash`, to start the Bash shell inside the new container.
So what happens under the covers when we run this command?
So what happens under the hood when we run this command?
Docker begins with:
In order, Docker does the following:
- **Pulling the `ubuntu` image:**
Docker checks for the presence of the `ubuntu` image and if it doesn't
exist locally on the host, then Docker downloads it from
[Docker Hub](https://hub.docker.com). If the image already exists then
Docker uses it for the new container.
- **Creates a new container:**
Once Docker has the image it creates a container from it:
* **Allocates a filesystem and mounts a read-write _layer_:**
The container is created in the file system and a read-write layer is
added to the image.
* **Allocates a network / bridge interface:**
Creates a network interface that allows the Docker container to talk to
the local host.
* **Sets up an IP address:**
Finds and attaches an available IP address from a pool.
- **Executes a process that you specify:**
Runs your application, and;
- **Captures and provides application output:**
Connects and logs standard input, outputs and errors for you to see how
your application is running.
- **Pulls the `ubuntu` image:** Docker checks for the presence of the `ubuntu`
image and, if it doesn't exist locally on the host, then Docker downloads it from
[Docker Hub](https://index.docker.io). If the image already exists, then Docker
uses it for the new container.
- **Creates a new container:** Once Docker has the image, it uses it to create a
container.
- **Allocates a filesystem and mounts a read-write _layer_:** The container is created in
the file system and a read-write layer is added to the image.
- **Allocates a network / bridge interface:** Creates a network interface that allows the
Docker container to talk to the local host.
- **Sets up an IP address:** Finds and attaches an available IP address from a pool.
- **Executes a process that you specify:** Runs your application, and;
- **Captures and provides application output:** Connects and logs standard input, outputs
and errors for you to see how your application is running.
Now you have a running container! From here you can manage your running
container, interact with your application and then when finished stop
and remove your container.
You now have a running container! From here you can manage your container, interact with
your application and then, when finished, stop and remove your container.
## The underlying technology
Docker is written in Go and makes use of several Linux kernel features to
deliver the features we've seen.
deliver the functionality we've seen.
### Namespaces
Docker takes advantage of a technology called `namespaces` to provide the
isolated workspace we call the *container*. When you run a container, Docker
creates a set of *namespaces* for that container.
Docker takes advantage of a technology called `namespaces` to provide an
isolated workspace we call a *container*. When you run a container,
Docker creates a set of *namespaces* for that container.
This provides a layer of isolation: each aspect of a container runs in
its own namespace and does not have access outside it.
This provides a layer of isolation: each aspect of a container runs in its own
namespace and does not have access outside it.
Some of the namespaces that Docker uses are:
- **The `pid` namespace:**
Used for process isolation (PID: Process ID).
- **The `net` namespace:**
Used for managing network interfaces (NET: Networking).
- **The `ipc` namespace:**
Used for managing access to IPC resources (IPC: InterProcess
Communication).
- **The `mnt` namespace:**
Used for managing mount-points (MNT: Mount).
- **The `uts` namespace:**
Used for isolating kernel and version identifiers. (UTS: Unix Timesharing
System).
- **The `pid` namespace:** Used for process isolation (PID: Process ID).
- **The `net` namespace:** Used for managing network interfaces (NET:
Networking).
- **The `ipc` namespace:** Used for managing access to IPC
resources (IPC: InterProcess Communication).
- **The `mnt` namespace:** Used for managing mount-points (MNT: Mount).
- **The `uts` namespace:** Used for isolating kernel and version identifiers. (UTS: Unix
Timesharing System).
### Control groups
Docker also makes use of another technology called `cgroups` or control
groups. A key need to run applications in isolation is to have them only
use the resources you want. This ensures containers are good
multi-tenant citizens on a host. Control groups allow Docker to
share available hardware resources to containers and if required, set up to
limits and constraints, for example limiting the memory available to a
specific container.
Docker also makes use of another technology called `cgroups` or control groups.
A key to running applications in isolation is to have them only use the
resources you want. This ensures containers are good multi-tenant citizens on a
host. Control groups allow Docker to share available hardware resources to
containers and, if required, set up limits and constraints. For example,
limiting the memory available to a specific container.
### Union file systems
Union file systems, or UnionFS, are file systems that operate by creating layers,
making them very lightweight and fast. Docker uses union file systems to provide
the building blocks for containers. Docker can make use of several union file system variants
including: AUFS, btrfs, vfs, and DeviceMapper.
Union file systems or UnionFS are file systems that operate by creating
layers, making them very lightweight and fast. Docker uses union file
systems to provide the building blocks for containers. We learned about
union file systems earlier in this document. Docker can make use of
several union file system variants including: AUFS, btrfs, vfs, and
DeviceMapper.
### Container format
Docker combines these components into a wrapper we call a container
format. The default container format is called `libcontainer`. Docker
also supports traditional Linux containers using
[LXC](https://linuxcontainers.org/). In future Docker may support other
container formats, for example integration with BSD Jails or Solaris
Zones.
### Container format
Docker combines these components into a wrapper we call a container format. The
default container format is called `libcontainer`. Docker also supports
traditional Linux containers using [LXC](https://linuxcontainers.org/). In the
future, Docker may support other container formats, for example, by integrating with
BSD Jails or Solaris Zones.
## Next steps
### Installing Docker
Visit the [installation](/installation/#installation) section.
Visit the [installation section](/installation/#installation).
### The Docker User Guide
[Learn how to use Docker](/userguide/).
[Learn Docker in depth](/userguide/).

View file

@ -52,6 +52,7 @@ interfaces:
- [Docker Remote API](docker_remote_api/)
- [1. Brief introduction](docker_remote_api/#brief-introduction)
- [2. Versions](docker_remote_api/#versions)
- [v1.12](docker_remote_api/#v1-12)
- [v1.11](docker_remote_api/#v1-11)
- [v1.10](docker_remote_api/#v1-10)
- [v1.9](docker_remote_api/#v1-9)
@ -83,4 +84,4 @@ interfaces:
- [1.3 List email addresses for a user](docker_io_accounts_api/#list-email-addresses-for-a-user)
- [1.4 Add email address for a user](docker_io_accounts_api/#add-email-address-for-a-user)
- [1.5 Update an email address for a user](docker_io_accounts_api/#update-an-email-address-for-a-user)
- [1.6 Delete email address for a user](docker_io_accounts_api/#delete-email-address-for-a-user)
- [1.6 Delete email address for a user](docker_io_accounts_api/#delete-email-address-for-a-user)

View file

@ -117,7 +117,7 @@ an Authorization Code.
## 3.2 Get an Access Token
Once the user has authorized your application, a request will be made to
your application'sspecified `redirect_uri` which
your application's specified `redirect_uri` which
includes a `code` parameter that you must then use
to get an Access Token.

View file

@ -4,29 +4,27 @@ page_keywords: API, Docker, rcli, REST, documentation
# Docker Remote API
- The Remote API is replacing rcli
- By default the Docker daemon listens on unix:///var/run/docker.sock
and the client must have root access to interact with the daemon
- If a group named *docker* exists on your system, docker will apply
ownership of the socket to the group
- The Remote API is replacing `rcli`.
- By default the Docker daemon listens on `unix:///var/run/docker.sock`
and the client must have `root` access to interact with the daemon.
- If a group named `docker` exists on your system, docker will apply
ownership of the socket to the group.
- The API tends to be REST, but for some complex commands, like attach
or pull, the HTTP connection is hijacked to transport stdout stdin
and stderr
or pull, the HTTP connection is hijacked to transport STDOUT, STDIN,
and STDERR.
- Since API version 1.2, the auth configuration is now handled client
side, so the client has to send the authConfig as POST in /images/(name)/push
side, so the client has to send the `authConfig` as a `POST` in `/images/(name)/push`.
- authConfig, set as the `X-Registry-Auth` header, is currently a Base64
encoded (json) string with credentials:
encoded (JSON) string with credentials:
`{'username': string, 'password': string, 'email': string, 'serveraddress' : string}`
The current version of the API is v1.12
Calling /images/<name>/insert is the same as calling
/v1.12/images/<name>/insert
Calling `/images/<name>/insert` is the same as calling
`/v1.12/images/<name>/insert`.
You can still call an old version of the api using
/v1.12/images/<name>/insert
You can still call an old version of the API using
`/v1.12/images/<name>/insert`.
## v1.12
@ -51,7 +49,7 @@ All the JSON keys are now in CamelCase
Trusted builds are now Automated Builds - `is_trusted` is now `is_automated`.
**Removed Insert Endpoint**
The insert endpoint has been removed.
The `insert` endpoint has been removed.
## v1.11
@ -96,7 +94,7 @@ You can now use the force parameter to force delete of an
`DELETE /containers/(id)`
**New!**
You can now use the force paramter to force delete a
You can now use the force parameter to force delete a
container, even if it is currently running
## v1.9

View file

@ -6,12 +6,12 @@ page_keywords: API, Docker, rcli, REST, documentation
## 1. Brief introduction
- The Remote API has replaced rcli
- The Remote API has replaced `rcli`.
- The daemon listens on `unix:///var/run/docker.sock` but you can bind
Docker to another host/port or a Unix socket.
- The API tends to be REST, but for some complex commands, like `attach`
or `pull`, the HTTP connection is hijacked to transport `stdout, stdin`
and `stderr`
or `pull`, the HTTP connection is hijacked to transport `STDOUT`, `STDIN`
and `STDERR`.
# 2. Endpoints

View file

@ -6,13 +6,13 @@ page_keywords: API, Docker, rcli, REST, documentation
## 1. Brief introduction
- The Remote API has replaced rcli
- The Remote API has replaced `rcli`.
- The daemon listens on `unix:///var/run/docker.sock` but you can
[*Bind Docker to another host/port or a Unix socket*](
/use/basics/#bind-docker).
- The API tends to be REST, but for some complex commands, like `attach`
or `pull`, the HTTP connection is hijacked to transport `stdout, stdin`
and `stderr`
or `pull`, the HTTP connection is hijacked to transport `STDOUT`,
`STDIN` and `STDERR`.
# 2. Endpoints
@ -406,6 +406,7 @@ Start the container `id`
{
"Binds":["/tmp:/tmp"],
"Links":["redis3:redis"],
"LxcConf":{"lxc.utsname":"docker"},
"PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] },
"PublishAllPorts":false,
@ -1164,6 +1165,7 @@ Show the docker version information
Content-Type: application/json
{
"ApiVersion":"1.12",
"Version":"0.2.2",
"GitCommit":"5a2a5cc+CHANGES",
"GoVersion":"go1.0.3"

View file

@ -1,29 +1,29 @@
page_title: Registry Documentation
page_description: Documentation for docker Registry and Registry API
page_keywords: docker, registry, api, index
page_keywords: docker, registry, api, hub
# Registry & Index Spec
# The Docker Hub and the Registry spec
## The 3 roles
### Index
### Docker Hub
The Index is responsible for centralizing information about:
The Docker Hub is responsible for centralizing information about:
- User accounts
- Checksums of the images
- Public namespaces
The Index has different components:
The Docker Hub has different components:
- Web UI
- Meta-data store (comments, stars, list public repositories)
- Authentication service
- Tokenization
The index is authoritative for those information.
The Docker Hub is authoritative for that information.
We expect that there will be only one instance of the index, run and
We expect that there will be only one instance of the Docker Hub, run and
managed by Docker Inc.
### Registry
@ -31,7 +31,7 @@ managed by Docker Inc.
- It stores the images and the graph for a set of repositories
- It does not have user accounts data
- It has no notion of user accounts or authorization
- It delegates authentication and authorization to the Index Auth
- It delegates authentication and authorization to the Docker Hub Auth
service using tokens
- It supports different storage backends (S3, cloud files, local FS)
- It doesn't have a local database
@ -45,7 +45,7 @@ grasp the context, here are some examples of registries:
docker community as a whole. Its costs are supported by the third
party, but the management and operation of the registry are
supported by dotCloud. It features read/write access, and delegates
authentication and authorization to the Index.
authentication and authorization to the Docker Hub.
- **mirror registry**: such a registry is provided by a third-party
hosting infrastructure but is targeted at their customers only. Some
mechanism (unspecified to date) ensures that public images are
@ -57,7 +57,7 @@ grasp the context, here are some examples of registries:
and managed by the vendor. Only users authorized by the vendor would
be able to get write access. Some images would be public (accessible
for anyone), others private (accessible only for authorized users).
Authentication and authorization would be delegated to the Index.
Authentication and authorization would be delegated to the Docker Hub.
The goal of vendor registries is to let someone do “docker pull
basho/riak1.3” and automatically push from the vendor registry
(instead of a sponsor registry); i.e. get all the convenience of a
@ -67,7 +67,7 @@ grasp the context, here are some examples of registries:
SSL client-side certificates, IP address authorization...). The
registry is operated by a private entity, outside of dotCloud's
control. It can optionally delegate additional authorization to the
Index, but it is not mandatory.
Docker Hub, but it is not mandatory.
> **Note:** The latter implies that while HTTP is the protocol
> of choice for a registry, multiple schemes are possible (and
@ -89,7 +89,7 @@ On top of being a runtime for LXC, Docker is the Registry client. It
supports:
- Push / Pull on the registry
- Client authentication on the Index
- Client authentication on the Docker Hub
## Workflow
@ -97,15 +97,15 @@ supports:
![](/static_files/docker_pull_chart.png)
1. Contact the Index to know where I should download “samalba/busybox”
2. Index replies: a. `samalba/busybox` is on Registry A b. here are the
1. Contact the Docker Hub to know where I should download “samalba/busybox”
2. Docker Hub replies: a. `samalba/busybox` is on Registry A b. here are the
checksums for `samalba/busybox` (for all layers) c. token
3. Contact Registry A to receive the layers for `samalba/busybox` (all of
them to the base image). Registry A is authoritative for “samalba/busybox”
but keeps a copy of all inherited layers and serve them all from the same
location.
4. registry contacts index to verify if token/user is allowed to download images
5. Index returns true/false lettings registry know if it should proceed or error
4. registry contacts Docker Hub to verify if token/user is allowed to download images
5. Docker Hub returns true/false letting the registry know if it should proceed or error
out
6. Get the payload for all layers
@ -113,7 +113,7 @@ It's possible to run:
$ docker pull https://<registry>/repositories/samalba/busybox
In this case, Docker bypasses the Index. However the security is not
In this case, Docker bypasses the Docker Hub. However the security is not
guaranteed (in case Registry A is corrupted) because there won't be any
checksum checks.
@ -131,7 +131,7 @@ and for an active account.
**API (pulling repository foo/bar):**
1. (Docker -> Index) GET /v1/repositories/foo/bar/images:
1. (Docker -> Docker Hub) GET /v1/repositories/foo/bar/images:
**Headers**:
Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
@ -142,7 +142,7 @@ and for an active account.
for that repo (all if no tag is specified, if tag, only
checksums for those tags) see part 4.4.1)
2. (Index -> Docker) HTTP 200 OK
2. (Docker Hub -> Docker) HTTP 200 OK
**Headers**:
Authorization: Token
@ -158,7 +158,7 @@ and for an active account.
Authorization: Token
signature=123abc,repository=”foo/bar”,access=write
4. (Registry -> Index) GET /v1/repositories/foo/bar/images
4. (Registry -> Docker Hub) GET /v1/repositories/foo/bar/images
**Headers**:
Authorization: Token
@ -171,7 +171,7 @@ and for an active account.
(Lookup token see if they have access to pull.)
If good:
HTTP 200 OK Index will invalidate the token
HTTP 200 OK Docker Hub will invalidate the token
If bad:
HTTP 401 Unauthorized
@ -189,43 +189,43 @@ and for an active account.
![](/static_files/docker_push_chart.png)
1. Contact the index to allocate the repository name “samalba/busybox”
1. Contact the Docker Hub to allocate the repository name “samalba/busybox”
(authentication required with user credentials)
2. If authentication works and namespace available, “samalba/busybox”
is allocated and a temporary token is returned (namespace is marked
as initialized in index)
as initialized in Docker Hub)
3. Push the image on the registry (along with the token)
4. Registry A contacts the Index to verify the token (token must
4. Registry A contacts the Docker Hub to verify the token (token must
correspond to the repository name)
5. Index validates the token. Registry A starts reading the stream
5. Docker Hub validates the token. Registry A starts reading the stream
pushed by docker and stores the repository (with its images)
6. docker contacts the index to give checksums for upload images
6. docker contacts the Docker Hub to give checksums for uploaded images
> **Note:**
> **It's possible not to use the Index at all!** In this case, a deployed
> **It's possible not to use the Docker Hub at all!** In this case, a deployed
> version of the Registry is deployed to store and serve images. Those
> images are not authenticated and the security is not guaranteed.
> **Note:**
> **Index can be replaced!** For a private Registry deployed, a custom
> Index can be used to serve and validate token according to different
> **Docker Hub can be replaced!** For a private Registry deployed, a custom
> Docker Hub can be used to serve and validate token according to different
> policies.
Docker computes the checksums and submit them to the Index at the end of
the push. When a repository name does not have checksums on the Index,
Docker computes the checksums and submits them to the Docker Hub at the end of
the push. When a repository name does not have checksums on the Docker Hub,
it means that the push is in progress (since checksums are submitted at
the end).
**API (pushing repos foo/bar):**
1. (Docker -> Index) PUT /v1/repositories/foo/bar/
1. (Docker -> Docker Hub) PUT /v1/repositories/foo/bar/
**Headers**:
Authorization: Basic sdkjfskdjfhsdkjfh== X-Docker-Token:
true
**Action**:
- in index, we allocated a new repository, and set to
- in Docker Hub, we allocated a new repository, and set to
initialized
**Body**:
@ -235,7 +235,7 @@ the end).
[{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”}]
2. (Index -> Docker) 200 Created
2. (Docker Hub -> Docker) 200 Created
**Headers**:
- WWW-Authenticate: Token
@ -249,14 +249,14 @@ the end).
Authorization: Token
signature=123abc,repository=”foo/bar”,access=write
4. (Registry->Index) GET /v1/repositories/foo/bar/images
4. (Registry->Docker Hub) GET /v1/repositories/foo/bar/images
**Headers**:
Authorization: Token
signature=123abc,repository=”foo/bar”,access=write
**Action**:
- Index:
- Docker Hub:
will invalidate the token.
- Registry:
grants a session (if token is approved) and fetches
@ -292,7 +292,7 @@ the end).
**Body**:
“98765432”
10. (Docker -> Index) PUT /v1/repositories/foo/bar/images
10. (Docker -> Docker Hub) PUT /v1/repositories/foo/bar/images
**Headers**:
Authorization: Basic 123oislifjsldfj== X-Docker-Endpoints:
@ -307,33 +307,33 @@ the end).
**Return**: HTTP 204
> **Note:** If push fails and they need to start again, what happens in the index,
> **Note:** If push fails and they need to start again, what happens in the Docker Hub,
> there will already be a record for the namespace/name, but it will be
> initialized. Should we allow it, or mark as name already used? One edge
> case could be if someone pushes the same thing at the same time with two
> different shells.
If it's a retry on the Registry, Docker has a cookie (provided by the
registry after token validation). So the Index won't have to provide a
registry after token validation). So the Docker Hub won't have to provide a
new token.
### Delete
If you need to delete something from the index or registry, we need a
If you need to delete something from the Docker Hub or registry, we need a
nice clean way to do that. Here is the workflow.
1. Docker contacts the index to request a delete of a repository
1. Docker contacts the Docker Hub to request a delete of a repository
`samalba/busybox` (authentication required with user credentials)
2. If authentication works and repository is valid, `samalba/busybox`
is marked as deleted and a temporary token is returned
3. Send a delete request to the registry for the repository (along with
the token)
4. Registry A contacts the Index to verify the token (token must
4. Registry A contacts the Docker Hub to verify the token (token must
correspond to the repository name)
5. Index validates the token. Registry A deletes the repository and
5. Docker Hub validates the token. Registry A deletes the repository and
everything associated to it.
6. docker contacts the index to let it know it was removed from the
registry, the index removes all records from the database.
6. docker contacts the Docker Hub to let it know it was removed from the
registry, the Docker Hub removes all records from the database.
> **Note**:
> The Docker client should present an "Are you sure?" prompt to confirm
@ -342,20 +342,20 @@ nice clean way to do that. Here is the workflow.
**API (deleting repository foo/bar):**
1. (Docker -> Index) DELETE /v1/repositories/foo/bar/
1. (Docker -> Docker Hub) DELETE /v1/repositories/foo/bar/
**Headers**:
Authorization: Basic sdkjfskdjfhsdkjfh== X-Docker-Token:
true
**Action**:
- in index, we make sure it is a valid repository, and set
- in Docker Hub, we make sure it is a valid repository, and set
to deleted (logically)
**Body**:
Empty
2. (Index -> Docker) 202 Accepted
2. (Docker Hub -> Docker) 202 Accepted
**Headers**:
- WWW-Authenticate: Token
@ -370,14 +370,14 @@ nice clean way to do that. Here is the workflow.
Authorization: Token
signature=123abc,repository=”foo/bar”,access=delete
4. (Registry->Index) PUT /v1/repositories/foo/bar/auth
4. (Registry->Docker Hub) PUT /v1/repositories/foo/bar/auth
**Headers**:
Authorization: Token
signature=123abc,repository=”foo/bar”,access=delete
**Action**:
- Index:
- Docker Hub:
will invalidate the token.
- Registry:
deletes the repository (if token is approved)
@ -387,7 +387,7 @@ nice clean way to do that. Here is the workflow.
200 If success 403 if forbidden 400 if bad request 404
if repository isn't found
6. (Docker -> Index) DELETE /v1/repositories/foo/bar/
6. (Docker -> Docker Hub) DELETE /v1/repositories/foo/bar/
**Headers**:
Authorization: Basic 123oislifjsldfj== X-Docker-Endpoints:
@ -400,7 +400,7 @@ nice clean way to do that. Here is the workflow.
## How to use the Registry in standalone mode
The Index has two main purposes (along with its fancy social features):
The Docker Hub has two main purposes (along with its fancy social features):
- Resolve short names (to avoid passing absolute URLs all the time):
@ -412,15 +412,15 @@ The Index has two main purposes (along with its fancy social features):
- Authenticate a user as a repos owner (for a central referenced
repository)
### Without an Index
### Without a Docker Hub
Using the Registry without the Index can be useful to store the images
Using the Registry without the Docker Hub can be useful to store the images
on a private network without having to rely on an external entity
controlled by Docker Inc.
In this case, the registry will be launched in a special mode
(standalone? no-index?). In this mode, the only thing which changes is
that Registry will never contact the Index to verify a token. It will be
(standalone? no-index?). In this mode, the only thing which changes is
that Registry will never contact the Docker Hub to verify a token. It will be
the Registry owner's responsibility to authenticate the user who pushes
(or even pulls) an image using any mechanism (HTTP auth, IP based,
etc...).
@ -433,21 +433,21 @@ As hinted previously, a standalone registry can also be implemented by
any HTTP server handling GET/PUT requests (or even only GET requests if
no write access is necessary).
### With an Index
### With a Docker Hub
The Index data needed by the Registry are simple:
The Docker Hub data needed by the Registry are simple:
- Serve the checksums
- Provide and authorize a Token
In the scenario of a Registry running on a private network with the need
of centralizing and authorizing, it's easy to use a custom Index.
of centralizing and authorizing, it's easy to use a custom Docker Hub.
The only challenge will be to tell Docker to contact (and trust) this
custom Index. Docker will be configurable at some point to use a
specific Index, it'll be the private entity responsibility (basically
custom Docker Hub. Docker will be configurable at some point to use a
specific Docker Hub, it'll be the private entity's responsibility (basically
the organization who uses Docker in a private environment) to maintain
the Index and the Docker's configuration among its consumers.
the Docker Hub and the Docker's configuration among its consumers.
## The API
@ -474,7 +474,7 @@ file is empty.
### Users
### Create a user (Index)
### Create a user (Docker Hub)
POST /v1/users:
@ -497,7 +497,7 @@ etc) - forbidden name - name already exists
> A user account will be valid only if the email has been validated (a
> validation link is sent to the email address).
### Update a user (Index)
### Update a user (Docker Hub)
PUT /v1/users/<username>
@ -508,7 +508,7 @@ etc) - forbidden name - name already exists
> We can also update email address, if they do, they will need to reverify
> their new email address.
### Login (Index)
### Login (Docker Hub)
Does nothing else but asking for a user authentication. Can be used to
validate credentials. HTTP Basic Auth for now, maybe change in future.
@ -525,7 +525,7 @@ GET /v1/users
The Registry does not know anything about users. Even though
repositories are under usernames, it's just a namespace for the
registry. Allowing us to implement organizations or different namespaces
per user later, without modifying the Registry'sAPI.
per user later, without modifying the Registry's API.
The following naming restrictions apply:
@ -554,9 +554,9 @@ GET /v1/repositories/<namespace>/<repository_name>/tags
DELETE /v1/repositories/<namespace>/<repo_name>/tags/<tag>
### 4.4 Images (Index)
### 4.4 Images (Docker Hub)
For the Index to “resolve” the repository name to a Registry location,
For the Docker Hub to “resolve” the repository name to a Registry location,
it uses the X-Docker-Endpoints header. In other terms, these requests
always add an `X-Docker-Endpoints` header to indicate the
location of the registry which hosts this repository.
@ -594,7 +594,7 @@ DELETE /v1/repositories/<namespace>/<repo_name>
Return 200 OK
### Remove a Repository (Index)
### Remove a Repository (Docker Hub)
This starts the delete process. see 2.3 for more details.
@ -613,7 +613,7 @@ When a Registry is a reference for a repository, it should host the
entire images chain in order to avoid breaking the chain during the
download.
The Index and Registry use this mechanism to redirect on one or the
The Docker Hub and Registry use this mechanism to redirect on one or the
other.
Example with an image download:
@ -627,10 +627,10 @@ list.
## Authentication & Authorization
### On the Index
### On the Docker Hub
The Index supports both “Basic” and “Token” challenges. Usually when
there is a `401 Unauthorized`, the Index replies
The Docker Hub supports both “Basic” and “Token” challenges. Usually when
there is a `401 Unauthorized`, the Docker Hub replies
this:
401 Unauthorized

View file

@ -77,7 +77,7 @@ will add the libraries here.
<tr class="row-even">
<td>Java</td>
<td>docker-java</td>
<td><a class="reference external" href="https://github.com/kpelykh/docker-java">https://github.com/kpelykh/docker-java</a></td>
<td><a class="reference external" href="https://github.com/docker-java/docker-java">https://github.com/docker-java/docker-java</a></td>
<td>Active</td>
</tr>
<tr class="row-odd">
@ -122,11 +122,17 @@ will add the libraries here.
<td><a class="reference external" href="https://github.com/alambike/eixo-docker">https://github.com/alambike/eixo-docker</a></td>
<td>Active</td>
</tr>
<tr class="row-odd">
<tr class="row-even">
<td>Scala</td>
<td>reactive-docker</td>
<td><a class="reference external" href="https://github.com/almoehi/reactive-docker">https://github.com/almoehi/reactive-docker</a></td>
<td>Active</td>
</tr>
<tr class="row-odd">
<td>Java</td>
<td>docker-client</td>
<td><a class="reference external" href="https://github.com/spotify/docker-client">https://github.com/spotify/docker-client</a></td>
<td>Active</td>
</tr>
</tbody>
</table>

View file

@ -216,7 +216,7 @@ from the resulting image. You can view the values using `docker inspect`, and
change them using `docker run --env <key>=<value>`.
> **Note**:
> One example where this can cause unexpected consequenses, is setting
> One example where this can cause unexpected consequences, is setting
> `ENV DEBIAN_FRONTEND noninteractive`. Which will persist when the container
> is run interactively; for example: `docker run -t -i image bash`
@ -468,7 +468,7 @@ For example you might add something like this:
# VERSION 0.0.1
FROM ubuntu
MAINTAINER Guillaume J. Charmes <guillaume@docker.com>
MAINTAINER Victor Vieux <victor@docker.com>
# make sure the package repository is up to date
RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list

View file

@ -448,7 +448,7 @@ To see how the `docker:latest` image was built:
The default `docker images` will show all top level
images, their repository and tags, and their virtual size.
Docker images have intermediate layers that increase reuseability,
Docker images have intermediate layers that increase reusability,
decrease disk usage, and speed up `docker build` by
allowing each step to be cached. These intermediate layers are not shown
by default.
@ -735,7 +735,7 @@ Running `docker ps` showing 2 linked containers.
## pull
Usage: docker pull NAME[:TAG]
Usage: docker pull [REGISTRY_PATH/]NAME[:TAG]
Pull an image or a repository from the registry
@ -745,6 +745,11 @@ Most of your images will be created on top of a base image from the
[Docker Hub](https://hub.docker.com) contains many pre-built images that you
can `pull` and try without needing to define and configure your own.
It is also possible to manually specify the path of a registry to pull from.
For example, if you have set up a local registry, you can specify its path to
pull from it. A repository path is similar to a URL, but does not contain
a protocol specifier (https://, for example).
To download a particular image, or set of images (i.e., a repository),
use `docker pull`:
@ -752,8 +757,11 @@ use `docker pull`:
# will pull all the images in the debian repository
$ docker pull debian:testing
# will pull only the image named debian:testing and any intermediate layers
# it is based on. (typically the empty `scratch` image, a MAINTAINERs layer,
# and the un-tared base.
# it is based on. (Typically the empty `scratch` image, a MAINTAINERs layer,
# and the un-tarred base).
$ docker pull registry.hub.docker.com/debian
# manually specifies the path to the default Docker registry. This could
# be replaced with the path to a local registry to pull from another source.
## push
@ -873,7 +881,7 @@ removed before the image is removed.
'bridge': creates a new network stack for the container on the docker bridge
'none': no networking for this container
'container:<name|id>': reuses another container network stack
'host': use the host network stack inside the contaner
'host': use the host network stack inside the container
-p, --publish=[] Publish a container's port to the host
format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort
(use 'docker port' to see the actual mapping)
@ -1066,7 +1074,7 @@ retrieve the container's ID once the container has finished running.
$ sudo docker run -d --name static static-web-files sh
$ sudo docker run -d --expose=8098 --name riak riakserver
$ sudo docker run -d -m 100m -e DEVELOPMENT=1 -e BRANCH=example-code -v $(pwd):/app/bin:ro --name app appserver
$ sudo docker run -d -p 1443:443 --dns=dns.dev.org --dns-search=dev.org -v /var/log/httpd --volumes-from static --link riak --link app -h www.sven.dev.org --name web webserver
$ sudo docker run -d -p 1443:443 --dns=10.0.0.1 --dns-search=dev.org -v /var/log/httpd --volumes-from static --link riak --link app -h www.sven.dev.org --name web webserver
$ sudo docker run -t -i --rm --volumes-from web -w /var/log/httpd busybox tail -f access.log
This example shows 5 containers that might be set up to test a web
@ -1081,7 +1089,7 @@ application change:
two environment variables `DEVELOPMENT` and `BRANCH` and bind-mounting the
current directory (`$(pwd)`) in the container in read-only mode as `/app/bin`;
4. Start the `webserver`, mapping port `443` in the container to port `1443` on
the Docker server, setting the DNS server to `dns.dev.org` and DNS search
the Docker server, setting the DNS server to `10.0.0.1` and DNS search
domain to `dev.org`, creating a volume to put the log files into (so we can
access it from another container), then importing the files from the volume
exposed by the `static` container, and linking to all exposed ports from

View file

@ -137,7 +137,7 @@ PID files):
'bridge': creates a new network stack for the container on the docker bridge
'none': no networking for this container
'container:<name|id>': reuses another container network stack
'host': use the host network stack inside the contaner
'host': use the host network stack inside the container
By default, all containers have networking enabled and they can make any
outgoing connections. The operator can completely disable networking
@ -152,7 +152,7 @@ Supported networking modes are:
* none - no networking in the container
* bridge - (default) connect the container to the bridge via veth interfaces
* host - use the host's network stack inside the container
* host - use the host's network stack inside the container. Note: This gives the container full access to local system services such as D-bus and is therefore considered insecure.
* container - use another container's network stack
#### Mode: none

View file

@ -4,70 +4,69 @@ page_keywords: documentation, docs, the docker guide, docker guide, docker, dock
# Getting Started with Docker Hub
*How do I use Docker Hub?*
In this section we're going to introduce you, very quickly!, to
[Docker Hub](https://hub.docker.com) and create an account.
This section provides a quick introduction to the [Docker Hub](https://hub.docker.com)
and will show you how to create an account.
[Docker Hub](https://www.docker.io) is the central hub for Docker. It
helps you to manage Docker and its components. It provides services such
as:
The [Docker Hub](https://hub.docker.com) is a centralized resource for working with
Docker and its components. Docker Hub helps you collaborate with colleagues and get the
most out of Docker.To do this, it provides services such as:
* Hosting images.
* Docker image hosting.
* User authentication.
* Automated image builds and work flow tools like build triggers and web
* Automated image builds and work-flow tools such as build triggers and web
hooks.
* Integration with GitHub and BitBucket.
Docker Hub helps you collaborate with colleagues and get the most out of
Docker.
In order to use Docker Hub you will need to register an account. Don't
panic! It's totally free and really easy.
In order to use Docker Hub, you will first need to register and create an account. Don't
worry, creating an account is simple and free.
## Creating a Docker Hub Account
There are two ways you can create a Docker Hub account:
There are two ways for you to register and create a Docker Hub account:
* Via the web, or
* Via the command line.
1. Via the web, or
2. Via the command line.
### Sign up via the web!
### Register via the web
Fill in the [sign-up form](https://www.docker.io/account/signup/) and
choose your user name and specify some details such as an email address.
Fill in the [sign-up form](https://hub.docker.com/account/signup/) by
choosing your user name and password and specifying email address. You can also sign up
for the Docker Weekly mailing list, which has lots of information about what's going on
in the world of Docker.
![Register using the sign-up page](/userguide/register-web.png)
### Signup via the command line
### Register via the command line
You can also create a Docker Hub account via the command line using the
You can also create a Docker Hub account via the command line with the
`docker login` command.
$ sudo docker login
### Confirm your email
Once you've filled in the form then check your email for a welcome
message and activate your account.
Once you've filled in the form, check your email for a welcome message and confirmation
to activate your account.
![Confirm your registration](/userguide/register-confirm.png)
### Login!
### Login
Then you can login using the web console:
After you complete the confirmation process, you can login using the web console:
![Login using the web console](/userguide/login-web.png)
Or via the command line and the `docker login` command:
Or via the command line with the `docker login` command:
$ sudo docker login
Now your Docker Hub account is active and ready for you to use!
Your Docker Hub account is now active and ready for you to use!
## Next steps
Now let's start Dockerizing applications with our "Hello World!" exercise.
Next, let's start learning how to Dockerize applications with our "Hello World!"
exercise.
Go to [Dockerizing Applications](/userguide/dockerizing).

View file

@ -70,7 +70,7 @@ If instead we wanted to build an Ubuntu 12.04 image we'd use:
$ sudo docker run -t -i ubuntu:12.04 /bin/bash
If you don't specify a variant, for example you just use `ubuntu`, then Docker
will default to using the `ubunut:latest` image.
will default to using the `ubuntu:latest` image.
> **Tip:**
> We recommend you always use a specific tagged image, for example
@ -330,7 +330,7 @@ containers will get removed to clean things up.
We can then create a container from our new image.
$ sudo docker run -t -i ouruser/sinatra /bin/bash
$ sudo docker run -t -i ouruser/sinatra:v2 /bin/bash
root@8196968dac35:/#
> **Note:**

View file

@ -13,7 +13,7 @@ application inside a container takes a single command: `docker run`.
Let's try it now.
$ sudo docker run ubuntu:14.04 /bin/echo "Hello World!"
$ sudo docker run ubuntu:14.04 /bin/echo 'Hello World'
Hello World!
And you just launched your first container!
@ -34,7 +34,7 @@ image registry: [Docker Hub](https://hub.docker.com).
Next we told Docker what command to run inside our new container:
/bin/echo "Hello World!"
/bin/echo 'Hello World!'
When our container was launched Docker created a new Ubuntu 14.04
environment and then executed the `/bin/echo` command inside it. We saw
@ -134,7 +134,7 @@ do that with the `docker ps` command. The `docker ps` command queries
the Docker daemon for information about all the container it knows
about.
$ docker ps
$ sudo docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
1e5535038e28 ubuntu:14.04 /bin/sh -c 'while tr 2 minutes ago Up 1 minute insane_babbage

View file

@ -68,7 +68,7 @@ configurations. For example if we've bound the container port to the
`localhost` on the host machine this will be shown in the `docker port`
output.
$ docker port nostalgic_morse
$ docker port nostalgic_morse 5000
127.0.0.1:49155
> **Note:**
@ -171,14 +171,11 @@ child container in two ways:
* Environment variables,
* Updating the `/etc/host` file.
Let's look first at the environment variables Docker sets. Inside the `web`
container let's run the `env` command to list the container's environment
variables.
root@aed84ee21bde:/opt/webapp# env
HOSTNAME=aed84ee21bde
Let's look first at the environment variables Docker sets. Let's run the `env`
command to list the container's environment variables.
$ sudo docker run --rm --name web2 --link db:db training/webapp env
. . .
DB_NAME=/web/db
DB_NAME=/web2/db
DB_PORT=tcp://172.17.0.5:5432
DB_PORT_5000_TCP=tcp://172.17.0.5:5432
DB_PORT_5000_TCP_PROTO=tcp

View file

@ -147,7 +147,7 @@ It will stay in sync with your GitHub and BitBucket repository until you
deactivate the Automated Build.
If you want to see the status of your Automated Builds you can go to your
[Automated Builds page](https://registry.hub.docker.io/builds/) on the Docker Hub,
[Automated Builds page](https://registry.hub.docker.com/builds/) on the Docker Hub,
and it will show you the status of your builds, and the build history.
Once you've created an Automated Build you can deactivate or delete it. You

View file

@ -4,7 +4,7 @@ page_keywords: Examples, Usage, volume, docker, documentation, user guide, data,
# Managing Data in Containers
So far we've been introduced some [basic Docker
So far we've been introduced to some [basic Docker
concepts](/userguide/usingdocker/), seen how to work with [Docker
images](/userguide/dockerimages/) as well as learned about [networking
and links between containers](/userguide/dockerlinks/). In this section

View file

@ -183,6 +183,16 @@ see the application.
Our Python application is live!
> **Note:**
> If you have used boot2docker on OSX you'll need to get the IP of the virtual
> host instead of using localhost. You can do this by running the following in
> the boot2docker shell.
>
> $ boot2docker ip
> The VM's Host only interface IP address is: 192.168.59.103
>
> In this case you'd browse to http://192.168.59.103:49155 for the above example.
## A Network Port Shortcut
Using the `docker ps` command to return the mapped port is a bit clumsy so

View file

@ -66,7 +66,7 @@
</div>
</div>
<div id="push"></div>
<div id="push-footer"></div>
</div>
@ -132,7 +132,7 @@ piCId = '1482';
if (userName) {
$('.topmostnav_loggedout').hide();
$('.topmostnav_loggedin').show();
$('.navbar #usernav .nav li a').text(userName);
$('#logged-in-header-username').text(userName);
} else {
$('.topmostnav_loggedout').show();
$('.topmostnav_loggedin').hide();

View file

@ -3,6 +3,10 @@
margin-bottom: 10px;
}
#top-header .header2 {
font-weight: 400;
}
#usernav > ul {
margin-top: 10px;
z-index: 99999;
@ -49,6 +53,12 @@ ol li {
margin: 0px;
padding: 0em;
}
#nav_menu > #docsnav > #main-nav > li > a {
color: #253237;
}
#nav_menu > #docsnav > #main-nav > li.dd_on_hover > a {
color: #5992a3;
}
#nav_menu > #docsnav > #main-nav > li.dd_on_hover {
background: #b1d5df;
color: #5992a3;
@ -180,20 +190,42 @@ ol li {
line-height: 1.7;
}
.content-body ul {
padding: 10px 0px 10px 20px;
list-style-position: inherit;
list-style: circle;
}
.content-body ul li {
padding-bottom: 5px;
}
.content-body ul ul {
padding: 10px 0px 10px 40px;
}
.content-body h1 {
display: block !important;
font-size: 18px;
font-weight: 700;
font-size: 27px;
font-weight: 400;
color: #394d54;
line-height: 1.33;
font-weight: normal;
margin-bottom: 8px;
margin-top: 6px;
}
.content-body h2, .content-body h3 {
font-size: 14px;
font-weight: 500;
.content-body h2 {
font-size: 21px;
font-weight: 400;
color: #394d54;
line-height: 1.7;
margin-bottom: 4px;
margin-top: 10px;
}
.content-body h3 {
font-size: 18px;
font-weight: 400;
color: #394d54;
line-height: 1.7;
margin-bottom: 4px;
@ -234,8 +266,16 @@ ol li {
padding-left: 15px;
}
.content-body blockquote * {
color: #394d54;
.content-body ul {
margin-top: 0px !important;
}
.content-body blockquote a {
color: #24b8eb;
}
.content-body blockquote a:hover {
color: #008bb8;
}
.content-body ul {
@ -249,6 +289,11 @@ ol li {
border-radius: 4px;
-webkit-border-radius: 4px;
-moz-border-radius: 4px;
overflow-x: auto;
}
.content-body pre code {
overflow-wrap: normal;
white-space: pre;
}
#leftnav .nav.nav-tabs li a {
@ -267,4 +312,4 @@ ol li {
}
.navbar #usernav .nav li {
padding-top: 0px !important;
}
}

View file

@ -305,7 +305,7 @@ body {
margin: 0 auto -379px;
}
/* Set the fixed height of the footer here */
#push,
#push-footer,
#footer {
height: 379px;
background: #253237;

View file

@ -31,16 +31,15 @@
<div id="usernav" class="pull-right">
<ul class="nav user">
<li class="dropdown">
<a class="dropdown-toggle" data-toggle="dropdown" href="#">
<a id="logged-in-header-username" class="dropdown-toggle" data-toggle="dropdown" href="#">
<img class="profile" src="https://secure.gravatar.com/avatar/26dc2b32b9e753823aef55e89687a9fc.jpg?s=30&amp;r=g&amp;d=mm" alt="profile picture">ostezer
</a>
<ul class="dropdown-menu pull-right">
<li><a href="http://www.docker.com/account/">View Profile</a></li>
<li><a href="http://www.docker.com/account/settings/">Settings</a></li>
<li><a href="http://www.docker.com/repos/">My Repositories</a></li>
<li><a href="http://www.docker.com/plans/billing-info">Billing</a></li>
<li><a href="http://www.docker.com/account/logout/?next=/">Log out</a></li>
<li><a href="https://hub.docker.com/">View Profile</a></li>
<li><a href="https://hub.docker.com/account/settings/">Settings</a></li>
<li><a href="https://hub.docker.com/repos/">My Repositories</a></li>
<li><a href="https://hub.docker.com/plans/billing-info">Billing</a></li>
<li><a href="https://hub.docker.com/account/logout/?next=/">Log out</a></li>
</ul>
</li>
</ul>

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.9 KiB

View file

@ -4,7 +4,9 @@
{% for menu in nav %}
{% if menu.title != '**HIDDEN**' %}
<li class="dd_menu pull-left{% if menu.active %} active{% endif %}">
<span>{{ menu.title }}</span>
{% for first_item in menu.children[:1] %}
<a href="{{ first_item.url }}">{{ menu.title }}</a>
{% endfor %}
<ul class="dd_submenu" style="max-height: 75px;">
{% for menu in menu.children %}
<li {% if menu.active %}class="active"{% endif %}>

View file

@ -1,138 +0,0 @@
package engine
import (
"fmt"
"github.com/dotcloud/docker/pkg/beam"
"github.com/dotcloud/docker/pkg/beam/data"
"io"
"os"
"strconv"
"sync"
)
type Sender struct {
beam.Sender
}
func NewSender(s beam.Sender) *Sender {
return &Sender{s}
}
func (s *Sender) Install(eng *Engine) error {
// FIXME: this doesn't exist yet.
eng.RegisterCatchall(s.Handle)
return nil
}
func (s *Sender) Handle(job *Job) Status {
cmd := append([]string{job.Name}, job.Args...)
env := data.Encode(job.Env().MultiMap())
msg := data.Empty().Set("cmd", cmd...).Set("env", env)
peer, err := beam.SendConn(s, msg.Bytes())
if err != nil {
return job.Errorf("beamsend: %v", err)
}
defer peer.Close()
var tasks sync.WaitGroup
defer tasks.Wait()
r := beam.NewRouter(nil)
r.NewRoute().KeyStartsWith("cmd", "log", "stdout").HasAttachment().Handler(func(p []byte, stdout *os.File) error {
tasks.Add(1)
go func() {
io.Copy(job.Stdout, stdout)
stdout.Close()
tasks.Done()
}()
return nil
})
r.NewRoute().KeyStartsWith("cmd", "log", "stderr").HasAttachment().Handler(func(p []byte, stderr *os.File) error {
tasks.Add(1)
go func() {
io.Copy(job.Stderr, stderr)
stderr.Close()
tasks.Done()
}()
return nil
})
r.NewRoute().KeyStartsWith("cmd", "log", "stdin").HasAttachment().Handler(func(p []byte, stdin *os.File) error {
go func() {
io.Copy(stdin, job.Stdin)
stdin.Close()
}()
return nil
})
var status int
r.NewRoute().KeyStartsWith("cmd", "status").Handler(func(p []byte, f *os.File) error {
cmd := data.Message(p).Get("cmd")
if len(cmd) != 2 {
return fmt.Errorf("usage: %s <0-127>", cmd[0])
}
s, err := strconv.ParseUint(cmd[1], 10, 8)
if err != nil {
return fmt.Errorf("usage: %s <0-127>", cmd[0])
}
status = int(s)
return nil
})
if _, err := beam.Copy(r, peer); err != nil {
return job.Errorf("%v", err)
}
return Status(status)
}
type Receiver struct {
*Engine
peer beam.Receiver
}
func NewReceiver(peer beam.Receiver) *Receiver {
return &Receiver{Engine: New(), peer: peer}
}
func (rcv *Receiver) Run() error {
r := beam.NewRouter(nil)
r.NewRoute().KeyExists("cmd").Handler(func(p []byte, f *os.File) error {
// Use the attachment as a beam return channel
peer, err := beam.FileConn(f)
if err != nil {
f.Close()
return err
}
f.Close()
defer peer.Close()
msg := data.Message(p)
cmd := msg.Get("cmd")
job := rcv.Engine.Job(cmd[0], cmd[1:]...)
// Decode env
env, err := data.Decode(msg.GetOne("env"))
if err != nil {
return fmt.Errorf("error decoding 'env': %v", err)
}
job.Env().InitMultiMap(env)
stdout, err := beam.SendRPipe(peer, data.Empty().Set("cmd", "log", "stdout").Bytes())
if err != nil {
return err
}
job.Stdout.Add(stdout)
stderr, err := beam.SendRPipe(peer, data.Empty().Set("cmd", "log", "stderr").Bytes())
if err != nil {
return err
}
job.Stderr.Add(stderr)
stdin, err := beam.SendWPipe(peer, data.Empty().Set("cmd", "log", "stdin").Bytes())
if err != nil {
return err
}
job.Stdin.Add(stdin)
// ignore error because we pass the raw status
job.Run()
err = peer.Send(data.Empty().Set("cmd", "status", fmt.Sprintf("%d", job.status)).Bytes(), nil)
if err != nil {
return err
}
return nil
})
_, err := beam.Copy(r, rcv.peer)
return err
}

View file

@ -1,150 +0,0 @@
package engine
import (
"bufio"
"bytes"
"fmt"
"github.com/dotcloud/docker/pkg/beam"
"github.com/dotcloud/docker/pkg/testutils"
"io"
"strings"
"testing"
"time"
)
func TestHelloWorld(t *testing.T) {
for i := 0; i < 10; i++ {
testRemote(t,
// Sender side
func(eng *Engine) {
job := eng.Job("echo", "hello", "world")
out := &bytes.Buffer{}
job.Stdout.Add(out)
job.Run()
if job.status != StatusOK {
t.Fatalf("#%v", job.StatusCode())
}
lines := bufio.NewScanner(out)
var i int
for lines.Scan() {
if lines.Text() != "hello world" {
t.Fatalf("%#v", lines.Text())
}
i++
}
if i != 1000 {
t.Fatalf("%#v", i)
}
},
// Receiver side
func(eng *Engine) {
eng.Register("echo", func(job *Job) Status {
// Simulate more output with a delay in the middle
for i := 0; i < 500; i++ {
fmt.Fprintf(job.Stdout, "%s\n", strings.Join(job.Args, " "))
}
time.Sleep(5 * time.Millisecond)
for i := 0; i < 500; i++ {
fmt.Fprintf(job.Stdout, "%s\n", strings.Join(job.Args, " "))
}
return StatusOK
})
},
)
}
}
func TestStdin(t *testing.T) {
testRemote(t,
func(eng *Engine) {
job := eng.Job("mirror")
job.Stdin.Add(strings.NewReader("hello world!\n"))
out := &bytes.Buffer{}
job.Stdout.Add(out)
if err := job.Run(); err != nil {
t.Fatal(err)
}
if out.String() != "hello world!\n" {
t.Fatalf("%#v", out.String())
}
},
func(eng *Engine) {
eng.Register("mirror", func(job *Job) Status {
if _, err := io.Copy(job.Stdout, job.Stdin); err != nil {
t.Fatal(err)
}
return StatusOK
})
},
)
}
func TestEnv(t *testing.T) {
var (
foo string
answer int
shadok_words []string
)
testRemote(t,
func(eng *Engine) {
job := eng.Job("sendenv")
job.Env().Set("foo", "bar")
job.Env().SetInt("answer", 42)
job.Env().SetList("shadok_words", []string{"ga", "bu", "zo", "meu"})
if err := job.Run(); err != nil {
t.Fatal(err)
}
},
func(eng *Engine) {
eng.Register("sendenv", func(job *Job) Status {
foo = job.Env().Get("foo")
answer = job.Env().GetInt("answer")
shadok_words = job.Env().GetList("shadok_words")
return StatusOK
})
},
)
// Check for results here rather than inside the job handler,
// otherwise the tests may incorrectly pass if the handler is not
// called.
if foo != "bar" {
t.Fatalf("%#v", foo)
}
if answer != 42 {
t.Fatalf("%#v", answer)
}
if strings.Join(shadok_words, ", ") != "ga, bu, zo, meu" {
t.Fatalf("%#v", shadok_words)
}
}
// Helpers
func testRemote(t *testing.T, senderSide, receiverSide func(*Engine)) {
sndConn, rcvConn, err := beam.USocketPair()
if err != nil {
t.Fatal(err)
}
defer sndConn.Close()
defer rcvConn.Close()
sender := NewSender(sndConn)
receiver := NewReceiver(rcvConn)
// Setup the sender side
eng := New()
sender.Install(eng)
// Setup the receiver side
receiverSide(receiver.Engine)
go receiver.Run()
testutils.Timeout(t, func() {
senderSide(eng)
})
}

View file

@ -1,43 +0,0 @@
package main
import (
"fmt"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/pkg/beam"
"net"
"os"
)
func main() {
eng := engine.New()
c, err := net.Dial("unix", "beam.sock")
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return
}
defer c.Close()
f, err := c.(*net.UnixConn).File()
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return
}
child, err := beam.FileConn(f)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return
}
defer child.Close()
sender := engine.NewSender(child)
sender.Install(eng)
cmd := eng.Job(os.Args[1], os.Args[2:]...)
cmd.Stdout.Add(os.Stdout)
cmd.Stderr.Add(os.Stderr)
if err := cmd.Run(); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
}

View file

@ -1,119 +0,0 @@
package spawn
import (
"fmt"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/pkg/beam"
"github.com/dotcloud/docker/utils"
"os"
"os/exec"
)
var initCalled bool
// Init checks if the current process has been created by Spawn.
//
// If no, it returns nil and the original program can continue
// unmodified.
//
// If no, it hijacks the process to run as a child worker controlled
// by its parent over a beam connection, with f exposed as a remote
// service. In this case Init never returns.
//
// The hijacking process takes place as follows:
// - Open file descriptor 3 as a beam endpoint. If this fails,
// terminate the current process.
// - Start a new engine.
// - Call f.Install on the engine. Any handlers registered
// will be available for remote invocation by the parent.
// - Listen for beam messages from the parent and pass them to
// the handlers.
// - When the beam endpoint is closed by the parent, terminate
// the current process.
//
// NOTE: Init must be called at the beginning of the same program
// calling Spawn. This is because Spawn approximates a "fork" by
// re-executing the current binary - where it expects spawn.Init
// to intercept the control flow and execute the worker code.
func Init(f engine.Installer) error {
initCalled = true
if os.Getenv("ENGINESPAWN") != "1" {
return nil
}
fmt.Printf("[%d child]\n", os.Getpid())
// Hijack the process
childErr := func() error {
fd3 := os.NewFile(3, "beam-introspect")
introsp, err := beam.FileConn(fd3)
if err != nil {
return fmt.Errorf("beam introspection error: %v", err)
}
fd3.Close()
defer introsp.Close()
eng := engine.NewReceiver(introsp)
if err := f.Install(eng.Engine); err != nil {
return err
}
if err := eng.Run(); err != nil {
return err
}
return nil
}()
if childErr != nil {
os.Exit(1)
}
os.Exit(0)
return nil // Never reached
}
// Spawn starts a new Engine in a child process and returns
// a proxy Engine through which it can be controlled.
//
// The commands available on the child engine are determined
// by an earlier call to Init. It is important that Init be
// called at the very beginning of the current program - this
// allows it to be called as a re-execution hook in the child
// process.
//
// Long story short, if you want to expose `myservice` in a child
// process, do this:
//
// func main() {
// spawn.Init(myservice)
// [..]
// child, err := spawn.Spawn()
// [..]
// child.Job("dosomething").Run()
// }
func Spawn() (*engine.Engine, error) {
if !initCalled {
return nil, fmt.Errorf("spawn.Init must be called at the top of the main() function")
}
cmd := exec.Command(utils.SelfPath())
cmd.Env = append(cmd.Env, "ENGINESPAWN=1")
local, remote, err := beam.SocketPair()
if err != nil {
return nil, err
}
child, err := beam.FileConn(local)
if err != nil {
local.Close()
remote.Close()
return nil, err
}
local.Close()
cmd.ExtraFiles = append(cmd.ExtraFiles, remote)
// FIXME: the beam/engine glue has no way to inform the caller
// of the child's termination. The next call will simply return
// an error.
if err := cmd.Start(); err != nil {
child.Close()
return nil, err
}
eng := engine.New()
if err := engine.NewSender(child).Install(eng); err != nil {
child.Close()
return nil, err
}
return eng, nil
}

View file

@ -1,61 +0,0 @@
package main
import (
"fmt"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/engine/spawn"
"log"
"os"
"os/exec"
"strings"
)
func main() {
fmt.Printf("[%d] MAIN\n", os.Getpid())
spawn.Init(&Worker{})
fmt.Printf("[%d parent] spawning\n", os.Getpid())
eng, err := spawn.Spawn()
if err != nil {
log.Fatal(err)
}
fmt.Printf("[parent] spawned\n")
job := eng.Job(os.Args[1], os.Args[2:]...)
job.Stdout.Add(os.Stdout)
job.Stderr.Add(os.Stderr)
job.Run()
// FIXME: use the job's status code
os.Exit(0)
}
type Worker struct {
}
func (w *Worker) Install(eng *engine.Engine) error {
eng.Register("exec", w.Exec)
eng.Register("cd", w.Cd)
eng.Register("echo", w.Echo)
return nil
}
func (w *Worker) Exec(job *engine.Job) engine.Status {
fmt.Printf("--> %v\n", job.Args)
cmd := exec.Command(job.Args[0], job.Args[1:]...)
cmd.Stdout = job.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return job.Errorf("%v\n", err)
}
return engine.StatusOK
}
func (w *Worker) Cd(job *engine.Job) engine.Status {
if err := os.Chdir(job.Args[0]); err != nil {
return job.Errorf("%v\n", err)
}
return engine.StatusOK
}
func (w *Worker) Echo(job *engine.Job) engine.Status {
fmt.Fprintf(job.Stdout, "%s\n", strings.Join(job.Args, " "))
return engine.StatusOK
}

View file

@ -141,7 +141,7 @@ func TestOutputAdd(t *testing.T) {
t.Fatalf("Expected %d, got %d", len(input), n)
}
if output := b.String(); output != input {
t.Fatal("Received wrong data from Add.\nExpected: '%s'\nGot: '%s'", input, output)
t.Fatalf("Received wrong data from Add.\nExpected: '%s'\nGot: '%s'", input, output)
}
}

View file

@ -137,5 +137,4 @@ func (t *Table) ReadFrom(src io.Reader) (n int64, err error) {
}
t.Add(env)
}
return 0, nil
}

View file

@ -24,12 +24,12 @@ speak up!
It is every maintainer's responsibility to:
* 1) Expose a clear roadmap for improving their component.
* 2) Deliver prompt feedback and decisions on pull requests.
* 3) Be available to anyone with questions, bug reports, criticism etc.
1. Expose a clear roadmap for improving their component.
2. Deliver prompt feedback and decisions on pull requests.
3. Be available to anyone with questions, bug reports, criticism etc.
on their component. This includes IRC, GitHub requests and the mailing
list.
* 4) Make sure their component respects the philosophy, design and
4. Make sure their component respects the philosophy, design and
roadmap of the project.
## How are decisions made?

Some files were not shown because too many files have changed in this diff Show more