
Merge remote-tracking branch 'dotcloud/master' into docs-smart-changes

Conflicts:
	docs/sources/conf.py
Thatcher Peskens 2013-08-13 16:25:58 -07:00
commit f127c471a1
52 changed files with 867 additions and 124 deletions

.gitignore (4 changes)

@@ -5,10 +5,7 @@ docker/docker
a.out
*.orig
build_src
command-line-arguments.test
.flymake*
docker.test
auth/auth.test
.idea
.DS_Store
docs/_build
@@ -16,3 +13,4 @@ docs/_static
docs/_templates
.gopath/
.dotcloud
*.test


@@ -1,4 +1,4 @@
# Generate AUTHORS: git log --all --format='%aN <%aE>' | sort -uf | grep -v vagrant-ubuntu-12
# Generate AUTHORS: git log --format='%aN <%aE>' | sort -uf | grep -v vagrant-ubuntu-12
<charles.hooper@dotcloud.com> <chooper@plumata.com>
<daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
<daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
@@ -25,3 +25,4 @@ Walter Stanish <walter@pratyeka.org>
Roberto Hashioka <roberto_hashioka@hotmail.com>
Konstantin Pelykh <kpelykh@zettaset.com>
David Sissitka <me@dsissitka.com>
Nolan Darilek <nolan@thewordnerd.info>


@@ -40,6 +40,7 @@ Erno Hopearuoho <erno.hopearuoho@gmail.com>
Evan Wies <evan@neomantra.net>
ezbercih <cem.ezberci@gmail.com>
Fabrizio Regini <freegenie@gmail.com>
Fareed Dudhia <fareeddudhia@googlemail.com>
Flavio Castelli <fcastelli@suse.com>
Francisco Souza <f@souza.cc>
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
@@ -83,6 +84,7 @@ Nelson Chen <crazysim@gmail.com>
Niall O'Higgins <niallo@unworkable.org>
Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
Nick Stinemates <nick@stinemates.org>
Nolan Darilek <nolan@thewordnerd.info>
odk- <github@odkurzacz.org>
Paul Bowsher <pbowsher@globalpersonals.co.uk>
Paul Hammond <paul@paulhammond.org>
@@ -106,6 +108,7 @@ Thomas Hansen <thomas.hansen@gmail.com>
Tianon Gravi <admwiggin@gmail.com>
Tim Terhorst <mynamewastaken+git@gmail.com>
Tobias Bieniek <Tobias.Bieniek@gmx.de>
Tobias Schmidt <ts@soundcloud.com>
Tobias Schwab <tobias.schwab@dynport.de>
Tom Hulihan <hulihan.tom159@gmail.com>
unclejack <unclejacksons@gmail.com>


@@ -22,6 +22,9 @@ run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt
run apt-get update
run apt-get install -y lxc
run apt-get install -y aufs-tools
# Docker requires code.google.com/p/go.net/websocket
run apt-get install -y -q mercurial
run PKG=code.google.com/p/go.net REV=78ad7f42aa2e; hg clone https://$PKG /go/src/$PKG && cd /go/src/$PKG && hg checkout -r $REV
# Upload docker source
add . /go/src/github.com/dotcloud/docker
# Build the binary


@@ -1,6 +1,6 @@
Solomon Hykes <solomon@dotcloud.com>
Guillaume Charmes <guillaume@dotcloud.com>
Victor Vieux <victor@dotcloud.com>
Michael Crosby <michael@crosbymichael.com>
api.go: Victor Vieux <victor@dotcloud.com>
Vagrantfile: Daniel Mizyrycki <daniel@dotcloud.com>
Solomon Hykes <solomon@dotcloud.com> (@shykes)
Guillaume Charmes <guillaume@dotcloud.com> (@creack)
Victor Vieux <victor@dotcloud.com> (@vieux)
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
api.go: Victor Vieux <victor@dotcloud.com> (@vieux)
Vagrantfile: Daniel Mizyrycki <daniel@dotcloud.com> (@mzdaniel)

api.go (24 changes)

@@ -15,6 +15,7 @@ import (
"net/http"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
)
@@ -236,8 +237,7 @@ func getEvents(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
}
}
}
for {
event := <-listener
for event := range listener {
err := sendEvent(wf, &event)
if err != nil && err.Error() == "JSON error" {
continue
@@ -1087,7 +1087,25 @@ func ListenAndServe(proto, addr string, srv *Server, logging bool) error {
return e
}
if proto == "unix" {
os.Chmod(addr, 0700)
if err := os.Chmod(addr, 0660); err != nil {
return err
}
groups, err := ioutil.ReadFile("/etc/group")
if err != nil {
return err
}
re := regexp.MustCompile("(^|\n)docker:.*?:([0-9]+)")
if gidMatch := re.FindStringSubmatch(string(groups)); gidMatch != nil {
gid, err := strconv.Atoi(gidMatch[2])
if err != nil {
return err
}
utils.Debugf("docker group found. gid: %d", gid)
if err := os.Chown(addr, 0, gid); err != nil {
return err
}
}
}
httpSrv := http.Server{Addr: addr, Handler: r}
return httpSrv.Serve(l)
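
In plain terms: instead of a root-only 0700 socket, the daemon now creates the unix socket with mode 0660 and, when a ``docker`` group is found in /etc/group, hands group ownership to it, so members of that group can talk to the daemon without sudo. A minimal sketch of how an admin might use this (group and user names are examples, and the daemon must be restarted after the group is created):

    sudo groupadd docker            # the group name the regexp above matches
    sudo usermod -aG docker alice   # alice must log out and back in
    sudo docker -d &                # restart the daemon so it re-chowns the socket
    docker version                  # now works for alice without sudo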


@@ -471,7 +471,7 @@ func TestGetContainersTop(t *testing.T) {
}
defer runtime.Destroy(container)
defer func() {
// Make sure the process dies before destorying runtime
// Make sure the process dies before destroying runtime
container.stdin.Close()
container.WaitTimeout(2 * time.Second)
}()
@@ -563,7 +563,7 @@ func TestGetContainersByName(t *testing.T) {
t.Fatal(err)
}
if outContainer.ID != container.ID {
t.Fatalf("Wrong containers retrieved. Expected %s, recieved %s", container.ID, outContainer.ID)
t.Fatalf("Wrong containers retrieved. Expected %s, received %s", container.ID, outContainer.ID)
}
}
@@ -802,7 +802,7 @@ func TestPostContainersStart(t *testing.T) {
r = httptest.NewRecorder()
if err = postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err == nil {
t.Fatalf("A running containter should be able to be started")
t.Fatalf("A running container should be able to be started")
}
if err := container.Kill(); err != nil {
@@ -926,7 +926,7 @@ func TestPostContainersAttach(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
// Try to avoid the timeoout in destroy. Best effort, don't check error
// Try to avoid the timeout in destroy. Best effort, don't check error
defer func() {
closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
container.Kill()
@@ -982,7 +982,7 @@ func TestPostContainersAttach(t *testing.T) {
t.Fatalf("/bin/cat is not running after closing stdin")
}
// Try to avoid the timeoout in destroy. Best effort, don't check error
// Try to avoid the timeout in destroy. Best effort, don't check error
cStdin, _ := container.StdinPipe()
cStdin.Close()
container.Wait()


@@ -98,7 +98,7 @@ func TarFilter(path string, compression Compression, filter []string) (io.Reader
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `path`.
// The archive may be compressed with one of the following algorithgms:
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(archive io.Reader, path string) error {


@@ -509,7 +509,7 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
fmt.Fprintf(b.out, "Successfully built %s\n", utils.TruncateID(b.image))
return b.image, nil
}
return "", fmt.Errorf("An error occured during the build\n")
return "", fmt.Errorf("An error occurred during the build\n")
}
func NewBuildFile(srv *Server, out io.Writer, verbose, utilizeCache bool) BuildFile {


@@ -194,7 +194,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
}
var body io.Reader
// Setup an upload progress bar
// FIXME: ProgressReader shouldn't be this annoyning to use
// FIXME: ProgressReader shouldn't be this annoying to use
if context != nil {
sf := utils.NewStreamFormatter(false)
body = utils.ProgressReader(ioutil.NopCloser(context), 0, cli.err, sf.FormatProgress("", "Uploading context", "%v bytes%0.0s%0.0s"), sf, true)
@@ -857,10 +857,12 @@ func (cli *DockerCli) CmdPush(args ...string) error {
}
if err := push(); err != nil {
if err == fmt.Errorf("Authentication is required.") {
if err = cli.checkIfLogged("push"); err == nil {
return push()
if err.Error() == "Authentication is required." {
fmt.Fprintln(cli.out, "\nPlease login prior to push:")
if err := cli.CmdLogin(""); err != nil {
return err
}
return push()
}
return err
}
@@ -1512,19 +1514,6 @@ func (cli *DockerCli) CmdCp(args ...string) error {
return nil
}
func (cli *DockerCli) checkIfLogged(action string) error {
// If condition AND the login failed
if cli.configFile.Configs[auth.IndexServerAddress()].Username == "" {
if err := cli.CmdLogin(""); err != nil {
return err
}
if cli.configFile.Configs[auth.IndexServerAddress()].Username == "" {
return fmt.Errorf("Please login prior to %s. ('docker login')", action)
}
}
return nil
}
func (cli *DockerCli) call(method, path string, data interface{}) ([]byte, int, error) {
var params io.Reader
if data != nil {
@@ -1728,8 +1717,7 @@ func (cli *DockerCli) monitorTtySize(id string) error {
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGWINCH)
go func() {
for {
<-sigchan
for _ = range sigchan {
cli.resizeTty(id)
}
}()
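
The net effect of the CmdPush change above: when a push fails with exactly "Authentication is required.", the CLI now prompts for credentials inline and retries the push once, instead of pre-checking the config file via the removed checkIfLogged helper. A hypothetical session (image name invented):

    docker push alice/webapp
    # Please login prior to push:
    # Username: alice
    # Password:
    # ... on a successful login the push is retried automatically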


@@ -373,7 +373,7 @@ func TestAttachDisconnect(t *testing.T) {
t.Fatalf("/bin/cat is not running after closing stdin")
}
// Try to avoid the timeoout in destroy. Best effort, don't check error
// Try to avoid the timeout in destroy. Best effort, don't check error
cStdin, _ := container.StdinPipe()
cStdin.Close()
container.Wait()


@@ -186,7 +186,7 @@ func TestDiff(t *testing.T) {
}
}
// Create a new containere
// Create a new container
container3, _, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t)
defer runtime.Destroy(container3)
@@ -351,10 +351,10 @@ func TestStart(t *testing.T) {
t.Errorf("Container should be running")
}
if err := container.Start(hostConfig); err == nil {
t.Fatalf("A running containter should be able to be started")
t.Fatalf("A running container should be able to be started")
}
// Try to avoid the timeoout in destroy. Best effort, don't check error
// Try to avoid the timeout in destroy. Best effort, don't check error
cStdin.Close()
container.WaitTimeout(2 * time.Second)
}
@@ -401,9 +401,11 @@ func TestOutput(t *testing.T) {
func TestKillDifferentUser(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"tail", "-f", "/etc/resolv.conf"},
Cmd: []string{"cat"},
OpenStdin: true,
User: "daemon",
},
)
@@ -411,12 +413,12 @@ func TestKillDifferentUser(t *testing.T) {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer container.stdin.Close()
if container.State.Running {
t.Errorf("Container shouldn't be running")
}
hostConfig := &HostConfig{}
if err := container.Start(hostConfig); err != nil {
if err := container.Start(&HostConfig{}); err != nil {
t.Fatal(err)
}
@@ -426,8 +428,13 @@
}
})
// Even if the state is running, lets give some time to lxc to spawn the process
container.WaitTimeout(500 * time.Millisecond)
setTimeout(t, "read/write assertion timed out", 2*time.Second, func() {
out, _ := container.StdoutPipe()
in, _ := container.StdinPipe()
if err := assertPipe("hello\n", "hello", out, in, 15); err != nil {
t.Fatal(err)
}
})
if err := container.Kill(); err != nil {
t.Fatal(err)
@@ -764,7 +771,7 @@ func TestUser(t *testing.T) {
Image: GetTestImage(runtime).ID,
Cmd: []string{"id"},
User: "unkownuser",
User: "unknownuser",
},
)
if err != nil {

contrib/docker.bash (new file, 455 lines)

@@ -0,0 +1,455 @@
#!bash
#
# bash completion file for core docker commands
#
# This script provides completion of:
# - commands and their options
# - container ids
# - image repos and tags
# - filepaths
#
# To enable the completions either:
# - place this file in /etc/bash_completion.d
# or
# - copy this file and add the line below to your .bashrc after
# bash completion features are loaded
# . docker.bash
#
# Note:
# Currently, the completions will not work if the docker daemon is not
# bound to the default communication port/socket.
# If the docker daemon is using a unix socket for communication, your user
# must have access to the socket for the completions to function correctly.
have docker && {
__docker_containers()
{
local containers
containers="$( docker ps -a -q )"
COMPREPLY=( $( compgen -W "$containers" -- "$cur" ) )
}
__docker_image_repos()
{
local repos
repos="$( docker images | awk 'NR>1{print $1}' )"
COMPREPLY=( $( compgen -W "$repos" -- "$cur" ) )
}
__docker_images()
{
local images
images="$( docker images | awk 'NR>1{print $1":"$2}' )"
COMPREPLY=( $( compgen -W "$images" -- "$cur" ) )
__ltrim_colon_completions "$cur"
}
__docker_image_repos_and_tags()
{
local repos images
repos="$( docker images | awk 'NR>1{print $1}' )"
images="$( docker images | awk 'NR>1{print $1":"$2}' )"
COMPREPLY=( $( compgen -W "$repos $images" -- "$cur" ) )
__ltrim_colon_completions "$cur"
}
__docker_containers_and_images()
{
local containers images
containers="$( docker ps -a -q )"
images="$( docker images | awk 'NR>1{print $1":"$2}' )"
COMPREPLY=( $( compgen -W "$images $containers" -- "$cur" ) )
__ltrim_colon_completions "$cur"
}
_docker_docker()
{
case "$prev" in
-H)
return
;;
*)
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-H" -- "$cur" ) )
;;
*)
COMPREPLY=( $( compgen -W "$commands help" -- "$cur" ) )
;;
esac
}
_docker_attach()
{
if [ $cpos -eq $cword ]; then
__docker_containers
fi
}
_docker_build()
{
case "$prev" in
-t)
return
;;
*)
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-t -q" -- "$cur" ) )
;;
*)
_filedir
;;
esac
}
_docker_commit()
{
case "$prev" in
-author|-m|-run)
return
;;
*)
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-author -m -run" -- "$cur" ) )
;;
*)
__docker_containers
;;
esac
}
_docker_diff()
{
if [ $cpos -eq $cword ]; then
__docker_containers
fi
}
_docker_events()
{
COMPREPLY=( $( compgen -W "-since" -- "$cur" ) )
}
_docker_export()
{
if [ $cpos -eq $cword ]; then
__docker_containers
fi
}
_docker_help()
{
if [ $cpos -eq $cword ]; then
COMPREPLY=( $( compgen -W "$commands" -- "$cur" ) )
fi
}
_docker_history()
{
if [ $cpos -eq $cword ]; then
__docker_image_repos_and_tags
fi
}
_docker_images()
{
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-a -notrunc -q -viz" -- "$cur" ) )
;;
*)
local counter=$cpos
while [ $counter -le $cword ]; do
case "${words[$counter]}" in
-*)
;;
*)
break
;;
esac
(( counter++ ))
done
if [ $counter -eq $cword ]; then
__docker_image_repos
fi
;;
esac
}
_docker_import()
{
return
}
_docker_info()
{
return
}
_docker_insert()
{
if [ $cpos -eq $cword ]; then
__docker_image_repos_and_tags
fi
}
_docker_inspect()
{
__docker_containers_and_images
}
_docker_kill()
{
__docker_containers
}
_docker_login()
{
COMPREPLY=( $( compgen -W "-e -p -u" -- "$cur" ) )
}
_docker_logs()
{
if [ $cpos -eq $cword ]; then
__docker_containers
fi
}
_docker_port()
{
if [ $cpos -eq $cword ]; then
__docker_containers
fi
}
_docker_ps()
{
COMPREPLY=( $( compgen -W "-a -beforeId -l -n -notrunc -q -s -sinceId" -- "$cur" ) )
}
_docker_pull()
{
COMPREPLY=( $( compgen -W "-t" -- "$cur" ) )
}
_docker_push()
{
return
}
_docker_restart()
{
case "$prev" in
-t)
return
;;
*)
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-t" -- "$cur" ) )
;;
*)
__docker_containers
;;
esac
}
_docker_rm()
{
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-v" -- "$cur" ) )
;;
*)
__docker_containers
;;
esac
}
_docker_rmi()
{
__docker_image_repos_and_tags
}
_docker_run()
{
case "$prev" in
-cidfile)
_filedir
;;
-volumes-from)
__docker_containers
;;
-a|-c|-dns|-e|-entrypoint|-h|-m|-p|-u|-v)
return
;;
*)
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-a -c -cidfile -d -dns -e -entrypoint -h -i -m -n -p -t -u -v -volumes-from" -- "$cur" ) )
;;
*)
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-a -notrunc -q -viz" -- "$cur" ) )
;;
*)
local counter=$cpos
while [ $counter -le $cword ]; do
case "${words[$counter]}" in
-a|-c|-cidfile|-dns|-e|-entrypoint|-h|-m|-p|-u|-v|-volumes-from)
(( counter++ ))
;;
-*)
;;
*)
break
;;
esac
(( counter++ ))
done
if [ $counter -eq $cword ]; then
__docker_image_repos_and_tags
fi
;;
esac
;;
esac
}
_docker_search()
{
COMPREPLY=( $( compgen -W "-notrunc" -- "$cur" ) )
}
_docker_start()
{
__docker_containers
}
_docker_stop()
{
case "$prev" in
-t)
return
;;
*)
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-t" -- "$cur" ) )
;;
*)
__docker_containers
;;
esac
}
_docker_tag()
{
COMPREPLY=( $( compgen -W "-f" -- "$cur" ) )
}
_docker_top()
{
if [ $cpos -eq $cword ]; then
__docker_containers
fi
}
_docker_version()
{
return
}
_docker_wait()
{
__docker_containers
}
_docker()
{
local cur prev words cword command="docker" counter=1 word cpos
local commands="
attach
build
commit
diff
events
export
history
images
import
info
insert
inspect
kill
login
logs
port
ps
pull
push
restart
rm
rmi
run
search
start
stop
tag
top
version
wait
"
COMPREPLY=()
_get_comp_words_by_ref -n : cur prev words cword
while [ $counter -lt $cword ]; do
word="${words[$counter]}"
case "$word" in
-H)
(( counter++ ))
;;
-*)
;;
*)
command="$word"
cpos=$counter
(( cpos++ ))
break
;;
esac
(( counter++ ))
done
local completions_func=_docker_${command}
declare -F $completions_func >/dev/null && $completions_func
return 0
}
complete -F _docker docker
}
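
To try the completion script without installing it system-wide, source it into an interactive shell; as the header notes, it relies on helpers such as ``_get_comp_words_by_ref`` and ``__ltrim_colon_completions`` from the bash-completion package. A quick sketch:

    . contrib/docker.bash
    docker at<TAB>          # completes to "docker attach"
    docker attach <TAB>     # offers container IDs from `docker ps -a -q`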


@@ -37,7 +37,7 @@ func main() {
flag.Var(&flHosts, "H", "tcp://host:port to bind/connect to or unix://path/to/socket to use")
flag.Parse()
if len(flHosts) > 1 {
flHosts = flHosts[1:] //trick to display a nice defaul value in the usage
flHosts = flHosts[1:] //trick to display a nice default value in the usage
}
for i, flHost := range flHosts {
flHosts[i] = utils.ParseHost(docker.DEFAULTHTTPHOST, docker.DEFAULTHTTPPORT, flHost)


@@ -1,2 +1,2 @@
Andy Rothfusz <andy@dotcloud.com>
Ken Cochrane <ken@dotcloud.com>
Andy Rothfusz <andy@dotcloud.com> (@metalivedev)
Ken Cochrane <ken@dotcloud.com> (@kencochrane)


@@ -28,7 +28,7 @@ Usage
Working using GitHub's file editor
----------------------------------
Alternatively, for small changes and typo's you might want to use GitHub's built in file editor. It allows
you to preview your changes right online. Just be carefull not to create many commits.
you to preview your changes right online. Just be careful not to create many commits.
Images
------


@@ -1 +1 @@
Solomon Hykes <solomon@dotcloud.com>
Solomon Hykes <solomon@dotcloud.com> (@shykes)


@@ -26,7 +26,7 @@ Docker Remote API
2. Versions
===========
The current verson of the API is 1.4
The current version of the API is 1.4
Calling /images/<name>/insert is the same as calling
/v1.4/images/<name>/insert
@@ -107,7 +107,7 @@ The client should send it's authConfig as POST on each call of
Only checks the configuration but doesn't store it on the server
Deleting an image is now improved, will only untag the image if it
has chidren and remove all the untagged parents if has any.
has children and remove all the untagged parents if has any.
.. http:post:: /images/<name>/delete


@@ -305,8 +305,8 @@ Start a container
:statuscode 500: server error
Stop a contaier
***************
Stop a container
****************
.. http:post:: /containers/(id)/stop


@@ -317,8 +317,8 @@ Start a container
:statuscode 500: server error
Stop a contaier
***************
Stop a container
****************
.. http:post:: /containers/(id)/stop


@@ -365,8 +365,8 @@ Start a container
:statuscode 500: server error
Stop a contaier
***************
Stop a container
****************
.. http:post:: /containers/(id)/stop


@@ -368,8 +368,8 @@ Start a container
:statuscode 500: server error
Stop a contaier
***************
Stop a container
****************
.. http:post:: /containers/(id)/stop


@@ -2,9 +2,10 @@
:description: Documentation for docker Registry and Registry API
:keywords: docker, registry, api, index
.. _registryindexspec:
=====================
Registry & index Spec
Registry & Index Spec
=====================
.. contents:: Table of Contents
@@ -154,7 +155,7 @@ API (pulling repository foo/bar):
.. note::
**Its possible not to use the Index at all!** In this case, a deployed version of the Registry is deployed to store and serve images. Those images are not authentified and the security is not guaranteed.
**It's possible not to use the Index at all!** In this case, a standalone Registry is deployed to store and serve images. Those images are not authenticated and the security is not guaranteed.
.. note::


@@ -24,7 +24,7 @@
-p=[]: Map a network port to the container
-t=false: Allocate a pseudo-tty
-u="": Username or UID
-d=[]: Set custom dns servers for the container
-dns=[]: Set custom dns servers for the container
-v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "host-dir" is missing, then docker creates a new volume.
-volumes-from="": Mount all volumes from the given container.
-entrypoint="": Overwrite the default entrypoint set by the image.
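
For example, the renamed flag is passed per container at run time (a hypothetical invocation):

    # Use Google's public resolver inside the container instead of the host's DNS.
    docker run -i -t -dns 8.8.8.8 ubuntu /bin/bash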


@@ -18,7 +18,7 @@ import sys, os
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuratiofn -----------------------------------------------------
# -- General configuration -----------------------------------------------------


@@ -1,5 +1,5 @@
:title: Contribution Guidelines
:description: Contribution guidelines: create issues, convetions, pull requests
:description: Contribution guidelines: create issues, conventions, pull requests
:keywords: contributing, docker, documentation, help, guideline
Contributing to Docker


@@ -1,6 +1,6 @@
:title: Docker Examples
:description: Examples on how to use Docker
:keywords: docker, hello world, node, nodejs, python, couch, couchdb, redis, ssh, sshd, examples
:keywords: docker, hello world, node, nodejs, python, couch, couchdb, redis, ssh, sshd, examples, postgresql
@@ -20,3 +20,4 @@ Contents:
running_redis_service
running_ssh_service
couchdb_data_volumes
postgresql_service


@@ -0,0 +1,158 @@
:title: PostgreSQL service How-To
:description: Running and installing a PostgreSQL service
:keywords: docker, example, package installation, postgresql
.. _postgresql_service:
PostgreSQL Service
==================
.. note::
A shorter version of `this blog post`_.
.. note::
As of version 0.5.2, docker requires root privileges to run.
You have to either manually adjust your system configuration (permissions on
/var/run/docker.sock or sudo config), or prefix `docker` with `sudo`. Check
`this thread`_ for details.
.. _this blog post: http://zaiste.net/2013/08/docker_postgresql_how_to/
.. _this thread: https://groups.google.com/forum/?fromgroups#!topic/docker-club/P3xDLqmLp0E
Installing PostgreSQL on Docker
-------------------------------
For clarity, I won't be showing command output.
Run an interactive shell in a Docker container.
.. code-block:: bash
docker run -i -t ubuntu /bin/bash
Update its dependencies.
.. code-block:: bash
apt-get update
Install ``python-software-properties``.
.. code-block:: bash
apt-get install python-software-properties
apt-get install software-properties-common
Add Pitti's PostgreSQL repository. It contains the most recent stable release
of PostgreSQL, i.e. ``9.2``.
.. code-block:: bash
add-apt-repository ppa:pitti/postgresql
apt-get update
Finally, install PostgreSQL 9.2
.. code-block:: bash
apt-get -y install postgresql-9.2 postgresql-client-9.2 postgresql-contrib-9.2
Now, create a PostgreSQL superuser role that can create databases and other roles.
Following Vagrant's convention, the role will be named ``docker`` with the
password ``docker``.
.. code-block:: bash
sudo -u postgres createuser -P -d -r -s docker
Create a test database, also named ``docker``, owned by the previously created
``docker`` role.
.. code-block:: bash
sudo -u postgres createdb -O docker docker
Adjust the PostgreSQL configuration so that remote connections to the database are
possible. Make sure that ``/etc/postgresql/9.2/main/pg_hba.conf`` contains the
following line:
.. code-block:: bash
host all all 0.0.0.0/0 md5
Additionally, inside ``/etc/postgresql/9.2/main/postgresql.conf``, uncomment
``listen_addresses`` so it reads as follows:
.. code-block:: bash
listen_addresses='*'
*Note:* this PostgreSQL setup is for development purposes only. Refer to the
PostgreSQL documentation for how to fine-tune these settings so that they are
secure enough.
Create an image and assign it a name. ``<container_id>`` is in the Bash prompt;
you can also locate it using ``docker ps -a``.
.. code-block:: bash
docker commit <container_id> <your username>/postgresql
Finally, run PostgreSQL server via ``docker``.
.. code-block:: bash
CONTAINER=$(docker run -d -p 5432 \
-t <your username>/postgresql \
/bin/su postgres -c '/usr/lib/postgresql/9.2/bin/postgres \
-D /var/lib/postgresql/9.2/main \
-c config_file=/etc/postgresql/9.2/main/postgresql.conf')
Connect to the PostgreSQL server using ``psql``.
.. code-block:: bash
CONTAINER_IP=$(docker inspect $CONTAINER | grep IPAddress | awk '{ print $2 }' | tr -d ',"')
psql -h $CONTAINER_IP -p 5432 -d docker -U docker -W
As before, create roles or databases if needed.
.. code-block:: bash
psql (9.2.4)
Type "help" for help.
docker=# CREATE DATABASE foo OWNER=docker;
CREATE DATABASE
Additionally, you can publish your newly created image on the Docker Index.
.. code-block:: bash
docker login
Username: <your username>
[...]
.. code-block:: bash
docker push <your username>/postgresql
PostgreSQL service auto-launch
------------------------------
Running our image seems complicated. We have to specify the whole command with
``docker run``. Let's simplify it so the service starts automatically when the
container starts.
.. code-block:: bash
docker commit <container_id> <your username>/postgresql -run='{"Cmd": \
["/bin/su", "postgres", "-c", "/usr/lib/postgresql/9.2/bin/postgres -D \
/var/lib/postgresql/9.2/main -c \
config_file=/etc/postgresql/9.2/main/postgresql.conf"], "PortSpecs": ["5432"]}'
From now on, just type ``docker run <your username>/postgresql`` and PostgreSQL
should automatically start.
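
Putting it together, a quick sketch that reuses the ``docker inspect``/``psql`` commands from above against an auto-launched container:

    CONTAINER=$(docker run -d <your username>/postgresql)
    CONTAINER_IP=$(docker inspect $CONTAINER | grep IPAddress | awk '{ print $2 }' | tr -d ',"')
    psql -h $CONTAINER_IP -p 5432 -d docker -U docker -W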


@@ -36,7 +36,13 @@ Inside of the "shykes/pybuilder" image there is a command called buildapp, we ar
docker attach $BUILD_JOB
[...]
We attach to the new container to see what is going on. Ctrl-C to disconnect
While this container is running, we can attach to the new container to see what is going on. Ctrl-C to disconnect.
.. code-block:: bash
docker ps -a
List all docker containers. If this container has already finished running, it will still be listed here.
.. code-block:: bash


@@ -33,7 +33,7 @@ The password is 'screencast'
.. code-block:: bash
# Hello! We are going to try and install openssh on a container and run it as a servic
# Hello! We are going to try and install openssh on a container and run it as a service
# let's pull ubuntu to get a base ubuntu image.
$ docker pull ubuntu
# I had it so it was quick
@@ -46,7 +46,7 @@ The password is 'screencast'
$ apt-get install openssh-server
# ok. lets see if we can run it.
$ which sshd
# we need to create priviledge separation directory
# we need to create privilege separation directory
$ mkdir /var/run/sshd
$ /usr/sbin/sshd
$ exit


@@ -9,40 +9,140 @@ FAQ
Most frequently asked questions.
--------------------------------
1. **How much does Docker cost?**
How much does Docker cost?
..........................
Docker is 100% free, it is open source, so you can use it without paying.
2. **What open source license are you using?**
What open source license are you using?
.......................................
We are using the Apache License Version 2.0, see it here: https://github.com/dotcloud/docker/blob/master/LICENSE
We are using the Apache License Version 2.0, see it here:
https://github.com/dotcloud/docker/blob/master/LICENSE
3. **Does Docker run on Mac OS X or Windows?**
Does Docker run on Mac OS X or Windows?
.......................................
Not at this time, Docker currently only runs on Linux, but you can use VirtualBox to run Docker in a
virtual machine on your box, and get the best of both worlds. Check out the :ref:`install_using_vagrant` and :ref:`windows` installation guides.
Not at this time, Docker currently only runs on Linux, but you can
use VirtualBox to run Docker in a virtual machine on your box, and
get the best of both worlds. Check out the
:ref:`install_using_vagrant` and :ref:`windows` installation
guides.
4. **How do containers compare to virtual machines?**
How do containers compare to virtual machines?
..............................................
They are complementary. VMs are best used to allocate chunks of hardware resources. Containers operate at the process level, which makes them very lightweight and perfect as a unit of software delivery.
They are complementary. VMs are best used to allocate chunks of
hardware resources. Containers operate at the process level, which
makes them very lightweight and perfect as a unit of software
delivery.
5. **Can I help by adding some questions and answers?**
What does Docker add to just plain LXC?
.......................................
Docker is not a replacement for LXC. "LXC" refers to capabilities
of the Linux kernel (specifically namespaces and control groups)
which allow sandboxing processes from one another, and controlling
their resource allocations. On top of this low-level foundation of
kernel features, Docker offers a high-level tool with several
powerful functionalities:
* *Portable deployment across machines.*
Docker defines a format for bundling an application and all its
dependencies into a single object which can be transferred to
any Docker-enabled machine, and executed there with the
guarantee that the execution environment exposed to the
application will be the same. LXC implements process sandboxing,
which is an important pre-requisite for portable deployment, but
that alone is not enough for portable deployment. If you sent me
a copy of your application installed in a custom LXC
configuration, it would almost certainly not run on my machine
the way it does on yours, because it is tied to your machine's
specific configuration: networking, storage, logging, distro,
etc. Docker defines an abstraction for these machine-specific
settings, so that the exact same Docker container can run -
unchanged - on many different machines, with many different
configurations.
* *Application-centric.*
Docker is optimized for the deployment of applications, as
opposed to machines. This is reflected in its API, user
interface, design philosophy and documentation. By contrast, the
``lxc`` helper scripts focus on containers as lightweight
machines - basically servers that boot faster and need less
RAM. We think there's more to containers than just that.
* *Automatic build.*
Docker includes :ref:`a tool for developers to automatically
assemble a container from their source code <dockerbuilder>`,
with full control over application dependencies, build tools,
packaging etc. They are free to use ``make, maven, chef, puppet,
salt,`` Debian packages, RPMs, source tarballs, or any
combination of the above, regardless of the configuration of the
machines.
* *Versioning.*
Docker includes git-like capabilities for tracking successive
versions of a container, inspecting the diff between versions,
committing new versions, rolling back etc. The history also
includes how a container was assembled and by whom, so you get
full traceability from the production server all the way back to
the upstream developer. Docker also implements incremental
uploads and downloads, similar to ``git pull``, so new versions
of a container can be transferred by only sending diffs.
* *Component re-use.*
Any container can be used as a :ref:`"base image"
<base_image_def>` to create more specialized components. This
can be done manually or as part of an automated build. For
example you can prepare the ideal Python environment, and use it
as a base for 10 different applications. Your ideal Postgresql
setup can be re-used for all your future projects. And so on.
* *Sharing.*
Docker has access to a `public registry
<http://index.docker.io>`_ where thousands of people have
uploaded useful containers: anything from Redis, CouchDB,
Postgres to IRC bouncers to Rails app servers to Hadoop to base
images for various Linux distros. The :ref:`registry
<registryindexspec>` also includes an official "standard
library" of useful containers maintained by the Docker team. The
registry itself is open-source, so anyone can deploy their own
registry to store and transfer private containers, for internal
server deployments for example.
* *Tool ecosystem.*
Docker defines an API for automating and customizing the
creation and deployment of containers. There are a huge number
of tools integrating with Docker to extend its
capabilities. PaaS-like deployment (Dokku, Deis, Flynn),
multi-node orchestration (Maestro, Salt, Mesos, Openstack Nova),
management dashboards (docker-ui, Openstack Horizon, Shipyard),
configuration management (Chef, Puppet), continuous integration
(Jenkins, Strider, Travis), etc. Docker is rapidly establishing
itself as the standard for container-based tooling.
Can I help by adding some questions and answers?
................................................
Definitely! You can fork `the repo`_ and edit the documentation sources.
42. **Where can I find more answers?**
Where can I find more answers?
..............................
You can find more answers on:
* `Docker club mailinglist`_
* `Docker user mailinglist`_
* `Docker developer mailinglist`_
* `IRC, docker on freenode`_
* `Github`_
* `Ask questions on Stackoverflow`_
* `Join the conversation on Twitter`_
.. _Docker club mailinglist: https://groups.google.com/d/forum/docker-club
.. _Docker user mailinglist: https://groups.google.com/d/forum/docker-user
.. _Docker developer mailinglist: https://groups.google.com/d/forum/docker-dev
.. _the repo: http://www.github.com/dotcloud/docker
.. _IRC, docker on freenode: irc://chat.freenode.net#docker
.. _Github: http://www.github.com/dotcloud/docker


@@ -23,7 +23,7 @@ dependencies.
commit``).
Each use of ``docker`` is documented here. The features of Docker are
currently in active development, so this documention will change
currently in active development, so this documentation will change
frequently.
For an overview of Docker, please see the `Introduction


@@ -10,7 +10,7 @@ Rackspace Cloud
:ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
Installing Docker on Ubuntu proviced by Rackspace is pretty straightforward, and you should mostly be able to follow the
Installing Docker on Ubuntu provided by Rackspace is pretty straightforward, and you should mostly be able to follow the
:ref:`ubuntu_linux` installation guide.
**However, there is one caveat:**


@@ -14,7 +14,7 @@ switches the whole rootfs volume to read-write mode.
Layer
.....
When Docker mounts the rootfs, it starts read-only, as in a tradtional
When Docker mounts the rootfs, it starts read-only, as in a traditional
Linux boot, but then, instead of changing the file system to
read-write mode, it takes advantage of a `union mount
<http://en.wikipedia.org/wiki/Union_mount>`_ to add a read-write file


@@ -2,6 +2,8 @@
:description: Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image.
:keywords: builder, docker, Dockerfile, automation, image creation
.. _dockerbuilder:
==================
Dockerfile Builder
==================


@@ -1,6 +1,6 @@
:title: Working With Repositories
:description: Repositories allow users to share images.
:keywords: repo, repositiores, usage, pull image, push image, image, documentation
:keywords: repo, repositories, usage, pull image, push image, image, documentation
.. _working_with_the_repository:
@@ -71,7 +71,7 @@ function completely independently from the Central Index.
Find public images available on the Central Index
-------------------------------------------------
Seach by name, namespace or description
Search by name, namespace or description
.. code-block:: bash


@@ -1 +1 @@
Thatcher Peskens <thatcher@dotcloud.com>
Thatcher Peskens <thatcher@dotcloud.com> (@dhrp)


@@ -23,7 +23,7 @@ run add-apt-repository -y ppa:dotcloud/docker-golang/ubuntu
run apt-get update
# Packages required to checkout, build and upload docker
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q s3cmd curl
run curl -s -o /go.tar.gz https://go.googlecode.com/files/go1.1.1.linux-amd64.tar.gz
run curl -s -o /go.tar.gz https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz
run tar -C /usr/local -xzf /go.tar.gz
run echo "export PATH=/usr/local/go/bin:$PATH" > /.bashrc
run echo "export PATH=/usr/local/go/bin:$PATH" > /.bash_profile


@@ -1 +1 @@
Daniel Mizyrycki <daniel@dotcloud.com>
Daniel Mizyrycki <daniel@dotcloud.com> (@mzdaniel)


@@ -1,2 +1,2 @@
Ken Cochrane <ken@dotcloud.com>
Jerome Petazzoni <jerome@dotcloud.com>
Ken Cochrane <ken@dotcloud.com> (@kencochrane)
Jerome Petazzoni <jerome@dotcloud.com> (@jpetazzo)


@@ -1 +1 @@
Joffrey Fuhrer <joffrey@dotcloud.com>
Joffrey Fuhrer <joffrey@dotcloud.com> (@shin-)


@@ -332,7 +332,7 @@ func newPortMapper() (*PortMapper, error) {
return mapper, nil
}
// Port allocator: Atomatically allocate and release networking ports
// Port allocator: Automatically allocate and release networking ports
type PortAllocator struct {
sync.Mutex
inUse map[int]struct{}
@@ -385,7 +385,7 @@ func newPortAllocator() (*PortAllocator, error) {
return allocator, nil
}
// IP allocator: Atomatically allocate and release networking ports
// IP allocator: Automatically allocate and release networking ports
type IPAllocator struct {
network *net.IPNet
queueAlloc chan allocatedIP


@@ -1 +1 @@
Daniel Mizyrycki <daniel@dotcloud.com>
Daniel Mizyrycki <daniel@dotcloud.com> (@mzdaniel)


@@ -5,4 +5,6 @@ stop on runlevel [!2345]
respawn
exec /usr/bin/docker -d
script
/usr/bin/docker -d
end script
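
With the ``script`` stanza above, upstart supervises the daemon process itself. Assuming the job file is installed as /etc/init/docker.conf, it can be driven with the usual upstart commands:

    sudo start docker
    status docker       # e.g. "docker start/running"
    sudo stop docker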


@@ -1,3 +1,3 @@
Sam Alba <sam@dotcloud.com>
Joffrey Fuhrer <joffrey@dotcloud.com>
Ken Cochrane <ken@dotcloud.com>
Sam Alba <sam@dotcloud.com> (@samalba)
Joffrey Fuhrer <joffrey@dotcloud.com> (@shin-)
Ken Cochrane <ken@dotcloud.com> (@kencochrane)


@@ -384,7 +384,7 @@ func (r *Registry) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, regis
if res.StatusCode != 200 {
errBody, err := ioutil.ReadAll(res.Body)
if err != nil {
return utils.NewHTTPRequestError(fmt.Sprint("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
}
var jsonBody map[string]string
if err := json.Unmarshal(errBody, &jsonBody); err != nil {


@@ -241,13 +241,13 @@ func (runtime *Runtime) UpdateCapabilities(quiet bool) {
if !runtime.capabilities.SwapLimit && !quiet {
log.Printf("WARNING: Your kernel does not support cgroup swap limit.")
}
}
content, err3 := ioutil.ReadFile("/proc/sys/net/ipv4/ip_forward")
runtime.capabilities.IPv4Forwarding = err3 == nil && len(content) > 0 && content[0] == '1'
if !runtime.capabilities.IPv4Forwarding && !quiet {
log.Printf("WARNING: IPv4 forwarding is disabled.")
}
}
}
// FIXME: harmonize with NewGraph()


@@ -425,7 +425,7 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin
out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling", "metadata"))
imgJSON, imgSize, err := r.GetRemoteImageJSON(id, endpoint, token)
if err != nil {
// FIXME: Keep goging in case of error?
// FIXME: Keep going in case of error?
return err
}
img, err := NewImgJSON(imgJSON)
@@ -565,7 +565,7 @@ func (srv *Server) poolAdd(kind, key string) error {
srv.pushingPool[key] = struct{}{}
break
default:
return fmt.Errorf("Unkown pool type")
return fmt.Errorf("Unknown pool type")
}
return nil
}
@@ -579,7 +579,7 @@ func (srv *Server) poolRemove(kind, key string) error {
delete(srv.pushingPool, key)
break
default:
return fmt.Errorf("Unkown pool type")
return fmt.Errorf("Unknown pool type")
}
return nil
}
@@ -693,7 +693,7 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID,
out = utils.NewWriteFlusher(out)
jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.graph.Root, imgID, "json"))
if err != nil {
return "", fmt.Errorf("Error while retreiving the path for {%s}: %s", imgID, err)
return "", fmt.Errorf("Error while retrieving the path for {%s}: %s", imgID, err)
}
out.Write(sf.FormatStatus("", "Pushing %s", imgID))
@@ -731,7 +731,7 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID,
return imgData.Checksum, nil
}
// FIXME: Allow to interupt current push when new push of same image is done.
// FIXME: Allow to interrupt current push when new push of same image is done.
func (srv *Server) ImagePush(localName string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig) error {
if err := srv.poolAdd("push", localName); err != nil {
return err


@@ -283,8 +283,8 @@ func TestPools(t *testing.T) {
t.Fatalf("Expected `pull test1 is already in progress`")
}
err = srv.poolAdd("wait", "test3")
if err == nil || err.Error() != "Unkown pool type" {
t.Fatalf("Expected `Unkown pool type`")
if err == nil || err.Error() != "Unknown pool type" {
t.Fatalf("Expected `Unknown pool type`")
}
err = srv.poolRemove("pull", "test2")
@@ -304,8 +304,8 @@ func TestPools(t *testing.T) {
t.Fatal(err)
}
err = srv.poolRemove("wait", "test3")
if err == nil || err.Error() != "Unkown pool type" {
t.Fatalf("Expected `Unkown pool type`")
if err == nil || err.Error() != "Unknown pool type" {
t.Fatalf("Expected `Unknown pool type`")
}
}


@@ -1,2 +1,2 @@
Guillaume Charmes <guillaume@dotcloud.com>
Solomon Hykes <solomon@dotcloud.com>
Guillaume Charmes <guillaume@dotcloud.com> (@creack)
Solomon Hykes <solomon@dotcloud.com> (@shykes)


@@ -1 +1 @@
Daniel Mizyrycki <daniel@dotcloud.com>
Daniel Mizyrycki <daniel@dotcloud.com> (@mzdaniel)


@@ -219,7 +219,7 @@ func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult strin
func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) {
if r := CompareKernelVersion(a, b); r != result {
t.Fatalf("Unepected kernel version comparaison result. Found %d, expected %d", r, result)
t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result)
}
}