rebase docs

Commit f69c465231: 65 changed files with 1140 additions and 354 deletions
@@ -1,5 +1,12 @@
 # Changelog

+## 0.5.3 (2013-08-13)
+ * Runtime: Use docker group for socket permissions
+ - Runtime: Spawn shell within upstart script
+ - Builder: Make sure ENV instruction within build perform a commit each time
+ - Runtime: Handle ip route showing mask-less IP addresses
+ - Runtime: Add hostname to environment
+
 ## 0.5.2 (2013-08-08)
 * Builder: Forbid certain paths within docker build ADD
 - Runtime: Change network range to avoid conflict with EC2 DNS
@@ -15,6 +15,7 @@ run cd /tmp && echo 'package main' > t.go && go test -a -i -v
 run PKG=github.com/kr/pty REV=27435c699; git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
 run PKG=github.com/gorilla/context/ REV=708054d61e5; git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
 run PKG=github.com/gorilla/mux/ REV=9b36453141c; git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
+run PKG=github.com/dotcloud/tar/ REV=d06045a6d9; git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
 # Run dependencies
 run apt-get install -y iptables
 # lxc requires updating ubuntu sources
@@ -22,6 +23,9 @@ run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt
 run apt-get update
 run apt-get install -y lxc
 run apt-get install -y aufs-tools
+# Docker requires code.google.com/p/go.net/websocket
+run apt-get install -y -q mercurial
+run PKG=code.google.com/p/go.net REV=78ad7f42aa2e; hg clone https://$PKG /go/src/$PKG && cd /go/src/$PKG && hg checkout -r $REV
 # Upload docker source
 add . /go/src/github.com/dotcloud/docker
 # Build the binary
README.md (10 changes)

@@ -147,7 +147,7 @@ Quick install on Ubuntu 12.04 and 12.10
 ---------------------------------------

 ```bash
-curl get.docker.io | sudo sh -x
+curl https://get.docker.io | sudo sh -x
 ```

 Binary installs
@@ -166,8 +166,12 @@ supported.
 Installing from source
 ----------------------

-1. Make sure you have a [Go language](http://golang.org/doc/install)
-compiler >= 1.1 and [git](http://git-scm.com) installed.
+1. Install Dependencies
+    * [Go language 1.1.x](http://golang.org/doc/install)
+    * [git](http://git-scm.com)
+    * [lxc](http://lxc.sourceforge.net)
+    * [aufs-tools](http://aufs.sourceforge.net)

 2. Checkout the source code

 ```bash
@@ -97,8 +97,8 @@ func TestGetEvents(t *testing.T) {
 		listeners: make(map[string]chan utils.JSONMessage),
 	}

-	srv.LogEvent("fakeaction", "fakeid")
-	srv.LogEvent("fakeaction2", "fakeid")
+	srv.LogEvent("fakeaction", "fakeid", "fakeimage")
+	srv.LogEvent("fakeaction2", "fakeid", "fakeimage")

 	req, err := http.NewRequest("GET", "/events?since=1", nil)
 	if err != nil {
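The extra LogEvent argument threads the image name into each event. A minimal sketch of the payload this produces, using the JSON field names documented for GET /events later in this commit (`status`, `id`, `from`, `time`); the struct and standalone program here are illustrative only, not the types used in the codebase:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// event mirrors the wire format shown in the remote API docs in this commit:
// {"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924}
type event struct {
	Status string `json:"status"`
	ID     string `json:"id"`
	From   string `json:"from"` // image name, the new third argument to LogEvent
	Time   int64  `json:"time"`
}

func main() {
	e := event{Status: "fakeaction", ID: "fakeid", From: "fakeimage", Time: time.Now().Unix()}
	b, _ := json.Marshal(e)
	fmt.Println(string(b)) // {"status":"fakeaction","id":"fakeid","from":"fakeimage","time":...}
}
```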
@@ -80,7 +80,7 @@ func LoadConfig(rootPath string) (*ConfigFile, error) {
 	}
 	b, err := ioutil.ReadFile(confFile)
 	if err != nil {
-		return nil, err
+		return &configFile, err
 	}

 	if err := json.Unmarshal(b, &configFile.Configs); err != nil {
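Returning `&configFile` instead of `nil` leaves the caller with a usable, empty config even when the file cannot be read; later in this commit NewDockerCli only warns on the error and carries on. A minimal sketch of that calling pattern, with a hypothetical `loadConfig` standing in for auth.LoadConfig:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

type configFile struct {
	Configs map[string]string
}

// loadConfig is a stand-in for auth.LoadConfig: on failure it still returns a
// non-nil, empty config so the caller can continue with defaults.
func loadConfig(path string) (*configFile, error) {
	cfg := configFile{Configs: make(map[string]string)}
	if _, err := os.Stat(path); err != nil {
		return &cfg, errors.New("could not read " + path)
	}
	return &cfg, nil
}

func main() {
	cfg, err := loadConfig("/nonexistent/.dockercfg")
	if err != nil {
		fmt.Fprintf(os.Stderr, "WARNING: %s\n", err) // warn, but keep going
	}
	fmt.Println(len(cfg.Configs)) // safe: cfg is never nil
}
```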
@@ -227,6 +227,11 @@ func (b *buildFile) CmdEntrypoint(args string) error {
 	return nil
 }

+func (b *buildFile) CmdWorkdir(workdir string) error {
+	b.config.WorkingDir = workdir
+	return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
+}
+
 func (b *buildFile) CmdVolume(args string) error {
 	if args == "" {
 		return fmt.Errorf("Volume cannot be empty")
commands.go (55 changes)

@@ -27,7 +27,7 @@ import (
 	"unicode"
 )

-const VERSION = "0.5.2-dev"
+const VERSION = "0.5.3-dev"

 var (
 	GITCOMMIT string
@@ -857,10 +857,12 @@ func (cli *DockerCli) CmdPush(args ...string) error {
 	}

 	if err := push(); err != nil {
-		if err == fmt.Errorf("Authentication is required.") {
-			if err = cli.checkIfLogged("push"); err == nil {
-				return push()
+		if err.Error() == "Authentication is required." {
+			fmt.Fprintln(cli.out, "\nPlease login prior to push:")
+			if err := cli.CmdLogin(""); err != nil {
+				return err
 			}
+			return push()
 		}
 		return err
 	}
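The old check compared the returned error against a freshly built `fmt.Errorf(...)`, which can never be equal because every call allocates a new error value; the replacement compares the message text instead. A small self-contained sketch of the difference (the names here are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

var errAuthRequired = errors.New("Authentication is required.")

func push() error { return errAuthRequired }

func main() {
	err := push()

	// Always false: fmt.Errorf returns a brand-new error value,
	// so it is never identical to err.
	fmt.Println(err == fmt.Errorf("Authentication is required."))

	// What the new code does: compare the message text.
	fmt.Println(err.Error() == "Authentication is required.")

	// A shared sentinel error would avoid the string comparison entirely.
	fmt.Println(err == errAuthRequired)
}
```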
@@ -1400,6 +1402,13 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 	body, statusCode, err := cli.call("POST", "/containers/create", config)
 	//if image not found try to pull it
 	if statusCode == 404 {
+		_, tag := utils.ParseRepositoryTag(config.Image)
+		if tag == "" {
+			tag = DEFAULTTAG
+		}
+
+		fmt.Printf("Unable to find image '%s' (tag: %s) locally\n", config.Image, tag)
+
 		v := url.Values{}
 		repos, tag := utils.ParseRepositoryTag(config.Image)
 		v.Set("fromImage", repos)
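For readers unfamiliar with utils.ParseRepositoryTag: it splits an image reference into repository and tag and returns an empty tag when none was given, which is why the code above falls back to DEFAULTTAG before printing the message. A rough, standalone approximation, not the exact implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// parseRepositoryTag approximates utils.ParseRepositoryTag: "ubuntu:12.04"
// becomes ("ubuntu", "12.04") and a bare "ubuntu" yields an empty tag.
func parseRepositoryTag(repos string) (string, string) {
	n := strings.LastIndex(repos, ":")
	if n < 0 {
		return repos, ""
	}
	if tag := repos[n+1:]; !strings.Contains(tag, "/") {
		return repos[:n], tag
	}
	return repos, ""
}

func main() {
	repo, tag := parseRepositoryTag("base")
	if tag == "" {
		tag = "latest" // the CLI substitutes DEFAULTTAG here
	}
	fmt.Println(repo, tag) // base latest
}
```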
@@ -1469,6 +1478,17 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 		v.Set("stderr", "1")
 	}

+	signals := make(chan os.Signal, 1)
+	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
+	go func() {
+		for sig := range signals {
+			fmt.Printf("\nReceived signal: %s; cleaning up\n", sig)
+			if err := cli.CmdStop("-t", "4", runResult.ID); err != nil {
+				fmt.Printf("failed to stop container: %v", err)
+			}
+		}
+	}()
+
 	if err := cli.hijack("POST", "/containers/"+runResult.ID+"/attach?"+v.Encode(), config.Tty, cli.in, cli.out); err != nil {
 		utils.Debugf("Error hijack: %s", err)
 		return err
@@ -1512,19 +1532,6 @@ func (cli *DockerCli) CmdCp(args ...string) error {
 	return nil
 }

-func (cli *DockerCli) checkIfLogged(action string) error {
-	// If condition AND the login failed
-	if cli.configFile.Configs[auth.IndexServerAddress()].Username == "" {
-		if err := cli.CmdLogin(""); err != nil {
-			return err
-		}
-		if cli.configFile.Configs[auth.IndexServerAddress()].Username == "" {
-			return fmt.Errorf("Please login prior to %s. ('docker login')", action)
-		}
-	}
-	return nil
-}
-
 func (cli *DockerCli) call(method, path string, data interface{}) ([]byte, int, error) {
 	var params io.Reader
 	if data != nil {
@@ -1548,6 +1555,9 @@ func (cli *DockerCli) call(method, path string, data interface{}) ([]byte, int,
 	}
 	dial, err := net.Dial(cli.proto, cli.addr)
 	if err != nil {
+		if strings.Contains(err.Error(), "connection refused") {
+			return nil, -1, fmt.Errorf("Can't connect to docker daemon. Is 'docker -d' running on this host?")
+		}
 		return nil, -1, err
 	}
 	clientconn := httputil.NewClientConn(dial, nil)
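The same three-line "connection refused" check is added to call, stream and hijack below. A hedged sketch of how the duplication could be factored into a single helper; this helper does not exist in the codebase and is only an illustration of the pattern:

```go
package main

import (
	"errors"
	"fmt"
	"net"
	"strings"
)

// dialDaemon wraps net.Dial and rewrites "connection refused" into the
// friendlier message used by the CLI. Purely illustrative; the real code
// repeats the check inline in call, stream and hijack.
func dialDaemon(proto, addr string) (net.Conn, error) {
	conn, err := net.Dial(proto, addr)
	if err != nil {
		if strings.Contains(err.Error(), "connection refused") {
			return nil, errors.New("Can't connect to docker daemon. Is 'docker -d' running on this host?")
		}
		return nil, err
	}
	return conn, nil
}

func main() {
	if _, err := dialDaemon("tcp", "127.0.0.1:4243"); err != nil {
		fmt.Println(err)
	}
}
```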
@@ -1588,6 +1598,9 @@ func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer) e
 	}
 	dial, err := net.Dial(cli.proto, cli.addr)
 	if err != nil {
+		if strings.Contains(err.Error(), "connection refused") {
+			return fmt.Errorf("Can't connect to docker daemon. Is 'docker -d' running on this host?")
+		}
 		return err
 	}
 	clientconn := httputil.NewClientConn(dial, nil)
@@ -1634,6 +1647,9 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea

 	dial, err := net.Dial(cli.proto, cli.addr)
 	if err != nil {
+		if strings.Contains(err.Error(), "connection refused") {
+			return fmt.Errorf("Can't connect to docker daemon. Is 'docker -d' running on this host?")
+		}
 		return err
 	}
 	clientconn := httputil.NewClientConn(dial, nil)
@@ -1762,7 +1778,10 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *Doc
 		err = out
 	}

-	configFile, _ := auth.LoadConfig(os.Getenv("HOME"))
+	configFile, e := auth.LoadConfig(os.Getenv("HOME"))
+	if e != nil {
+		fmt.Fprintf(err, "WARNING: %s\n", e)
+	}
 	return &DockerCli{
 		proto: proto,
 		addr:  addr,
@@ -90,6 +90,69 @@ func TestRunHostname(t *testing.T) {

 }

+// TestRunWorkdir checks that 'docker run -w' correctly sets a custom working directory
+func TestRunWorkdir(t *testing.T) {
+	stdout, stdoutPipe := io.Pipe()
+
+	cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+	defer cleanup(globalRuntime)
+
+	c := make(chan struct{})
+	go func() {
+		defer close(c)
+		if err := cli.CmdRun("-w", "/foo/bar", unitTestImageID, "pwd"); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	setTimeout(t, "Reading command output time out", 2*time.Second, func() {
+		cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
+		if err != nil {
+			t.Fatal(err)
+		}
+		if cmdOutput != "/foo/bar\n" {
+			t.Fatalf("'pwd' should display '%s', not '%s'", "/foo/bar\n", cmdOutput)
+		}
+	})
+
+	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
+		<-c
+	})
+
+}
+
+// TestRunWorkdirExists checks that 'docker run -w' correctly sets a custom working directory, even if it exists
+func TestRunWorkdirExists(t *testing.T) {
+	stdout, stdoutPipe := io.Pipe()
+
+	cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+	defer cleanup(globalRuntime)
+
+	c := make(chan struct{})
+	go func() {
+		defer close(c)
+		if err := cli.CmdRun("-w", "/proc", unitTestImageID, "pwd"); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	setTimeout(t, "Reading command output time out", 2*time.Second, func() {
+		cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
+		if err != nil {
+			t.Fatal(err)
+		}
+		if cmdOutput != "/proc\n" {
+			t.Fatalf("'pwd' should display '%s', not '%s'", "/proc\n", cmdOutput)
+		}
+	})
+
+	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
+		<-c
+	})
+
+}
+
+
 func TestRunExit(t *testing.T) {
 	stdin, stdinPipe := io.Pipe()
 	stdout, stdoutPipe := io.Pipe()
container.go (92 changes)

@@ -2,6 +2,7 @@ package docker

 import (
 	"encoding/json"
+	"errors"
 	"flag"
 	"fmt"
 	"github.com/dotcloud/docker/term"
@@ -76,8 +77,10 @@ type Config struct {
 	Image           string // Name of the image as it was passed by the operator (eg. could be symbolic)
 	Volumes         map[string]struct{}
 	VolumesFrom     string
+	WorkingDir      string
 	Entrypoint      []string
 	NetworkDisabled bool
+	Privileged      bool
 }

 type HostConfig struct {
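The two new fields surface later in this commit as the -w and -privileged run flags. A minimal sketch of a container config carrying them, using a trimmed-down struct that mirrors only the fields relevant here (the real Config has many more members):

```go
package main

import "fmt"

// config mirrors just the Config fields this commit touches; illustrative only.
type config struct {
	Image      string
	WorkingDir string // populated from the new -w flag
	Privileged bool   // populated from the new -privileged flag
}

func main() {
	c := config{Image: "base", WorkingDir: "/foo/bar", Privileged: false}
	fmt.Printf("%+v\n", c)
}
```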
@@ -91,6 +94,10 @@ type BindMap struct {
 	Mode    string
 }

+var (
+	ErrInvaidWorikingDirectory = errors.New("The working directory is invalid. It needs to be an absolute path.")
+)
+
 func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
 	cmd := Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container")
 	if len(args) > 0 && args[0] != "--help" {
@@ -99,6 +106,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 	}

 	flHostname := cmd.String("h", "", "Container host name")
+	flWorkingDir := cmd.String("w", "", "Working directory inside the container")
 	flUser := cmd.String("u", "", "Username or UID")
 	flDetach := cmd.Bool("d", false, "Detached mode: Run container in the background, print new container id")
 	flAttach := NewAttachOpts()
@@ -108,6 +116,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 	flMemory := cmd.Int64("m", 0, "Memory limit (in bytes)")
 	flContainerIDFile := cmd.String("cidfile", "", "Write the container ID to the file")
 	flNetwork := cmd.Bool("n", true, "Enable networking for this container")
+	flPrivileged := cmd.Bool("privileged", false, "Give extended privileges to this container")

 	if capabilities != nil && *flMemory > 0 && !capabilities.MemoryLimit {
 		//fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
@@ -137,6 +146,9 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 	if *flDetach && len(flAttach) > 0 {
 		return nil, nil, cmd, fmt.Errorf("Conflicting options: -a and -d")
 	}
+	if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) {
+		return nil, nil, cmd, ErrInvaidWorikingDirectory
+	}
 	// If neither -d or -a are set, attach to everything by default
 	if len(flAttach) == 0 && !*flDetach {
 		if !*flDetach {
@@ -194,6 +206,8 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 		Volumes:     flVolumes,
 		VolumesFrom: *flVolumesFrom,
 		Entrypoint:  entrypoint,
+		Privileged:  *flPrivileged,
+		WorkingDir:  *flWorkingDir,
 	}
 	hostConfig := &HostConfig{
 		Binds: binds,
@@ -574,40 +588,12 @@ func (container *Container) Start(hostConfig *HostConfig) error {
 		binds[path.Clean(dst)] = bindMap
 	}

-	// FIXME: evaluate volumes-from before individual volumes, so that the latter can override the former.
-	// Create the requested volumes volumes
 	if container.Volumes == nil || len(container.Volumes) == 0 {
 		container.Volumes = make(map[string]string)
 		container.VolumesRW = make(map[string]bool)
-
-		for volPath := range container.Config.Volumes {
-			volPath = path.Clean(volPath)
-			// If an external bind is defined for this volume, use that as a source
-			if bindMap, exists := binds[volPath]; exists {
-				container.Volumes[volPath] = bindMap.SrcPath
-				if strings.ToLower(bindMap.Mode) == "rw" {
-					container.VolumesRW[volPath] = true
-				}
-				// Otherwise create an directory in $ROOT/volumes/ and use that
-			} else {
-				c, err := container.runtime.volumes.Create(nil, container, "", "", nil)
-				if err != nil {
-					return err
-				}
-				srcPath, err := c.layer()
-				if err != nil {
-					return err
-				}
-				container.Volumes[volPath] = srcPath
-				container.VolumesRW[volPath] = true // RW by default
-			}
-			// Create the mountpoint
-			if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
-				return nil
-			}
-		}
 	}

+	// Apply volumes from another container if requested
 	if container.Config.VolumesFrom != "" {
 		c := container.runtime.Get(container.Config.VolumesFrom)
 		if c == nil {
@@ -615,7 +601,7 @@ func (container *Container) Start(hostConfig *HostConfig) error {
 		}
 		for volPath, id := range c.Volumes {
 			if _, exists := container.Volumes[volPath]; exists {
-				return fmt.Errorf("The requested volume %s overlap one of the volume of the container %s", volPath, c.ID)
+				continue
 			}
 			if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
 				return nil
@@ -627,6 +613,38 @@ func (container *Container) Start(hostConfig *HostConfig) error {
 		}
 	}

+	// Create the requested volumes if they don't exist
+	for volPath := range container.Config.Volumes {
+		volPath = path.Clean(volPath)
+		// Skip existing volumes
+		if _, exists := container.Volumes[volPath]; exists {
+			continue
+		}
+		// If an external bind is defined for this volume, use that as a source
+		if bindMap, exists := binds[volPath]; exists {
+			container.Volumes[volPath] = bindMap.SrcPath
+			if strings.ToLower(bindMap.Mode) == "rw" {
+				container.VolumesRW[volPath] = true
+			}
+			// Otherwise create an directory in $ROOT/volumes/ and use that
+		} else {
+			c, err := container.runtime.volumes.Create(nil, container, "", "", nil)
+			if err != nil {
+				return err
+			}
+			srcPath, err := c.layer()
+			if err != nil {
+				return err
+			}
+			container.Volumes[volPath] = srcPath
+			container.VolumesRW[volPath] = true // RW by default
+		}
+		// Create the mountpoint
+		if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
+			return nil
+		}
+	}
+
 	if err := container.generateLXCConfig(); err != nil {
 		return err
 	}
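Net effect of the two Start() hunks above: volumes inherited through VolumesFrom are applied first, and the loop that creates the container's own volumes then skips any path that is already populated. A condensed sketch of that ordering with illustrative types only (the real code works on the Container struct and creates volumes through runtime.volumes.Create):

```go
package main

import "fmt"

// setupVolumes condenses the new ordering: inherited volumes win, then the
// container's own requested volumes are created for any remaining paths.
func setupVolumes(requested []string, inherited map[string]string) map[string]string {
	volumes := make(map[string]string)
	for path, src := range inherited { // volumes-from, applied first
		volumes[path] = src
	}
	for _, path := range requested { // then the volumes requested with -v / VOLUME
		if _, exists := volumes[path]; exists {
			continue // already provided by volumes-from, leave it alone
		}
		volumes[path] = "newly-created-volume-for-" + path // placeholder for a freshly created volume
	}
	return volumes
}

func main() {
	got := setupVolumes([]string{"/test", "/data"}, map[string]string{"/test": "shared-layer"})
	fmt.Println(got["/test"]) // shared-layer: the inherited volume wins
	fmt.Println(got["/data"]) // freshly created
}
```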
@@ -659,6 +677,18 @@ func (container *Container) Start(hostConfig *HostConfig) error {
 		"-e", "container=lxc",
 		"-e", "HOSTNAME="+container.Config.Hostname,
 	)
+	if container.Config.WorkingDir != "" {
+		workingDir := path.Clean(container.Config.WorkingDir)
+		utils.Debugf("[working dir] working dir is %s", workingDir)
+
+		if err := os.MkdirAll(path.Join(container.RootfsPath(), workingDir), 0755); err != nil {
+			return nil
+		}
+
+		params = append(params,
+			"-w", workingDir,
+		)
+	}

 	for _, elem := range container.Config.Env {
 		params = append(params, "-e", elem)
@@ -813,7 +843,7 @@ func (container *Container) monitor() {
 	}
 	utils.Debugf("Process finished")
 	if container.runtime != nil && container.runtime.srv != nil {
-		container.runtime.srv.LogEvent("die", container.ShortID())
+		container.runtime.srv.LogEvent("die", container.ShortID(), container.runtime.repositories.ImageName(container.Image))
 	}
 	exitCode := -1
 	if container.cmd != nil {
@@ -1283,6 +1283,71 @@ func TestRestartWithVolumes(t *testing.T) {
 	}
 }

+// Test for #1351
+func TestVolumesFromWithVolumes(t *testing.T) {
+	runtime := mkRuntime(t)
+	defer nuke(runtime)
+
+	container, err := NewBuilder(runtime).Create(&Config{
+		Image:   GetTestImage(runtime).ID,
+		Cmd:     []string{"sh", "-c", "echo -n bar > /test/foo"},
+		Volumes: map[string]struct{}{"/test": {}},
+	},
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer runtime.Destroy(container)
+
+	for key := range container.Config.Volumes {
+		if key != "/test" {
+			t.Fail()
+		}
+	}
+
+	_, err = container.Output()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expected := container.Volumes["/test"]
+	if expected == "" {
+		t.Fail()
+	}
+
+	container2, err := NewBuilder(runtime).Create(
+		&Config{
+			Image:       GetTestImage(runtime).ID,
+			Cmd:         []string{"cat", "/test/foo"},
+			VolumesFrom: container.ID,
+			Volumes:     map[string]struct{}{"/test": {}},
+		},
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer runtime.Destroy(container2)
+
+	output, err := container2.Output()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if string(output) != "bar" {
+		t.Fail()
+	}
+
+	if container.Volumes["/test"] != container2.Volumes["/test"] {
+		t.Fail()
+	}
+
+	// Ensure it restarts successfully
+	_, err = container2.Output()
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
 func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
@@ -1320,3 +1385,35 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
 	}

 }
+
+func TestPrivilegedCanMknod(t *testing.T) {
+	runtime := mkRuntime(t)
+	defer nuke(runtime)
+	if output, _ := runContainer(runtime, []string{"-privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" {
+		t.Fatal("Could not mknod into privileged container")
+	}
+}
+
+func TestPrivilegedCanMount(t *testing.T) {
+	runtime := mkRuntime(t)
+	defer nuke(runtime)
+	if output, _ := runContainer(runtime, []string{"-privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" {
+		t.Fatal("Could not mount into privileged container")
+	}
+}
+
+func TestPrivilegedCannotMknod(t *testing.T) {
+	runtime := mkRuntime(t)
+	defer nuke(runtime)
+	if output, _ := runContainer(runtime, []string{"_", "sh", "-c", "mknod /tmp/sda b 8 0 || echo ok"}, t); output != "ok\n" {
+		t.Fatal("Could mknod into secure container")
+	}
+}
+
+func TestPrivilegedCannotMount(t *testing.T) {
+	runtime := mkRuntime(t)
+	defer nuke(runtime)
+	if output, _ := runContainer(runtime, []string{"_", "sh", "-c", "mount -t tmpfs none /tmp || echo ok"}, t); output != "ok\n" {
+		t.Fatal("Could mount into secure container")
+	}
+}
@@ -14,6 +14,7 @@ logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                     level='INFO')
 client = docker.Client()
 processed = {}
+processed_folders = []


 def build_library(repository=None, branch=None, namespace=None, push=False,
@@ -31,19 +32,34 @@ def build_library(repository=None, branch=None, namespace=None, push=False,
         logger.info('Repository provided assumed to be a local path')
         dst_folder = repository

+    try:
+        client.version()
+    except Exception as e:
+        logger.error('Could not reach the docker daemon. Please make sure it '
+                     'is running.')
+        logger.warning('Also make sure you have access to the docker UNIX '
+                       'socket (use sudo)')
+        return
+
     #FIXME: set destination folder and only pull latest changes instead of
     # cloning the whole repo everytime
     if not dst_folder:
         logger.info('Cloning docker repo from {0}, branch: {1}'.format(
             repository, branch))
         try:
-            dst_folder = git.clone_branch(repository, branch)
+            rep, dst_folder = git.clone_branch(repository, branch)
         except Exception as e:
             logger.exception(e)
             logger.error('Source repository could not be fetched. Check '
                          'that the address is correct and the branch exists.')
             return
-    for buildfile in os.listdir(os.path.join(dst_folder, 'library')):
+    try:
+        dirlist = os.listdir(os.path.join(dst_folder, 'library'))
+    except OSError as e:
+        logger.error('The path provided ({0}) could not be found or didn\'t'
+                     'contain a library/ folder.'.format(dst_folder))
+        return
+    for buildfile in dirlist:
         if buildfile == 'MAINTAINERS':
             continue
         f = open(os.path.join(dst_folder, 'library', buildfile))
@@ -92,20 +108,27 @@ def build_library(repository=None, branch=None, namespace=None, push=False,
     f.close()
     if dst_folder != repository:
         rmtree(dst_folder, True)
+    for d in processed_folders:
+        rmtree(d, True)
     summary.print_summary(logger)


 def build_repo(repository, ref, docker_repo, docker_tag, namespace, push, registry):
     docker_repo = '{0}/{1}'.format(namespace or 'library', docker_repo)
     img_id = None
+    dst_folder = None
     if '{0}@{1}'.format(repository, ref) not in processed.keys():
         logger.info('Cloning {0} (ref: {1})'.format(repository, ref))
-        dst_folder = git.clone(repository, ref)
+        if repository not in processed:
+            rep, dst_folder = git.clone(repository, ref)
+            processed[repository] = rep
+            processed_folders.append(dst_folder)
+        else:
+            dst_folder = git.checkout(processed[repository], ref)
         if not 'Dockerfile' in os.listdir(dst_folder):
             raise RuntimeError('Dockerfile not found in cloned repository')
         logger.info('Building using dockerfile...')
         img_id, logs = client.build(path=dst_folder, quiet=True)
-        rmtree(dst_folder, True)
     else:
         img_id = processed['{0}@{1}'.format(repository, ref)]
     logger.info('Committing to {0}:{1}'.format(docker_repo,
@@ -159,4 +182,4 @@ class Summary(object):
         if logger:
             logger.info(s + success + details)
         else:
             print s, success, details
@@ -16,6 +16,21 @@ def clone_tag(repo_url, tag, folder=None):
     return clone(repo_url, 'refs/tags/' + tag, folder)


+def checkout(rep, ref=None):
+    is_commit = False
+    if ref is None:
+        ref = 'refs/heads/master'
+    elif not ref.startswith('refs/'):
+        is_commit = True
+    if is_commit:
+        rep['HEAD'] = rep.commit(ref)
+    else:
+        rep['HEAD'] = rep.refs[ref]
+    indexfile = rep.index_path()
+    tree = rep["HEAD"].tree
+    index.build_index_from_tree(rep.path, indexfile, rep.object_store, tree)
+    return rep.path
+
+
 def clone(repo_url, ref=None, folder=None):
     is_commit = False
     if ref is None:
@@ -45,4 +60,4 @@ def clone(repo_url, ref=None, folder=None):
     tree = rep["HEAD"].tree
     index.build_index_from_tree(rep.path, indexfile, rep.object_store, tree)
     logger.debug("done")
-    return folder
+    return rep, folder
@@ -1,2 +1,2 @@
 dulwich==0.9.0
-docker-py==0.1.3
+-e git://github.com/dotcloud/docker-py.git#egg=docker-py
@@ -22,13 +22,27 @@
 # must have access to the socket for the completions to function correctly

 have docker && {
-	__docker_containers()
+	__docker_containers_all()
 	{
 		local containers
 		containers="$( docker ps -a -q )"
 		COMPREPLY=( $( compgen -W "$containers" -- "$cur" ) )
 	}

+	__docker_containers_running()
+	{
+		local containers
+		containers="$( docker ps -q )"
+		COMPREPLY=( $( compgen -W "$containers" -- "$cur" ) )
+	}
+
+	__docker_containers_stopped()
+	{
+		local containers
+		containers="$( comm -13 <(docker ps -q | sort -u) <(docker ps -a -q | sort -u) )"
+		COMPREPLY=( $( compgen -W "$containers" -- "$cur" ) )
+	}
+
 	__docker_image_repos()
 	{
 		local repos
@@ -85,7 +99,7 @@ _docker_docker()
 _docker_attach()
 {
 	if [ $cpos -eq $cword ]; then
-		__docker_containers
+		__docker_containers_running
 	fi
 }

@@ -124,7 +138,7 @@ _docker_commit()
 			COMPREPLY=( $( compgen -W "-author -m -run" -- "$cur" ) )
 			;;
 		*)
-			__docker_containers
+			__docker_containers_all
 			;;
 	esac
 }
@@ -132,7 +146,7 @@ _docker_commit()
 _docker_diff()
 {
 	if [ $cpos -eq $cword ]; then
-		__docker_containers
+		__docker_containers_all
 	fi
 }

@@ -144,7 +158,7 @@ _docker_events()
 _docker_export()
 {
 	if [ $cpos -eq $cword ]; then
-		__docker_containers
+		__docker_containers_all
 	fi
 }

@@ -212,7 +226,7 @@ _docker_inspect()

 _docker_kill()
 {
-	__docker_containers
+	__docker_containers_running
 }

 _docker_login()
@@ -223,14 +237,14 @@ _docker_login()
 _docker_logs()
 {
 	if [ $cpos -eq $cword ]; then
-		__docker_containers
+		__docker_containers_all
 	fi
 }

 _docker_port()
 {
 	if [ $cpos -eq $cword ]; then
-		__docker_containers
+		__docker_containers_all
 	fi
 }

@@ -264,7 +278,7 @@ _docker_restart()
 			COMPREPLY=( $( compgen -W "-t" -- "$cur" ) )
 			;;
 		*)
-			__docker_containers
+			__docker_containers_all
 			;;
 	esac
 }
@@ -276,7 +290,7 @@ _docker_rm()
 			COMPREPLY=( $( compgen -W "-v" -- "$cur" ) )
 			;;
 		*)
-			__docker_containers
+			__docker_containers_stopped
 			;;
 	esac
 }
@@ -293,7 +307,7 @@ _docker_run()
 			_filedir
 			;;
 		-volumes-from)
-			__docker_containers
+			__docker_containers_all
 			;;
 		-a|-c|-dns|-e|-entrypoint|-h|-m|-p|-u|-v)
 			return
@@ -343,7 +357,7 @@ _docker_search()

 _docker_start()
 {
-	__docker_containers
+	__docker_containers_stopped
 }

 _docker_stop()
@@ -361,7 +375,7 @@ _docker_stop()
 			COMPREPLY=( $( compgen -W "-t" -- "$cur" ) )
 			;;
 		*)
-			__docker_containers
+			__docker_containers_running
 			;;
 	esac
 }
@@ -374,7 +388,7 @@ _docker_tag()
 _docker_top()
 {
 	if [ $cpos -eq $cword ]; then
-		__docker_containers
+		__docker_containers_running
 	fi
 }

@@ -385,7 +399,7 @@ _docker_version()

 _docker_wait()
 {
-	__docker_containers
+	__docker_containers_all
 }

 _docker()
@@ -16,6 +16,7 @@ Docker Remote API

 - The Remote API is replacing rcli
 - By default the Docker daemon listens on unix:///var/run/docker.sock and the client must have root access to interact with the daemon
+- If a group named *docker* exists on your system, docker will apply ownership of the socket to the group
 - The API tends to be REST, but for some complex commands, like attach
   or pull, the HTTP connection is hijacked to transport stdout stdin
   and stderr
@@ -48,6 +49,10 @@ What's new

 **New!** You can now use ps args with docker top, like `docker top <container_id> aux`

+.. http:get:: /events:
+
+    **New!** Image's name added in the events
+
 :doc:`docker_remote_api_v1.3`
 *****************************
@@ -129,7 +129,9 @@ Create a container
            "Dns":null,
            "Image":"base",
            "Volumes":{},
-           "VolumesFrom":""
+           "VolumesFrom":"",
+           "WorkingDir":""
       }

 **Example response**:
@@ -195,7 +197,9 @@ Inspect a container
                "Dns": null,
                "Image": "base",
                "Volumes": {},
-               "VolumesFrom": ""
+               "VolumesFrom": "",
+               "WorkingDir":""
        },
        "State": {
                "Running": false,
@@ -746,7 +750,8 @@ Inspect an image
        ,"Dns":null,
        "Image":"base",
        "Volumes":null,
-       "VolumesFrom":""
+       "VolumesFrom":"",
+       "WorkingDir":""
        },
        "Size": 6824592
 }
@@ -1095,6 +1100,37 @@ Create a new image from a container's changes
    :statuscode 404: no such container
    :statuscode 500: server error


+Monitor Docker's events
+***********************
+
+.. http:get:: /events
+
+   Get events from docker, either in real time via streaming, or via polling (using `since`)
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      POST /events?since=1374067924
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json
+
+      {"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924}
+      {"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924}
+      {"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966}
+      {"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970}
+
+   :query since: timestamp used for polling
+   :statuscode 200: no error
+   :statuscode 500: server error
+
+
 3. Going further
 ================
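A minimal Go sketch of polling this endpoint over the default unix socket, using the same net.Dial plus httputil.ClientConn approach the CLI itself uses elsewhere in this commit; the socket path, the `since` value and the error handling are illustrative assumptions:

```go
package main

import (
	"fmt"
	"io"
	"net"
	"net/http"
	"net/http/httputil"
	"os"
)

func main() {
	// Connect to the daemon's unix socket (the default listed in these docs).
	dial, err := net.Dial("unix", "/var/run/docker.sock")
	if err != nil {
		fmt.Fprintln(os.Stderr, "Can't connect to docker daemon. Is 'docker -d' running on this host?")
		os.Exit(1)
	}
	clientconn := httputil.NewClientConn(dial, nil)
	defer clientconn.Close()

	// Poll events emitted since the given unix timestamp.
	req, err := http.NewRequest("GET", "/events?since=1374067924", nil)
	if err != nil {
		panic(err)
	}
	resp, err := clientconn.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Each line is one JSON event: {"status":...,"id":...,"from":...,"time":...}
	io.Copy(os.Stdout, resp.Body)
}
```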
@@ -2,9 +2,10 @@
 :description: Documentation for docker Registry and Registry API
 :keywords: docker, registry, api, index

+.. _registryindexspec:
+
 =====================
-Registry & index Spec
+Registry & Index Spec
 =====================

 .. contents:: Table of Contents
@@ -13,7 +13,7 @@ Docker Usage
 To list available commands, either run ``docker`` with no parameters or execute
 ``docker help``::

-  $ docker
+  $ sudo docker
     Usage: docker [OPTIONS] COMMAND [arg...]
       -H=[tcp://127.0.0.1:4243]: tcp://host:port to bind/connect to or unix://path/to/socket to use
|
@ -21,32 +21,44 @@ Examples
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker build .
|
sudo docker build .
|
||||||
|
|
||||||
| This will read the Dockerfile from the current directory. It will also send any other files and directories found in the current directory to the docker daemon.
|
This will read the ``Dockerfile`` from the current directory. It will
|
||||||
| The contents of this directory would be used by ADD commands found within the Dockerfile.
|
also send any other files and directories found in the current
|
||||||
| This will send a lot of data to the docker daemon if the current directory contains a lot of data.
|
directory to the ``docker`` daemon.
|
||||||
| If the absolute path is provided instead of '.', only the files and directories required by the ADD commands from the Dockerfile will be added to the context and transferred to the docker daemon.
|
|
||||||
|
|
The contents of this directory would be used by ``ADD`` commands found
|
||||||
|
within the ``Dockerfile``. This will send a lot of data to the
|
||||||
|
``docker`` daemon if the current directory contains a lot of data. If
|
||||||
|
the absolute path is provided instead of ``.`` then only the files and
|
||||||
|
directories required by the ADD commands from the ``Dockerfile`` will be
|
||||||
|
added to the context and transferred to the ``docker`` daemon.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker build -t vieux/apache:2.0 .
|
sudo docker build -t vieux/apache:2.0 .
|
||||||
|
|
||||||
| This will build like the preview example, but it will then tag the resulting image, the repository name will be 'vieux/apache' and the tag will be '2.0'
|
This will build like the previous example, but it will then tag the
|
||||||
|
resulting image. The repository name will be ``vieux/apache`` and the
|
||||||
|
tag will be ``2.0``
|
||||||
|
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker build - < Dockerfile
|
sudo docker build - < Dockerfile
|
||||||
|
|
||||||
| This will read a Dockerfile from Stdin without context. Due to the lack of a context, no contents of any local directory will be sent to the docker daemon.
|
This will read a ``Dockerfile`` from *stdin* without context. Due to
|
||||||
| ADD doesn't work when running in this mode due to the absence of the context, thus having no source files to copy to the container.
|
the lack of a context, no contents of any local directory will be sent
|
||||||
|
to the ``docker`` daemon. ``ADD`` doesn't work when running in this
|
||||||
|
mode because the absence of the context provides no source files to
|
||||||
|
copy to the container.
|
||||||
|
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker build github.com/creack/docker-firefox
|
sudo docker build github.com/creack/docker-firefox
|
||||||
|
|
||||||
| This will clone the github repository and use it as context. The Dockerfile at the root of the repository is used as Dockerfile.
|
This will clone the Github repository and use it as context. The
|
||||||
| Note that you can specify an arbitrary git repository by using the 'git://' schema.
|
``Dockerfile`` at the root of the repository is used as
|
||||||
|
``Dockerfile``. Note that you can specify an arbitrary git repository
|
||||||
|
by using the ``git://`` schema.
|
||||||
|
|
|
@@ -14,7 +14,8 @@

   -m="": Commit message
   -author="": Author (eg. "John Hannibal Smith <hannibal@a-team.com>"
-  -run="": Config automatically applied when the image is run. "+`(ex: {"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')
+  -run="": Config automatically applied when the image is
+           run. "+`(ex: {"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')

 Full -run example::
@@ -21,6 +21,6 @@ Displaying images visually

 ::

-    docker images -viz | dot -Tpng -o docker.png
+    sudo docker images -viz | dot -Tpng -o docker.png

 .. image:: images/docker_images.gif
@@ -12,10 +12,11 @@

     Create a new filesystem image from the contents of a tarball

-    At this time, the URL must start with ``http`` and point to a single file archive
-    (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz)
-    containing a root filesystem. If you would like to import from a local directory or archive,
-    you can use the ``-`` parameter to take the data from standard in.
+    At this time, the URL must start with ``http`` and point to a single
+    file archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) containing a
+    root filesystem. If you would like to import from a local directory or
+    archive, you can use the ``-`` parameter to take the data from
+    standard in.

 Examples
 --------
@@ -23,19 +24,21 @@ Examples
 Import from a remote location
 .............................

-``$ docker import http://example.com/exampleimage.tgz exampleimagerepo``
+``$ sudo docker import http://example.com/exampleimage.tgz exampleimagerepo``

 Import from a local file
 ........................

 Import to docker via pipe and standard in

-``$ cat exampleimage.tgz | docker import - exampleimagelocal``
+``$ cat exampleimage.tgz | sudo docker import - exampleimagelocal``

 Import from a local directory
 .............................

 ``$ sudo tar -c . | docker import - exampleimagedir``

-Note the ``sudo`` in this example -- you must preserve the ownership of the files (especially root ownership)
-during the archiving with tar. If you are not root (or sudo) when you tar, then the ownerships might not get preserved.
+Note the ``sudo`` in this example -- you must preserve the ownership
+of the files (especially root ownership) during the archiving with
+tar. If you are not root (or sudo) when you tar, then the ownerships
+might not get preserved.
@@ -19,6 +19,7 @@
   -e=[]: Set environment variables
   -h="": Container host name
   -i=false: Keep stdin open even if not attached
+  -privileged=false: Give extended privileges to this container
   -m=0: Memory limit (in bytes)
   -n=true: Enable networking for this container
   -p=[]: Map a network port to the container
@@ -28,6 +29,7 @@
   -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "host-dir" is missing, then docker creates a new volume.
   -volumes-from="": Mount all volumes from the given container.
   -entrypoint="": Overwrite the default entrypoint set by the image.
+  -w="": Working directory inside the container


 Examples
@@ -35,6 +37,48 @@ Examples

 .. code-block:: bash

-    docker run -cidfile /tmp/docker_test.cid ubuntu echo "test"
+    sudo docker run -cidfile /tmp/docker_test.cid ubuntu echo "test"

+This will create a container and print "test" to the console. The
+``cidfile`` flag makes docker attempt to create a new file and write the
+container ID to it. If the file exists already, docker will return an
+error. Docker will close this file when docker run exits.
+
+.. code-block:: bash
+
+   docker run mount -t tmpfs none /var/spool/squid
+
+This will *not* work, because by default, most potentially dangerous
+kernel capabilities are dropped; including ``cap_sys_admin`` (which is
+required to mount filesystems). However, the ``-privileged`` flag will
+allow it to run:
+
+.. code-block:: bash
+
+   docker run -privileged mount -t tmpfs none /var/spool/squid
+
+The ``-privileged`` flag gives *all* capabilities to the container,
+and it also lifts all the limitations enforced by the ``device``
+cgroup controller. In other words, the container can then do almost
+everything that the host can do. This flag exists to allow special
+use-cases, like running Docker within Docker.
+
+.. code-block:: bash
+
+   docker run -w /path/to/dir/ -i -t ubuntu pwd
+
+The ``-w`` flag lets the command be executed inside the given
+directory, here /path/to/dir/. If the path does not exist, it is
+created inside the container.
+
+.. code-block:: bash
+
+   docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd
+
+The ``-v`` flag mounts the current working directory into the container.
+The ``-w`` flag lets the command be executed inside the current
+working directory, by changing into the directory given by the value
+returned by ``pwd``. So this combination executes the command
+using the container, but inside the current working directory.
+
-| This will create a container and print "test" to the console. The cidfile flag makes docker attempt to create a new file and write the container ID to it. If the file exists already, docker will return an error. Docker will close this file when docker run exits.
@@ -10,5 +10,5 @@

     Usage: docker search TERM

-    Searches for the TERM parameter on the Docker index and prints out a list of repositories
-    that match.
+    Searches for the TERM parameter on the Docker index and prints out
+    a list of repositories that match.
@@ -51,9 +51,7 @@ source_suffix = '.rst'
 # The encoding of source files.
 #source_encoding = 'utf-8-sig'

-#disable the permalinks on headers, I find them really annoying
-html_add_permalinks = None
+html_add_permalinks = u'¶'

 # The master toctree document.
 master_doc = 'toctree'
@@ -5,18 +5,23 @@
 Setting Up a Dev Environment
 ============================

-To make it easier to contribute to Docker, we provide a standard development environment. It is important that
-the same environment be used for all tests, builds and releases. The standard development environment defines
-all build dependencies: system libraries and binaries, go environment, go dependencies, etc.
+To make it easier to contribute to Docker, we provide a standard
+development environment. It is important that the same environment be
+used for all tests, builds and releases. The standard development
+environment defines all build dependencies: system libraries and
+binaries, go environment, go dependencies, etc.


 Step 1: install docker
 ----------------------

-Docker's build environment itself is a docker container, so the first step is to install docker on your system.
+Docker's build environment itself is a Docker container, so the first
+step is to install docker on your system.

-You can follow the `install instructions most relevant to your system <https://docs.docker.io/en/latest/installation/>`.
-Make sure you have a working, up-to-date docker installation, then continue to the next step.
+You can follow the `install instructions most relevant to your system
+<https://docs.docker.io/en/latest/installation/>`_. Make sure you have
+a working, up-to-date docker installation, then continue to the next
+step.


 Step 2: check out the source
@@ -35,24 +40,24 @@ When you are ready to build docker, run this command:

 ::

-    docker build -t docker .
+    sudo docker build -t docker .

-This will build the revision currently checked out in the repository. Feel free to check out the version
-of your choice.
+This will build the revision currently checked out in the
+repository. Feel free to check out the version of your choice.

-If the build is successful, congratulations! You have produced a clean build of docker, neatly encapsulated
-in a standard build environment.
+If the build is successful, congratulations! You have produced a clean
+build of docker, neatly encapsulated in a standard build environment.

 You can run an interactive session in the newly built container:

 ::

-    docker run -i -t docker bash
+    sudo docker run -i -t docker bash


 To extract the binaries from the container:

 ::

-    docker run docker sh -c 'cat $(which docker)' > docker-build && chmod +x docker-build
+    sudo docker run docker sh -c 'cat $(which docker)' > docker-build && chmod +x docker-build
@ -9,27 +9,29 @@ CouchDB Service
|
||||||
|
|
||||||
.. include:: example_header.inc
|
.. include:: example_header.inc
|
||||||
|
|
||||||
Here's an example of using data volumes to share the same data between 2 couchdb containers.
|
Here's an example of using data volumes to share the same data between
|
||||||
This could be used for hot upgrades, testing different versions of couchdb on the same data, etc.
|
2 CouchDB containers. This could be used for hot upgrades, testing
|
||||||
|
different versions of CouchDB on the same data, etc.
|
||||||
|
|
||||||
Create first database
|
Create first database
|
||||||
---------------------
|
---------------------
|
||||||
|
|
||||||
Note that we're marking /var/lib/couchdb as a data volume.
|
Note that we're marking ``/var/lib/couchdb`` as a data volume.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
COUCH1=$(docker run -d -v /var/lib/couchdb shykes/couchdb:2013-05-03)
|
COUCH1=$(sudo docker run -d -v /var/lib/couchdb shykes/couchdb:2013-05-03)
|
||||||
|
|
||||||
Add data to the first database
|
Add data to the first database
|
||||||
------------------------------
|
------------------------------
|
||||||
|
|
||||||
We're assuming your docker host is reachable at `localhost`. If not, replace `localhost` with the public IP of your docker host.
|
We're assuming your docker host is reachable at `localhost`. If not,
|
||||||
|
replace `localhost` with the public IP of your docker host.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
HOST=localhost
|
HOST=localhost
|
||||||
URL="http://$HOST:$(docker port $COUCH1 5984)/_utils/"
|
URL="http://$HOST:$(sudo docker port $COUCH1 5984)/_utils/"
|
||||||
echo "Navigate to $URL in your browser, and use the couch interface to add data"
|
echo "Navigate to $URL in your browser, and use the couch interface to add data"
|
||||||
|
|
||||||
Create second database
|
Create second database
|
||||||
|
@ -39,7 +41,7 @@ This time, we're requesting shared access to $COUCH1's volumes.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
COUCH2=$(docker run -d -volumes-from $COUCH1 shykes/couchdb:2013-05-03)
|
COUCH2=$(sudo docker run -d -volumes-from $COUCH1 shykes/couchdb:2013-05-03)
|
||||||
|
|
||||||
Browse data on the second database
|
Browse data on the second database
|
||||||
----------------------------------
|
----------------------------------
|
||||||
|
@ -47,7 +49,8 @@ Browse data on the second database
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
HOST=localhost
|
HOST=localhost
|
||||||
URL="http://$HOST:$(docker port $COUCH2 5984)/_utils/"
|
URL="http://$HOST:$(sudo docker port $COUCH2 5984)/_utils/"
|
||||||
echo "Navigate to $URL in your browser. You should see the same data as in the first database"'!'
|
echo "Navigate to $URL in your browser. You should see the same data as in the first database"'!'
|
||||||
|
|
||||||
Congratulations, you are running 2 Couchdb containers, completely isolated from each other *except* for their data.
|
Congratulations, you are running 2 CouchDB containers, completely
|
||||||
|
isolated from each other *except* for their data.
|
||||||
|
|
|
@ -11,26 +11,28 @@ Hello World
|
||||||
|
|
||||||
This is the most basic example available for using Docker.
|
This is the most basic example available for using Docker.
|
||||||
|
|
||||||
Download the base container
|
Download the base image (named "ubuntu"):
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
# Download an ubuntu image
|
# Download an ubuntu image
|
||||||
docker pull ubuntu
|
sudo docker pull ubuntu
|
||||||
|
|
||||||
The *base* image is a minimal *ubuntu* based container, alternatively you can select *busybox*, a bare
|
As an alternative to the *ubuntu* image, you can select *busybox*, a
|
||||||
minimal linux system. The images are retrieved from the docker repository.
|
minimal Linux system. The images are retrieved from the Docker
|
||||||
|
repository.
|
||||||
|
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
#run a simple echo command, that will echo hello world back to the console over standard out.
|
#run a simple echo command, that will echo hello world back to the console over standard out.
|
||||||
docker run base /bin/echo hello world
|
sudo docker run ubuntu /bin/echo hello world
|
||||||
|
|
||||||
**Explanation:**
|
**Explanation:**
|
||||||
|
|
||||||
|
- **"sudo"** execute the following commands as user *root*
|
||||||
- **"docker run"** run a command in a new container
|
- **"docker run"** run a command in a new container
|
||||||
- **"base"** is the image we want to run the command inside of.
|
- **"ubuntu"** is the image we want to run the command inside of.
|
||||||
- **"/bin/echo"** is the command we want to run in the container
|
- **"/bin/echo"** is the command we want to run in the container
|
||||||
- **"hello world"** is the input for the echo command
|
- **"hello world"** is the input for the echo command
|
||||||
|
|
||||||
|
|
|
@ -11,27 +11,35 @@ Hello World Daemon
|
||||||
|
|
||||||
The most boring daemon ever written.
|
The most boring daemon ever written.
|
||||||
|
|
||||||
This example assumes you have Docker installed and with the ubuntu image already imported ``docker pull ubuntu``.
|
This example assumes you have Docker installed and the Ubuntu
|
||||||
We will use the ubuntu image to run a simple hello world daemon that will just print hello world to standard
|
image already imported with ``docker pull ubuntu``. We will use the Ubuntu
|
||||||
out every second. It will continue to do this until we stop it.
|
image to run a simple hello world daemon that will just print hello
|
||||||
|
world to standard out every second. It will continue to do this until
|
||||||
|
we stop it.
|
||||||
|
|
||||||
**Steps:**
|
**Steps:**
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
CONTAINER_ID=$(docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done")
|
CONTAINER_ID=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done")
|
||||||
|
|
||||||
We are going to run a simple hello world daemon in a new container made from the ubuntu image.
|
We are going to run a simple hello world daemon in a new container
|
||||||
|
made from the *ubuntu* image.
|
||||||
|
|
||||||
- **"docker run -d "** run a command in a new container. We pass "-d" so it runs as a daemon.
|
- **"docker run -d "** run a command in a new container. We pass "-d"
|
||||||
|
so it runs as a daemon.
|
||||||
- **"ubuntu"** is the image we want to run the command inside of.
|
- **"ubuntu"** is the image we want to run the command inside of.
|
||||||
- **"/bin/sh -c"** is the command we want to run in the container
|
- **"/bin/sh -c"** is the command we want to run in the container
|
||||||
- **"while true; do echo hello world; sleep 1; done"** is the mini script we want to run, that will just print hello world once a second until we stop it.
|
- **"while true; do echo hello world; sleep 1; done"** is the mini
|
||||||
- **$CONTAINER_ID** the output of the run command will return a container id, we can use in future commands to see what is going on with this process.
|
script we want to run, that will just print hello world once a
|
||||||
|
second until we stop it.
|
||||||
|
- **$CONTAINER_ID** the output of the run command will return a
|
||||||
|
container id, we can use in future commands to see what is going on
|
||||||
|
with this process.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker logs $CONTAINER_ID
|
sudo docker logs $CONTAINER_ID
|
||||||
|
|
||||||
Check the logs make sure it is working correctly.
|
Check the logs to make sure it is working correctly.
|
||||||
|
|
||||||
|
@ -40,16 +48,17 @@ Check the logs make sure it is working correctly.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker attach $CONTAINER_ID
|
sudo docker attach $CONTAINER_ID
|
||||||
|
|
||||||
Attach to the container to see the results in realtime.
|
Attach to the container to see the results in realtime.
|
||||||
|
|
||||||
- **"docker attach**" This will allow us to attach to a background process to see what is going on.
|
- **"docker attach**" This will allow us to attach to a background
|
||||||
|
process to see what is going on.
|
||||||
- **$CONTAINER_ID** The Id of the container we want to attach too.
|
- **$CONTAINER_ID** The ID of the container we want to attach to.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker ps
|
sudo docker ps
|
||||||
|
|
||||||
Check the process list to make sure it is running.
|
Check the process list to make sure it is running.
|
||||||
|
|
||||||
|
@ -57,7 +66,7 @@ Check the process list to make sure it is running.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker stop $CONTAINER_ID
|
sudo docker stop $CONTAINER_ID
|
||||||
|
|
||||||
Stop the container, since we don't need it anymore.
|
Stop the container, since we don't need it anymore.
|
||||||
|
|
||||||
|
@ -66,7 +75,7 @@ Stop the container, since we don't need it anymore.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker ps
|
sudo docker ps
|
||||||
|
|
||||||
Make sure it is really stopped.
|
Make sure it is really stopped.
|
||||||
|
|
||||||
|
|
|
@ -9,10 +9,11 @@ Node.js Web App
|
||||||
|
|
||||||
.. include:: example_header.inc
|
.. include:: example_header.inc
|
||||||
|
|
||||||
The goal of this example is to show you how you can build your own docker images
|
The goal of this example is to show you how you can build your own
|
||||||
from a parent image using a ``Dockerfile`` . We will do that by making a simple
|
docker images from a parent image using a ``Dockerfile`` . We will do
|
||||||
Node.js hello world web application running on CentOS. You can get the full
|
that by making a simple Node.js hello world web application running on
|
||||||
source code at https://github.com/gasi/docker-node-hello.
|
CentOS. You can get the full source code at
|
||||||
|
https://github.com/gasi/docker-node-hello.
|
||||||
|
|
||||||
Create Node.js app
|
Create Node.js app
|
||||||
++++++++++++++++++
|
++++++++++++++++++
|
||||||
|
@ -109,16 +110,17 @@ Install your app dependencies using npm:
|
||||||
# Install app dependencies
|
# Install app dependencies
|
||||||
RUN cd /src; npm install
|
RUN cd /src; npm install
|
||||||
|
|
||||||
Your app binds to port ``8080`` so you’ll use the ``EXPOSE`` command to have it
|
Your app binds to port ``8080`` so you’ll use the ``EXPOSE`` command
|
||||||
mapped by the docker daemon:
|
to have it mapped by the docker daemon:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
EXPOSE 8080
|
EXPOSE 8080
|
||||||
|
|
||||||
Last but not least, define the command to run your app using ``CMD`` which
|
Last but not least, define the command to run your app using ``CMD``
|
||||||
defines your runtime, i.e. ``node``, and the path to our app, i.e.
|
which defines your runtime, i.e. ``node``, and the path to our app,
|
||||||
``src/index.js`` (see the step where we added the source to the container):
|
i.e. ``src/index.js`` (see the step where we added the source to the
|
||||||
|
container):
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
|
@ -149,19 +151,20 @@ Your ``Dockerfile`` should now look like this:
|
||||||
Building your image
|
Building your image
|
||||||
+++++++++++++++++++
|
+++++++++++++++++++
|
||||||
|
|
||||||
Go to the directory that has your ``Dockerfile`` and run the following command
|
Go to the directory that has your ``Dockerfile`` and run the following
|
||||||
to build a docker image. The ``-t`` flag let’s you tag your image so it’s easier
|
command to build a docker image. The ``-t`` flag lets you tag your
|
||||||
to find later using the ``docker images`` command:
|
image so it’s easier to find later using the ``docker images``
|
||||||
|
command:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker build -t <your username>/centos-node-hello .
|
sudo docker build -t <your username>/centos-node-hello .
|
||||||
|
|
||||||
Your image will now be listed by docker:
|
Your image will now be listed by docker:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker images
|
sudo docker images
|
||||||
|
|
||||||
> # Example
|
> # Example
|
||||||
> REPOSITORY TAG ID CREATED
|
> REPOSITORY TAG ID CREATED
|
||||||
|
@ -177,17 +180,17 @@ container running in the background. Run the image you previously built:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker run -d <your username>/centos-node-hello
|
sudo docker run -d <your username>/centos-node-hello
|
||||||
|
|
||||||
Print the output of your app:
|
Print the output of your app:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
# Get container ID
|
# Get container ID
|
||||||
docker ps
|
sudo docker ps
|
||||||
|
|
||||||
# Print app output
|
# Print app output
|
||||||
docker logs <container id>
|
sudo docker logs <container id>
|
||||||
|
|
||||||
> # Example
|
> # Example
|
||||||
> Running on http://localhost:8080
|
> Running on http://localhost:8080
|
||||||
|
@ -225,8 +228,8 @@ Now you can call your app using ``curl`` (install if needed via:
|
||||||
>
|
>
|
||||||
> Hello World
|
> Hello World
|
||||||
|
|
||||||
We hope this tutorial helped you get up and running with Node.js and CentOS on
|
We hope this tutorial helped you get up and running with Node.js and
|
||||||
docker. You can get the full source code at
|
CentOS on docker. You can get the full source code at
|
||||||
https://github.com/gasi/docker-node-hello.
|
https://github.com/gasi/docker-node-hello.
|
||||||
|
|
||||||
Continue to :ref:`running_redis_service`.
|
Continue to :ref:`running_redis_service`.
|
||||||
|
|
|
@ -31,7 +31,7 @@ Run an interactive shell in Docker container.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker run -i -t ubuntu /bin/bash
|
sudo docker run -i -t ubuntu /bin/bash
|
||||||
|
|
||||||
Update its dependencies.
|
Update its dependencies.
|
||||||
|
|
||||||
|
@ -60,9 +60,9 @@ Finally, install PostgreSQL 9.2
|
||||||
|
|
||||||
apt-get -y install postgresql-9.2 postgresql-client-9.2 postgresql-contrib-9.2
|
apt-get -y install postgresql-9.2 postgresql-client-9.2 postgresql-contrib-9.2
|
||||||
|
|
||||||
Now, create a PostgreSQL superuser role that can create databases and other roles.
|
Now, create a PostgreSQL superuser role that can create databases and
|
||||||
Following Vagrant's convention the role will be named `docker` with `docker`
|
other roles. Following Vagrant's convention the role will be named
|
||||||
password assigned to it.
|
`docker`, with the password `docker` assigned to it.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
|
@ -75,27 +75,27 @@ role.
|
||||||
|
|
||||||
sudo -u postgres createdb -O docker docker
|
sudo -u postgres createdb -O docker docker
|
||||||
|
|
||||||
Adjust PostgreSQL configuration so that remote connections to the database are
|
Adjust PostgreSQL configuration so that remote connections to the
|
||||||
possible. Make sure that inside ``/etc/postgresql/9.2/main/pg_hba.conf`` you have
|
database are possible. Make sure that inside
|
||||||
following line:
|
``/etc/postgresql/9.2/main/pg_hba.conf`` you have the following line:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
host all all 0.0.0.0/0 md5
|
host all all 0.0.0.0/0 md5
|
||||||
|
|
||||||
Additionaly, inside ``/etc/postgresql/9.2/main/postgresql.conf`` uncomment
|
Additionally, inside ``/etc/postgresql/9.2/main/postgresql.conf``
|
||||||
``listen_address`` so it is as follows:
|
uncomment ``listen_addresses`` so it is as follows:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
listen_address='*'
|
listen_addresses='*'
|
||||||
|
|
||||||
*Note:* this PostgreSQL setup is for development only purposes. Refer to
|
*Note:* this PostgreSQL setup is for development purposes only. Refer
|
||||||
PostgreSQL documentation how to fine-tune these settings so that it is enough
|
to the PostgreSQL documentation for how to fine-tune these settings so that it
|
||||||
secure.
|
is sufficiently secure.
|
||||||
|
|
||||||
Create an image and assign it a name. ``<container_id>`` is in the Bash prompt;
|
Create an image and assign it a name. ``<container_id>`` is in the
|
||||||
you can also locate it using ``docker ps -a``.
|
Bash prompt; you can also locate it using ``docker ps -a``.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
|
@ -105,7 +105,7 @@ Finally, run PostgreSQL server via ``docker``.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
CONTAINER=$(docker run -d -p 5432 \
|
CONTAINER=$(sudo docker run -d -p 5432 \
|
||||||
-t <your username>/postgresql \
|
-t <your username>/postgresql \
|
||||||
/bin/su postgres -c '/usr/lib/postgresql/9.2/bin/postgres \
|
/bin/su postgres -c '/usr/lib/postgresql/9.2/bin/postgres \
|
||||||
-D /var/lib/postgresql/9.2/main \
|
-D /var/lib/postgresql/9.2/main \
|
||||||
|
@ -115,7 +115,7 @@ Connect the PostgreSQL server using ``psql``.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
CONTAINER_IP=$(docker inspect $CONTAINER | grep IPAddress | awk '{ print $2 }' | tr -d ',"')
|
CONTAINER_IP=$(sudo docker inspect $CONTAINER | grep IPAddress | awk '{ print $2 }' | tr -d ',"')
|
||||||
psql -h $CONTAINER_IP -p 5432 -d docker -U docker -W
|
psql -h $CONTAINER_IP -p 5432 -d docker -U docker -W
|
||||||
|
|
||||||
As before, create roles or databases if needed.
|
As before, create roles or databases if needed.
|
||||||
|
@ -132,13 +132,13 @@ Additionally, publish there your newly created image on Docker Index.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker login
|
sudo docker login
|
||||||
Username: <your username>
|
Username: <your username>
|
||||||
[...]
|
[...]
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker push <your username>/postgresql
|
sudo docker push <your username>/postgresql
|
||||||
|
|
||||||
PostgreSQL service auto-launch
|
PostgreSQL service auto-launch
|
||||||
------------------------------
|
------------------------------
|
||||||
|
@ -149,10 +149,10 @@ container starts.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker commit <container_id> <your username>/postgresql -run='{"Cmd": \
|
sudo docker commit <container_id> <your username>/postgresql -run='{"Cmd": \
|
||||||
["/bin/su", "postgres", "-c", "/usr/lib/postgresql/9.2/bin/postgres -D \
|
["/bin/su", "postgres", "-c", "/usr/lib/postgresql/9.2/bin/postgres -D \
|
||||||
/var/lib/postgresql/9.2/main -c \
|
/var/lib/postgresql/9.2/main -c \
|
||||||
config_file=/etc/postgresql/9.2/main/postgresql.conf"], PortSpecs": ["5432"]}
|
config_file=/etc/postgresql/9.2/main/postgresql.conf"], "PortSpecs": ["5432"]}'
|
||||||
|
|
||||||
From now on, just type ``docker run <your username>/postgresql`` and PostgreSQL
|
From now on, just type ``sudo docker run <your username>/postgresql`` and
|
||||||
should automatically start.
|
PostgreSQL should automatically start.
|
||||||
|
|
|
@ -9,13 +9,16 @@ Python Web App
|
||||||
|
|
||||||
.. include:: example_header.inc
|
.. include:: example_header.inc
|
||||||
|
|
||||||
The goal of this example is to show you how you can author your own docker images using a parent image, making changes to it, and then saving the results as a new image. We will do that by making a simple hello flask web application image.
|
The goal of this example is to show you how you can author your own
|
||||||
|
docker images using a parent image, making changes to it, and then
|
||||||
|
saving the results as a new image. We will do that by making a simple
|
||||||
|
hello flask web application image.
|
||||||
|
|
||||||
**Steps:**
|
**Steps:**
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker pull shykes/pybuilder
|
sudo docker pull shykes/pybuilder
|
||||||
|
|
||||||
We are downloading the "shykes/pybuilder" docker image
|
We are downloading the "shykes/pybuilder" docker image
|
||||||
|
|
||||||
|
@ -27,52 +30,66 @@ We set a URL variable that points to a tarball of a simple helloflask web app
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
BUILD_JOB=$(docker run -d -t shykes/pybuilder:latest /usr/local/bin/buildapp $URL)
|
BUILD_JOB=$(sudo docker run -d -t shykes/pybuilder:latest /usr/local/bin/buildapp $URL)
|
||||||
|
|
||||||
Inside of the "shykes/pybuilder" image there is a command called buildapp, we are running that command and passing the $URL variable from step 2 to it, and running the whole thing inside of a new container. BUILD_JOB will be set with the new container_id.
|
Inside of the "shykes/pybuilder" image there is a command called
|
||||||
|
buildapp, we are running that command and passing the $URL variable
|
||||||
|
from step 2 to it, and running the whole thing inside of a new
|
||||||
|
container. BUILD_JOB will be set with the new container_id.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker attach $BUILD_JOB
|
sudo docker attach $BUILD_JOB
|
||||||
[...]
|
[...]
|
||||||
|
|
||||||
While this container is running, we can attach to the new container to see what is going on. Ctrl-C to disconnect.
|
While this container is running, we can attach to the new container to
|
||||||
|
see what is going on. Ctrl-C to disconnect.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker ps -a
|
sudo docker ps -a
|
||||||
|
|
||||||
List all docker containers. If this container has already finished running, it will still be listed here.
|
List all docker containers. If this container has already finished
|
||||||
|
running, it will still be listed here.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
BUILD_IMG=$(docker commit $BUILD_JOB _/builds/github.com/shykes/helloflask/master)
|
BUILD_IMG=$(sudo docker commit $BUILD_JOB _/builds/github.com/shykes/helloflask/master)
|
||||||
|
|
||||||
Save the changes we just made in the container to a new image called "_/builds/github.com/hykes/helloflask/master" and save the image id in the BUILD_IMG variable name.
|
Save the changes we just made in the container to a new image called
|
||||||
|
``_/builds/github.com/shykes/helloflask/master`` and save the image id in
|
||||||
|
the BUILD_IMG variable name.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
WEB_WORKER=$(docker run -d -p 5000 $BUILD_IMG /usr/local/bin/runapp)
|
WEB_WORKER=$(sudo docker run -d -p 5000 $BUILD_IMG /usr/local/bin/runapp)
|
||||||
|
|
||||||
- **"docker run -d "** run a command in a new container. We pass "-d" so it runs as a daemon.
|
- **"docker run -d "** run a command in a new container. We pass "-d"
|
||||||
- **"-p 5000"** the web app is going to listen on this port, so it must be mapped from the container to the host system.
|
so it runs as a daemon.
|
||||||
|
- **"-p 5000"** the web app is going to listen on this port, so it
|
||||||
|
must be mapped from the container to the host system.
|
||||||
- **"$BUILD_IMG"** is the image we want to run the command inside of.
|
- **"$BUILD_IMG"** is the image we want to run the command inside of.
|
||||||
- **/usr/local/bin/runapp** is the command which starts the web app.
|
- **/usr/local/bin/runapp** is the command which starts the web app.
|
||||||
|
|
||||||
Use the new image we just created and create a new container with network port 5000, and return the container id and store in the WEB_WORKER variable.
|
Use the new image we just created and create a new container with
|
||||||
|
network port 5000, and return the container id and store in the
|
||||||
|
WEB_WORKER variable.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker logs $WEB_WORKER
|
sudo docker logs $WEB_WORKER
|
||||||
* Running on http://0.0.0.0:5000/
|
* Running on http://0.0.0.0:5000/
|
||||||
|
|
||||||
View the logs for the new container using the WEB_WORKER variable, and if everything worked as planned you should see the line "Running on http://0.0.0.0:5000/" in the log output.
|
View the logs for the new container using the WEB_WORKER variable, and
|
||||||
|
if everything worked as planned you should see the line "Running on
|
||||||
|
http://0.0.0.0:5000/" in the log output.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
WEB_PORT=$(docker port $WEB_WORKER 5000)
|
WEB_PORT=$(sudo docker port $WEB_WORKER 5000)
|
||||||
|
|
||||||
Look up the public-facing port which is NAT-ed. Find the private port used by the container and store it inside of the WEB_PORT variable.
|
Look up the public-facing port which is NAT-ed. Find the private port
|
||||||
|
used by the container and store it inside of the WEB_PORT variable.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
|
@ -80,7 +97,8 @@ Look up the public-facing port which is NAT-ed. Find the private port used by th
|
||||||
curl http://127.0.0.1:$WEB_PORT
|
curl http://127.0.0.1:$WEB_PORT
|
||||||
Hello world!
|
Hello world!
|
||||||
|
|
||||||
Access the web app using curl. If everything worked as planned you should see the line "Hello world!" inside of your console.
|
Access the web app using curl. If everything worked as planned you
|
||||||
|
should see the line "Hello world!" inside of your console.
|
||||||
|
|
||||||
**Video:**
|
**Video:**
|
||||||
|
|
||||||
|
|
|
@ -7,16 +7,17 @@
|
||||||
Running the Examples
|
Running the Examples
|
||||||
--------------------
|
--------------------
|
||||||
|
|
||||||
All the examples assume your machine is running the docker daemon. To run the docker daemon in the background, simply type:
|
All the examples assume your machine is running the docker daemon. To
|
||||||
|
run the docker daemon in the background, simply type:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
sudo docker -d &
|
sudo docker -d &
|
||||||
|
|
||||||
Now you can run docker in client mode: all commands will be forwarded to the docker daemon, so the client
|
Now you can run docker in client mode: by default all commands will be
|
||||||
can run from any account.
|
forwarded to the ``docker`` daemon via a protected Unix socket, so you
|
||||||
|
must run as root.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
# now you can run docker commands from any account.
|
sudo docker help
|
||||||
docker help
|
|
||||||
|
|
|
@ -16,12 +16,13 @@ Open a docker container
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker run -i -t base /bin/bash
|
sudo docker run -i -t ubuntu /bin/bash
|
||||||
|
|
||||||
Building your image
|
Building your image
|
||||||
-------------------
|
-------------------
|
||||||
|
|
||||||
Update your docker container, install the redis server. Once installed, exit out of docker.
|
Update your Docker container, install the Redis server. Once
|
||||||
|
installed, exit out of the Docker container.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
|
@ -45,7 +46,7 @@ container running in the background. Use your snapshot.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker run -d -p 6379 <your username>/redis /usr/bin/redis-server
|
sudo docker run -d -p 6379 <your username>/redis /usr/bin/redis-server
|
||||||
|
|
||||||
Test 1
|
Test 1
|
||||||
++++++
|
++++++
|
||||||
|
@ -54,8 +55,8 @@ Connect to the container with the redis-cli.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker ps # grab the new container id
|
sudo docker ps # grab the new container id
|
||||||
docker inspect <container_id> # grab the ipaddress of the container
|
sudo docker inspect <container_id> # grab the ipaddress of the container
|
||||||
redis-cli -h <ipaddress> -p 6379
|
redis-cli -h <ipaddress> -p 6379
|
||||||
redis 10.0.3.32:6379> set docker awesome
|
redis 10.0.3.32:6379> set docker awesome
|
||||||
OK
|
OK
|
||||||
|
@ -70,8 +71,8 @@ Connect to the host os with the redis-cli.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker ps # grab the new container id
|
sudo docker ps # grab the new container id
|
||||||
docker port <container_id> 6379 # grab the external port
|
sudo docker port <container_id> 6379 # grab the external port
|
||||||
ip addr show # grab the host ip address
|
ip addr show # grab the host ip address
|
||||||
redis-cli -h <host ipaddress> -p <external port>
|
redis-cli -h <host ipaddress> -p <external port>
|
||||||
redis 192.168.0.1:49153> set docker awesome
|
redis 192.168.0.1:49153> set docker awesome
|
||||||
|
|
|
@ -12,8 +12,16 @@ SSH Daemon Service
|
||||||
|
|
||||||
**Video:**
|
**Video:**
|
||||||
|
|
||||||
I've create a little screencast to show how to create a sshd service and connect to it. It is something like 11
|
I've created a little screencast to show how to create an sshd service
|
||||||
minutes and not entirely smooth, but gives you a good idea.
|
and connect to it. It is something like 11 minutes and not entirely
|
||||||
|
smooth, but gives you a good idea.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
This screencast was created before ``docker`` version 0.5.2, so the
|
||||||
|
daemon is unprotected and available via a TCP port. When you run
|
||||||
|
through the same steps in a newer version of ``docker``, you will
|
||||||
|
need to add ``sudo`` in front of each ``docker`` command in order
|
||||||
|
to reach the daemon over its protected Unix socket.
|
||||||
|
|
||||||
.. raw:: html
|
.. raw:: html
|
||||||
|
|
||||||
|
@ -24,7 +32,7 @@ minutes and not entirely smooth, but gives you a good idea.
|
||||||
You can also get this sshd container by using
|
You can also get this sshd container by using
|
||||||
::
|
::
|
||||||
|
|
||||||
docker pull dhrp/sshd
|
sudo docker pull dhrp/sshd
|
||||||
|
|
||||||
|
|
||||||
The password is 'screencast'
|
The password is 'screencast'
|
||||||
|
|
|
@ -9,40 +9,140 @@ FAQ
|
||||||
Most frequently asked questions.
|
Most frequently asked questions.
|
||||||
--------------------------------
|
--------------------------------
|
||||||
|
|
||||||
1. **How much does Docker cost?**
|
How much does Docker cost?
|
||||||
|
..........................
|
||||||
|
|
||||||
Docker is 100% free, it is open source, so you can use it without paying.
|
Docker is 100% free; it is open source, so you can use it without paying.
|
||||||
|
|
||||||
2. **What open source license are you using?**
|
What open source license are you using?
|
||||||
|
.......................................
|
||||||
|
|
||||||
We are using the Apache License Version 2.0, see it here: https://github.com/dotcloud/docker/blob/master/LICENSE
|
We are using the Apache License Version 2.0, see it here:
|
||||||
|
https://github.com/dotcloud/docker/blob/master/LICENSE
|
||||||
|
|
||||||
3. **Does Docker run on Mac OS X or Windows?**
|
Does Docker run on Mac OS X or Windows?
|
||||||
|
.......................................
|
||||||
|
|
||||||
Not at this time, Docker currently only runs on Linux, but you can use VirtualBox to run Docker in a
|
Not at this time, Docker currently only runs on Linux, but you can
|
||||||
virtual machine on your box, and get the best of both worlds. Check out the :ref:`install_using_vagrant` and :ref:`windows` installation guides.
|
use VirtualBox to run Docker in a virtual machine on your box, and
|
||||||
|
get the best of both worlds. Check out the
|
||||||
|
:ref:`install_using_vagrant` and :ref:`windows` installation
|
||||||
|
guides.
|
||||||
|
|
||||||
4. **How do containers compare to virtual machines?**
|
How do containers compare to virtual machines?
|
||||||
|
..............................................
|
||||||
|
|
||||||
They are complementary. VMs are best used to allocate chunks of hardware resources. Containers operate at the process level, which makes them very lightweight and perfect as a unit of software delivery.
|
They are complementary. VMs are best used to allocate chunks of
|
||||||
|
hardware resources. Containers operate at the process level, which
|
||||||
|
makes them very lightweight and perfect as a unit of software
|
||||||
|
delivery.
|
||||||
|
|
||||||
5. **Can I help by adding some questions and answers?**
|
What does Docker add to just plain LXC?
|
||||||
|
.......................................
|
||||||
|
|
||||||
|
Docker is not a replacement for LXC. "LXC" refers to capabilities
|
||||||
|
of the Linux kernel (specifically namespaces and control groups)
|
||||||
|
which allow sandboxing processes from one another, and controlling
|
||||||
|
their resource allocations. On top of this low-level foundation of
|
||||||
|
kernel features, Docker offers a high-level tool with several
|
||||||
|
powerful functionalities:
|
||||||
|
|
||||||
|
* *Portable deployment across machines.*
|
||||||
|
Docker defines a format for bundling an application and all its
|
||||||
|
dependencies into a single object which can be transferred to
|
||||||
|
any Docker-enabled machine, and executed there with the
|
||||||
|
guarantee that the execution environment exposed to the
|
||||||
|
application will be the same. LXC implements process sandboxing,
|
||||||
|
which is an important pre-requisite for portable deployment, but
|
||||||
|
that alone is not enough for portable deployment. If you sent me
|
||||||
|
a copy of your application installed in a custom LXC
|
||||||
|
configuration, it would almost certainly not run on my machine
|
||||||
|
the way it does on yours, because it is tied to your machine's
|
||||||
|
specific configuration: networking, storage, logging, distro,
|
||||||
|
etc. Docker defines an abstraction for these machine-specific
|
||||||
|
settings, so that the exact same Docker container can run -
|
||||||
|
unchanged - on many different machines, with many different
|
||||||
|
configurations.
|
||||||
|
|
||||||
|
* *Application-centric.*
|
||||||
|
Docker is optimized for the deployment of applications, as
|
||||||
|
opposed to machines. This is reflected in its API, user
|
||||||
|
interface, design philosophy and documentation. By contrast, the
|
||||||
|
``lxc`` helper scripts focus on containers as lightweight
|
||||||
|
machines - basically servers that boot faster and need less
|
||||||
|
RAM. We think there's more to containers than just that.
|
||||||
|
|
||||||
|
* *Automatic build.*
|
||||||
|
Docker includes :ref:`a tool for developers to automatically
|
||||||
|
assemble a container from their source code <dockerbuilder>`,
|
||||||
|
with full control over application dependencies, build tools,
|
||||||
|
packaging etc. They are free to use ``make, maven, chef, puppet,
|
||||||
|
salt,`` Debian packages, RPMs, source tarballs, or any
|
||||||
|
combination of the above, regardless of the configuration of the
|
||||||
|
machines.
|
||||||
|
|
||||||
|
* *Versioning.*
|
||||||
|
Docker includes git-like capabilities for tracking successive
|
||||||
|
versions of a container, inspecting the diff between versions,
|
||||||
|
committing new versions, rolling back etc. The history also
|
||||||
|
includes how a container was assembled and by whom, so you get
|
||||||
|
full traceability from the production server all the way back to
|
||||||
|
the upstream developer. Docker also implements incremental
|
||||||
|
uploads and downloads, similar to ``git pull``, so new versions
|
||||||
|
of a container can be transferred by only sending diffs.
|
||||||
|
|
||||||
|
* *Component re-use.*
|
||||||
|
Any container can be used as a :ref:`"base image"
|
||||||
|
<base_image_def>` to create more specialized components. This
|
||||||
|
can be done manually or as part of an automated build. For
|
||||||
|
example you can prepare the ideal Python environment, and use it
|
||||||
|
as a base for 10 different applications. Your ideal Postgresql
|
||||||
|
setup can be re-used for all your future projects. And so on.
|
||||||
|
|
||||||
|
* *Sharing.*
|
||||||
|
Docker has access to a `public registry
|
||||||
|
<http://index.docker.io>`_ where thousands of people have
|
||||||
|
uploaded useful containers: anything from Redis, CouchDB,
|
||||||
|
Postgres to IRC bouncers to Rails app servers to Hadoop to base
|
||||||
|
images for various Linux distros. The :ref:`registry
|
||||||
|
<registryindexspec>` also includes an official "standard
|
||||||
|
library" of useful containers maintained by the Docker team. The
|
||||||
|
registry itself is open-source, so anyone can deploy their own
|
||||||
|
registry to store and transfer private containers, for internal
|
||||||
|
server deployments for example.
|
||||||
|
|
||||||
|
* *Tool ecosystem.*
|
||||||
|
Docker defines an API for automating and customizing the
|
||||||
|
creation and deployment of containers. There are a huge number
|
||||||
|
of tools integrating with Docker to extend its
|
||||||
|
capabilities. PaaS-like deployment (Dokku, Deis, Flynn),
|
||||||
|
multi-node orchestration (Maestro, Salt, Mesos, Openstack Nova),
|
||||||
|
management dashboards (docker-ui, Openstack Horizon, Shipyard),
|
||||||
|
configuration management (Chef, Puppet), continuous integration
|
||||||
|
(Jenkins, Strider, Travis), etc. Docker is rapidly establishing
|
||||||
|
itself as the standard for container-based tooling.
|
||||||
|
|
||||||
|
Can I help by adding some questions and answers?
|
||||||
|
................................................
|
||||||
|
|
||||||
Definitely! You can fork `the repo`_ and edit the documentation sources.
|
Definitely! You can fork `the repo`_ and edit the documentation sources.
|
||||||
|
|
||||||
|
|
||||||
42. **Where can I find more answers?**
|
Where can I find more answers?
|
||||||
|
..............................
|
||||||
|
|
||||||
You can find more answers on:
|
You can find more answers on:
|
||||||
|
|
||||||
* `Docker club mailinglist`_
|
* `Docker user mailinglist`_
|
||||||
|
* `Docker developer mailinglist`_
|
||||||
* `IRC, docker on freenode`_
|
* `IRC, docker on freenode`_
|
||||||
* `Github`_
|
* `Github`_
|
||||||
* `Ask questions on Stackoverflow`_
|
* `Ask questions on Stackoverflow`_
|
||||||
* `Join the conversation on Twitter`_
|
* `Join the conversation on Twitter`_
|
||||||
|
|
||||||
|
|
||||||
.. _Docker club mailinglist: https://groups.google.com/d/forum/docker-club
|
.. _Docker user mailinglist: https://groups.google.com/d/forum/docker-user
|
||||||
|
.. _Docker developer mailinglist: https://groups.google.com/d/forum/docker-dev
|
||||||
.. _the repo: http://www.github.com/dotcloud/docker
|
.. _the repo: http://www.github.com/dotcloud/docker
|
||||||
.. _IRC, docker on freenode: irc://chat.freenode.net#docker
|
.. _IRC, docker on freenode: irc://chat.freenode.net#docker
|
||||||
.. _Github: http://www.github.com/dotcloud/docker
|
.. _Github: http://www.github.com/dotcloud/docker
|
||||||
|
|
|
@ -90,7 +90,7 @@ Docker can now be installed on Amazon EC2 with a single vagrant command. Vagrant
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker
|
sudo docker
|
||||||
|
|
||||||
|
|
||||||
Continue with the :ref:`hello_world` example.
|
Continue with the :ref:`hello_world` example.
|
||||||
|
|
|
@ -56,10 +56,10 @@ Run your first container!
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
# check your docker version
|
# check your docker version
|
||||||
./docker version
|
sudo ./docker version
|
||||||
|
|
||||||
# run a container and open an interactive shell in the container
|
# run a container and open an interactive shell in the container
|
||||||
./docker run -i -t ubuntu /bin/bash
|
sudo ./docker run -i -t ubuntu /bin/bash
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -76,7 +76,7 @@ Verify it worked
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
# download the base 'ubuntu' container and run bash inside it while setting up an interactive shell
|
# download the base 'ubuntu' container and run bash inside it while setting up an interactive shell
|
||||||
docker run -i -t ubuntu /bin/bash
|
sudo docker run -i -t ubuntu /bin/bash
|
||||||
|
|
||||||
# type 'exit' to exit
|
# type 'exit' to exit
|
||||||
|
|
||||||
|
@ -138,7 +138,7 @@ Verify it worked
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
# download the base 'ubuntu' container and run bash inside it while setting up an interactive shell
|
# download the base 'ubuntu' container and run bash inside it while setting up an interactive shell
|
||||||
docker run -i -t ubuntu /bin/bash
|
sudo docker run -i -t ubuntu /bin/bash
|
||||||
|
|
||||||
# type exit to exit
|
# type exit to exit
|
||||||
|
|
||||||
|
|
|
@ -63,7 +63,7 @@ Now you are in the VM, run docker
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker
|
sudo docker
|
||||||
|
|
||||||
|
|
||||||
Continue with the :ref:`hello_world` example.
|
Continue with the :ref:`hello_world` example.
|
||||||
|
|
|
@ -9,11 +9,13 @@ The Basics
|
||||||
Starting Docker
|
Starting Docker
|
||||||
---------------
|
---------------
|
||||||
|
|
||||||
If you have used one of the quick install paths', Docker may have been installed with upstart, Ubuntu's
|
If you have used one of the quick install paths, Docker may have been
|
||||||
system for starting processes at boot time. You should be able to run ``docker help`` and get output.
|
installed with upstart, Ubuntu's system for starting processes at boot
|
||||||
|
time. You should be able to run ``sudo docker help`` and get output.
|
||||||
|
|
||||||
If you get ``docker: command not found`` or something like ``/var/lib/docker/repositories: permission denied``
|
If you get ``docker: command not found`` or something like
|
||||||
you will need to specify the path to it and manually start it.
|
``/var/lib/docker/repositories: permission denied`` you will need to
|
||||||
|
specify the path to it and manually start it.
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
|
@ -27,45 +29,84 @@ Running an interactive shell
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
# Download an ubuntu image
|
# Download an ubuntu image
|
||||||
docker pull ubuntu
|
sudo docker pull ubuntu
|
||||||
|
|
||||||
# Run an interactive shell in the ubuntu image,
|
# Run an interactive shell in the ubuntu image,
|
||||||
# allocate a tty, attach stdin and stdout
|
# allocate a tty, attach stdin and stdout
|
||||||
docker run -i -t ubuntu /bin/bash
|
sudo docker run -i -t ubuntu /bin/bash
|
||||||
|
|
||||||
Bind Docker to another host/port or a unix socket
|
Why ``sudo``?
|
||||||
|
-------------
|
||||||
|
|
||||||
|
The ``docker`` daemon always runs as root, and since ``docker``
|
||||||
|
version 0.5.2, ``docker`` binds to a Unix socket instead of a TCP
|
||||||
|
port. By default that Unix socket is owned by the user *root*, and so,
|
||||||
|
by default, you can access it with ``sudo``.
|
||||||
|
|
||||||
|
Starting in version 0.5.3, if you create a Unix group called *docker*
|
||||||
|
and add users to it, then the ``docker`` daemon will make the
|
||||||
|
ownership of the Unix socket read/writable by the *docker* group when
|
||||||
|
the daemon starts. The ``docker`` daemon must always run as root, but
|
||||||
|
if you run the ``docker`` client as a user in the *docker* group then
|
||||||
|
you don't need to add ``sudo`` to all the client commands.
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
# Add the docker group
|
||||||
|
sudo groupadd docker
|
||||||
|
|
||||||
|
# Add the ubuntu user to the docker group
|
||||||
|
sudo gpasswd -a ubuntu docker
|
||||||
|
|
||||||
|
# Restart the docker daemon
|
||||||
|
sudo service docker restart
|
||||||
|
|
||||||
|
Bind Docker to another host/port or a Unix socket
|
||||||
-------------------------------------------------
|
-------------------------------------------------
|
||||||
|
|
||||||
With -H it is possible to make the Docker daemon to listen on a specific ip and port. By default, it will listen on 127.0.0.1:4243 to allow only local connections but you can set it to 0.0.0.0:4243 or a specific host ip to give access to everybody.
|
.. DANGER:: Changing the default ``docker`` daemon binding to a TCP
|
||||||
|
port or Unix *docker* user group will increase your security risks
|
||||||
|
by allowing non-root users to potentially gain *root* access on the
|
||||||
|
host (`e.g. #1369
|
||||||
|
<https://github.com/dotcloud/docker/issues/1369>`_). Make sure you
|
||||||
|
control access to ``docker``.
|
||||||
|
|
||||||
Similarly, the Docker client can use -H to connect to a custom port.
|
With ``-H`` it is possible to make the Docker daemon listen on a
|
||||||
|
specific IP and port. By default, it will listen on
|
||||||
|
``unix:///var/run/docker.sock`` to allow only local connections by the
|
||||||
|
*root* user. You *could* set it to 0.0.0.0:4243 or a specific host ip to
|
||||||
|
give access to everybody, but that is **not recommended** because then
|
||||||
|
it is trivial for someone to gain root access to the host where the
|
||||||
|
daemon is running.
|
||||||
|
|
||||||
|
Similarly, the Docker client can use ``-H`` to connect to a custom port.
|
||||||
|
|
||||||
|
``-H`` accepts host and port assignment in the following format:
|
||||||
|
``tcp://[host][:port]`` or ``unix://path``
|
||||||
|
|
||||||
-H accepts host and port assignment in the following format: tcp://[host][:port] or unix://path
|
|
||||||
For example:
|
For example:
|
||||||
|
|
||||||
* tcp://host -> tcp connection on host:4243
|
* ``tcp://host:4243`` -> tcp connection on host:4243
|
||||||
* tcp://host:port -> tcp connection on host:port
|
* ``unix://path/to/socket`` -> unix socket located at ``path/to/socket``
|
||||||
* tcp://:port -> tcp connection on 127.0.0.1:port
|
|
||||||
* unix://path/to/socket -> unix socket located at path/to/socket
|
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
# Run docker in daemon mode
|
# Run docker in daemon mode
|
||||||
sudo <path to>/docker -H 0.0.0.0:5555 -d &
|
sudo <path to>/docker -H 0.0.0.0:5555 -d &
|
||||||
# Download an ubuntu image
|
# Download an ubuntu image
|
||||||
docker -H :5555 pull ubuntu
|
sudo docker -H :5555 pull ubuntu
|
||||||
|
|
||||||
You can use multiple -H, for example, if you want to listen
|
You can use multiple ``-H``, for example, if you want to listen on
|
||||||
on both tcp and a unix socket
|
both TCP and a Unix socket
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
# Run docker in daemon mode
|
# Run docker in daemon mode
|
||||||
sudo <path to>/docker -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock -d &
|
sudo <path to>/docker -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock -d &
|
||||||
# Download an ubuntu image
|
# Download an ubuntu image, use default Unix socket
|
||||||
docker pull ubuntu
|
sudo docker pull ubuntu
|
||||||
# OR
|
# OR use the TCP port
|
||||||
docker -H unix:///var/run/docker.sock pull ubuntu
|
sudo docker -H tcp://127.0.0.1:4243 pull ubuntu
|
||||||
|
|
||||||
Starting a long-running worker process
|
Starting a long-running worker process
|
||||||
--------------------------------------
|
--------------------------------------
|
||||||
|
@ -73,13 +114,13 @@ Starting a long-running worker process
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
# Start a very useful long-running process
|
# Start a very useful long-running process
|
||||||
JOB=$(docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done")
|
JOB=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done")
|
||||||
|
|
||||||
# Collect the output of the job so far
|
# Collect the output of the job so far
|
||||||
docker logs $JOB
|
sudo docker logs $JOB
|
||||||
|
|
||||||
# Kill the job
|
# Kill the job
|
||||||
docker kill $JOB
|
sudo docker kill $JOB
|
||||||
|
|
||||||
|
|
||||||
Listing all running containers
|
Listing all running containers
|
||||||
|
@ -87,7 +128,7 @@ Listing all running containers
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
docker ps
|
sudo docker ps
|
||||||
|
|
||||||
Expose a service on a TCP port
|
Expose a service on a TCP port
|
||||||
------------------------------
|
------------------------------
|
||||||
|
@ -95,10 +136,10 @@ Expose a service on a TCP port
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
# Expose port 4444 of this container, and tell netcat to listen on it
|
# Expose port 4444 of this container, and tell netcat to listen on it
|
||||||
JOB=$(docker run -d -p 4444 ubuntu /bin/nc -l -p 4444)
|
JOB=$(sudo docker run -d -p 4444 ubuntu /bin/nc -l -p 4444)
|
||||||
|
|
||||||
# Which public port is NATed to my container?
|
# Which public port is NATed to my container?
|
||||||
PORT=$(docker port $JOB 4444)
|
PORT=$(sudo docker port $JOB 4444)
|
||||||
|
|
||||||
# Connect to the public port via the host's public address
|
# Connect to the public port via the host's public address
|
||||||
# Please note that because of how routing works connecting to localhost or 127.0.0.1 $PORT will not work.
|
# Please note that because of how routing works connecting to localhost or 127.0.0.1 $PORT will not work.
|
||||||
|
@ -107,7 +148,7 @@ Expose a service on a TCP port
|
||||||
echo hello world | nc $IP $PORT
|
echo hello world | nc $IP $PORT
|
||||||
|
|
||||||
# Verify that the network connection worked
|
# Verify that the network connection worked
|
||||||
echo "Daemon received: $(docker logs $JOB)"
|
echo "Daemon received: $(sudo docker logs $JOB)"
|
||||||
|
|
||||||
|
|
||||||
Committing (saving) a container state
|
Committing (saving) a container state
|
||||||
|
@ -115,21 +156,23 @@ Committing (saving) a container state
|
||||||
|
|
||||||
Save your containers state to a container image, so the state can be re-used.
|
Save your container's state to a container image, so the state can be re-used.
|
||||||
|
|
||||||
When you commit your container only the differences between the image the container was created from
|
When you commit your container only the differences between the image
|
||||||
and the current state of the container will be stored (as a diff). See which images you already have
|
the container was created from and the current state of the container
|
||||||
using ``docker images``
|
will be stored (as a diff). See which images you already have using
|
||||||
|
``sudo docker images``
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
# Commit your container to a new named image
|
# Commit your container to a new named image
|
||||||
docker commit <container_id> <some_name>
|
sudo docker commit <container_id> <some_name>
|
||||||
|
|
||||||
# List your containers
|
# List your images
|
||||||
docker images
|
sudo docker images
|
||||||
|
|
||||||
You now have a image state from which you can create new instances.
|
You now have an image state from which you can create new instances.
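If you want to see what a commit captured (or will capture), you can inspect the changes made inside the container; a small sketch using the ``docker diff`` command:

.. code-block:: bash

   # List the files added, changed or deleted since the container was created
   sudo docker diff <container_id>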
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Read more about :ref:`working_with_the_repository` or continue to the complete :ref:`cli`
|
Read more about :ref:`working_with_the_repository` or continue to the
|
||||||
|
complete :ref:`cli`
|
||||||
|
|
||||||
|
|
|
@@ -2,6 +2,8 @@
 :description: Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image.
 :keywords: builder, docker, Dockerfile, automation, image creation

+.. _dockerbuilder:
+
 ==================
 Dockerfile Builder
 ==================
@@ -23,12 +25,12 @@ describe the steps to assemble the image.
 Then call ``docker build`` with the path of your source repository as
 argument:

-``docker build .``
+``sudo docker build .``

 You can specify a repository and tag at which to save the new image if the
 build succeeds:

-``docker build -t shykes/myapp .``
+``sudo docker build -t shykes/myapp .``

 Docker will run your steps one-by-one, committing the result if necessary,
 before finally outputting the ID of your new image.
@@ -211,6 +213,15 @@ container created from the image.
 The ``USER`` instruction sets the username or UID to use when running
 the image.

+3.11 WORKDIR
+------------
+
+``WORKDIR /path/to/workdir``
+
+The ``WORKDIR`` instruction sets the working directory in which
+the command given by ``CMD`` is executed.
+
+
 4. Dockerfile Examples
 ======================
@@ -6,20 +6,23 @@
 Port redirection
 ================

-Docker can redirect public tcp ports to your container, so it can be reached over the network.
-Port redirection is done on ``docker run`` using the -p flag.
+Docker can redirect public TCP ports to your container, so it can be
+reached over the network. Port redirection is done on ``docker run``
+using the -p flag.

-A port redirect is specified as PUBLIC:PRIVATE, where tcp port PUBLIC will be redirected to
-tcp port PRIVATE. As a special case, the public port can be omitted, in which case a random
-public port will be allocated.
+A port redirect is specified as *PUBLIC:PRIVATE*, where TCP port
+*PUBLIC* will be redirected to TCP port *PRIVATE*. As a special case,
+the public port can be omitted, in which case a random public port
+will be allocated.

 .. code-block:: bash

     # A random PUBLIC port is redirected to PRIVATE port 80 on the container
-    docker run -p 80 <image> <cmd>
+    sudo docker run -p 80 <image> <cmd>

     # PUBLIC port 80 is redirected to PRIVATE port 80
-    docker run -p 80:80 <image> <cmd>
+    sudo docker run -p 80:80 <image> <cmd>


-Default port redirects can be built into a container with the EXPOSE build command.
+Default port redirects can be built into a container with the
+``EXPOSE`` build command.
@@ -9,28 +9,32 @@ Using Puppet

 .. note::

-   Please note this is a community contributed installation path. The only 'official' installation is using the
-   :ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
+   Please note this is a community contributed installation path. The
+   only 'official' installation is using the :ref:`ubuntu_linux`
+   installation path. This version may sometimes be out of date.

 Requirements
 ------------

-To use this guide you'll need a working installation of Puppet from `Puppetlabs <https://www.puppetlabs.com>`_ .
+To use this guide you'll need a working installation of Puppet from
+`Puppetlabs <https://www.puppetlabs.com>`_ .

 The module also currently uses the official PPA so only works with Ubuntu.

 Installation
 ------------

-The module is available on the `Puppet Forge <https://forge.puppetlabs.com/garethr/docker/>`_
-and can be installed using the built-in module tool.
+The module is available on the `Puppet Forge
+<https://forge.puppetlabs.com/garethr/docker/>`_ and can be installed
+using the built-in module tool.

 .. code-block:: bash

     puppet module install garethr/docker

-It can also be found on `GitHub <https://www.github.com/garethr/garethr-docker>`_
-if you would rather download the source.
+It can also be found on `GitHub
+<https://www.github.com/garethr/garethr-docker>`_ if you would rather
+download the source.

 Usage
 -----
@@ -57,10 +57,10 @@ address of the registry's host, like this:
     # Tag to create a repository with the full registry location.
     # The location (e.g. localhost.localdomain:5000) becomes
    # a permanent part of the repository name
-    docker tag 0u812deadbeef localhost.localdomain:5000/repo_name
+    sudo docker tag 0u812deadbeef localhost.localdomain:5000/repo_name

     # Push the new repository to its home location on localhost
-    docker push localhost.localdomain:5000/repo_name
+    sudo docker push localhost.localdomain:5000/repo_name

 Once a repository has your registry's host name as part of the tag,
 you can push and pull it like any other repository, but it will
@@ -75,14 +75,14 @@ Search by name, namespace or description

 .. code-block:: bash

-    docker search <value>
+    sudo docker search <value>


 Download them simply by their name

 .. code-block:: bash

-    docker pull <value>
+    sudo docker pull <value>


 Very similarly you can search for and browse the index online on
@@ -96,7 +96,7 @@ You can create a user on the central Docker Index online, or by running

 .. code-block:: bash

-    docker login
+    sudo docker login

 This will prompt you for a username, which will become a public
 namespace for your public repositories.
@@ -115,7 +115,7 @@ your container to an image within your username namespace.
 .. code-block:: bash

     # for example docker commit $CONTAINER_ID dhrp/kickassapp
-    docker commit <container_id> <username>/<repo_name>
+    sudo docker commit <container_id> <username>/<repo_name>


 Pushing a container to its repository
@@ -129,4 +129,4 @@ Now you can commit this image to the repository
 .. code-block:: bash

     # for example docker push dhrp/kickassapp
-    docker push <username>/<repo_name>
+    sudo docker push <username>/<repo_name>
docs/theme/docker/layout.html (4 changes, vendored)

@@ -70,8 +70,8 @@
 <ul class="nav">
 <li id="nav-introduction"><a href="http://www.docker.io/" title="Docker Homepage">Home</a></li>
 <li id="nav-about"><a href="http://www.docker.io/about/" title="About">About</a></li>
-<li id="nav-community"><a href="http://www.docker.io/community/" title="Community">Community</a></li>
 <li id="nav-gettingstarted"><a href="http://www.docker.io/gettingstarted/">Getting started</a></li>
+<li id="nav-community"><a href="http://www.docker.io/community/" title="Community">Community</a></li>
 <li id="nav-documentation" class="active"><a href="http://docs.docker.io/en/latest/">Documentation</a></li>
 <li id="nav-blog"><a href="http://blog.docker.io/" title="Docker Blog">Blog</a></li>
 <li id="nav-index"><a href="http://index.docker.io/" title="Docker Image Index, find images here">INDEX <img class="inline-icon" src="{{ pathto('_static/img/external-link-icon.png', 1) }}" title="external link"> </a></li>
@@ -93,7 +93,7 @@
 <div class="span12 titlebar">
 <!--<span class="pull-right" style="margin-left: 20px; font-size: 20px">{{version}}</span>-->
 <div class="pull-right" id="fork-us" style="margin-top: 16px; margin-right: 16px;">
-<a href="http://github.com/dotcloud/docker/"><img src="{{ pathto('_static/img/fork-us.png', 1) }}"> Fork us on Github</a>
+<a href="https://github.com/dotcloud/docker/blob/master/docs/sources/{{ pagename }}.rst"><img src="{{ pathto('_static/img/fork-us.png', 1) }}"> Edit this page on Github</a>
 </div>
 <h1 class="pageheader"><a href="http://docs.docker.io/en/latest/" title="Documentation" style="color: white;">DOCUMENTATION</a></h1>
docs/theme/docker/static/css/main.css (16 changes, vendored)

@@ -375,3 +375,19 @@ section.header {
 table th {
   text-align: left;
 }
+h1:hover > a.headerlink,
+h2:hover > a.headerlink,
+h3:hover > a.headerlink,
+h4:hover > a.headerlink,
+h5:hover > a.headerlink,
+h6:hover > a.headerlink,
+dt:hover > a.headerlink {
+  visibility: visible;
+}
+.headerlink {
+  font-size: smaller;
+  color: #666;
+  font-weight: bold;
+  float: right;
+  visibility: hidden;
+}
docs/theme/docker/static/css/main.less (18 changes, vendored)

@@ -486,4 +486,22 @@ section.header {
 /* Misc fixes */
 table th {
   text-align: left;
+}
+
+h1:hover > a.headerlink,
+h2:hover > a.headerlink,
+h3:hover > a.headerlink,
+h4:hover > a.headerlink,
+h5:hover > a.headerlink,
+h6:hover > a.headerlink,
+dt:hover > a.headerlink {
+  visibility: visible;
+}
+
+.headerlink {
+  font-size: smaller;
+  color: #666;
+  font-weight: bold;
+  float: right;
+  visibility: hidden;
 }
docs/theme/docker/static/favicon.png (binary, vendored)
Binary file not shown. Before: 404 B, After: 1.4 KiB
graph.go (4 changes)

@@ -323,9 +323,9 @@ func (graph *Graph) ByParent() (map[string][]*Image, error) {
             return
         }
         if children, exists := byParent[parent.ID]; exists {
-            byParent[parent.ID] = []*Image{image}
-        } else {
             byParent[parent.ID] = append(children, image)
+        } else {
+            byParent[parent.ID] = []*Image{image}
         }
     })
     return byParent, err
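The hunk above swaps the two branches of ByParent so that an existing child list is appended to and a missing entry is initialized, instead of the reverse. Below is a minimal standalone sketch of that corrected grouping pattern, using plain strings rather than the real `*Image` type, so it is an illustration rather than the commit's code:

```go
package main

import "fmt"

// groupByParent collects child IDs under their parent ID, mirroring the
// corrected ByParent logic: append when the key exists, initialize otherwise.
func groupByParent(pairs [][2]string) map[string][]string {
	byParent := make(map[string][]string)
	for _, p := range pairs {
		parent, child := p[0], p[1]
		if children, exists := byParent[parent]; exists {
			byParent[parent] = append(children, child)
		} else {
			byParent[parent] = []string{child}
		}
	}
	return byParent
}

func main() {
	byParent := groupByParent([][2]string{
		{"parent", "child1"},
		{"parent", "child2"},
	})
	fmt.Println(len(byParent["parent"])) // 2, which is what TestByParent below checks for
}
```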
@@ -234,6 +234,45 @@ func TestDelete(t *testing.T) {
     assertNImages(graph, t, 1)
 }

+func TestByParent(t *testing.T) {
+    archive1, _ := fakeTar()
+    archive2, _ := fakeTar()
+    archive3, _ := fakeTar()
+
+    graph := tempGraph(t)
+    defer os.RemoveAll(graph.Root)
+    parentImage := &Image{
+        ID:      GenerateID(),
+        Comment: "parent",
+        Created: time.Now(),
+        Parent:  "",
+    }
+    childImage1 := &Image{
+        ID:      GenerateID(),
+        Comment: "child1",
+        Created: time.Now(),
+        Parent:  parentImage.ID,
+    }
+    childImage2 := &Image{
+        ID:      GenerateID(),
+        Comment: "child2",
+        Created: time.Now(),
+        Parent:  parentImage.ID,
+    }
+    _ = graph.Register(nil, archive1, parentImage)
+    _ = graph.Register(nil, archive2, childImage1)
+    _ = graph.Register(nil, archive3, childImage2)
+
+    byParent, err := graph.ByParent()
+    if err != nil {
+        t.Fatal(err)
+    }
+    numChildren := len(byParent[parentImage.ID])
+    if numChildren != 2 {
+        t.Fatalf("Expected 2 children, found %d", numChildren)
+    }
+}
+
 func assertNImages(graph *Graph, t *testing.T, n int) {
     if images, err := graph.All(); err != nil {
         t.Fatal(err)
@@ -23,7 +23,7 @@ run add-apt-repository -y ppa:dotcloud/docker-golang/ubuntu
 run apt-get update
 # Packages required to checkout, build and upload docker
 run DEBIAN_FRONTEND=noninteractive apt-get install -y -q s3cmd curl
-run curl -s -o /go.tar.gz https://go.googlecode.com/files/go1.1.1.linux-amd64.tar.gz
+run curl -s -o /go.tar.gz https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz
 run tar -C /usr/local -xzf /go.tar.gz
 run echo "export PATH=/usr/local/go/bin:$PATH" > /.bashrc
 run echo "export PATH=/usr/local/go/bin:$PATH" > /.bash_profile
@@ -40,6 +40,9 @@ lxc.console = none
 # no controlling tty at all
 lxc.tty = 1

+{{if .Config.Privileged}}
+lxc.cgroup.devices.allow = a
+{{else}}
 # no implicit access to devices
 lxc.cgroup.devices.deny = a

@@ -69,7 +72,7 @@ lxc.cgroup.devices.allow = c 10:200 rwm

 # rtc
 #lxc.cgroup.devices.allow = c 254:0 rwm
-
+{{end}}

 # standard mount point
 # WARNING: procfs is a known attack vector and should probably be disabled
@@ -95,11 +98,15 @@ lxc.mount.entry = {{$realPath}} {{$ROOTFS}}/{{$virtualPath}} none bind,{{ if ind
 {{end}}
 {{end}}

+{{if .Config.Privileged}}
+# retain all capabilities; no lxc.cap.drop line
+{{else}}
 # drop linux capabilities (apply mainly to the user root in the container)
 # (Note: 'lxc.cap.keep' is coming soon and should replace this under the
 # security principle 'deny all unless explicitly permitted', see
 # http://sourceforge.net/mailarchive/message.php?msg_id=31054627 )
 lxc.cap.drop = audit_control audit_write mac_admin mac_override mknod setfcap setpcap sys_admin sys_boot sys_module sys_nice sys_pacct sys_rawio sys_resource sys_time sys_tty_config
+{{end}}

 # limits
 {{if .Config.Memory}}
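The template now branches on `.Config.Privileged`: a privileged container gets `lxc.cgroup.devices.allow = a` and keeps all capabilities, while everything else keeps the deny-by-default device list and the `lxc.cap.drop` line. A standalone sketch of how such a conditional renders through Go's `text/template`, with a toy config struct and template string rather than the real LXC template, looks like this:

```go
package main

import (
	"os"
	"text/template"
)

// config is a stand-in for the container Config the template consults.
type config struct {
	Privileged bool
}

const lxcSnippet = `{{if .Config.Privileged}}
lxc.cgroup.devices.allow = a
{{else}}
# no implicit access to devices
lxc.cgroup.devices.deny = a
{{end}}
`

func main() {
	tmpl := template.Must(template.New("lxc").Parse(lxcSnippet))
	// Render once per mode to show which branch each kind of container gets.
	for _, c := range []config{{Privileged: true}, {Privileged: false}} {
		tmpl.Execute(os.Stdout, struct{ Config config }{c})
	}
}
```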
@@ -162,7 +162,6 @@ func (r *Registry) GetRemoteHistory(imgID, registry string, token []string) ([]s
 // Check if an image exists in the Registry
 func (r *Registry) LookupRemoteImage(imgID, registry string, token []string) bool {
-
     req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil)
     if err != nil {
         return false
@@ -230,7 +229,8 @@ func (r *Registry) GetRemoteTags(registries []string, repository string, token [
     }
     for _, host := range registries {
         endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository)
-        req, err := r.opaqueRequest("GET", endpoint, nil)
+        req, err := r.reqFactory.NewRequest("GET", endpoint, nil)
+
         if err != nil {
             return nil, err
         }
@@ -263,12 +263,11 @@ func (r *Registry) GetRemoteTags(registries []string, repository string, token [
 }

 func (r *Registry) GetRepositoryData(indexEp, remote string) (*RepositoryData, error) {
-
     repositoryTarget := fmt.Sprintf("%srepositories/%s/images", indexEp, remote)

     utils.Debugf("[registry] Calling GET %s", repositoryTarget)

-    req, err := r.opaqueRequest("GET", repositoryTarget, nil)
+    req, err := r.reqFactory.NewRequest("GET", repositoryTarget, nil)
     if err != nil {
         return nil, err
     }
@@ -426,22 +425,14 @@ func (r *Registry) PushImageLayerRegistry(imgID string, layer io.Reader, registr
     return tarsumLayer.Sum(jsonRaw), nil
 }

-func (r *Registry) opaqueRequest(method, urlStr string, body io.Reader) (*http.Request, error) {
-    req, err := r.reqFactory.NewRequest(method, urlStr, body)
-    if err != nil {
-        return nil, err
-    }
-    req.URL.Opaque = strings.Replace(urlStr, req.URL.Scheme+":", "", 1)
-    return req, err
-}
-
 // push a tag on the registry.
 // Remote has the format '<user>/<repo>
 func (r *Registry) PushRegistryTag(remote, revision, tag, registry string, token []string) error {
     // "jsonify" the string
     revision = "\"" + revision + "\""
+    path := fmt.Sprintf("repositories/%s/tags/%s", remote, tag)

-    req, err := r.opaqueRequest("PUT", registry+"repositories/"+remote+"/tags/"+tag, strings.NewReader(revision))
+    req, err := r.reqFactory.NewRequest("PUT", registry+path, strings.NewReader(revision))
     if err != nil {
         return err
     }
@@ -480,11 +471,10 @@ func (r *Registry) PushImageJSONIndex(indexEp, remote string, imgList []*ImgData
     if validate {
         suffix = "images"
     }
-
     u := fmt.Sprintf("%srepositories/%s/%s", indexEp, remote, suffix)
     utils.Debugf("[registry] PUT %s", u)
     utils.Debugf("Image list pushed to index:\n%s\n", imgListJSON)
-    req, err := r.opaqueRequest("PUT", u, bytes.NewReader(imgListJSON))
+    req, err := r.reqFactory.NewRequest("PUT", u, bytes.NewReader(imgListJSON))
     if err != nil {
         return nil, err
     }
@@ -504,7 +494,7 @@ func (r *Registry) PushImageJSONIndex(indexEp, remote string, imgList []*ImgData
     // Redirect if necessary
     for res.StatusCode >= 300 && res.StatusCode < 400 {
         utils.Debugf("Redirected to %s\n", res.Header.Get("Location"))
-        req, err = r.opaqueRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJSON))
+        req, err = r.reqFactory.NewRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJSON))
         if err != nil {
             return nil, err
         }
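For context on what the deleted helper did: `URL.Opaque` is the standard `net/http` escape hatch for sending a request URI verbatim instead of letting the client re-encode it. The sketch below (with a hypothetical registry URL, not the registry code itself) shows that mechanism; after this change the registry builds its requests through `reqFactory.NewRequest` alone and pre-formats the path with `fmt.Sprintf` instead.

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Hypothetical registry-style URL whose path should be sent as-is.
	urlStr := "https://registry.example.com/repositories/foo/bar/tags/latest"

	req, err := http.NewRequest("PUT", urlStr, strings.NewReader(`"rev"`))
	if err != nil {
		panic(err)
	}
	// This is what the removed opaqueRequest helper did: keep the raw
	// request URI (minus the scheme) so the client does not re-encode it.
	req.URL.Opaque = strings.Replace(urlStr, req.URL.Scheme+":", "", 1)

	fmt.Println(req.URL.Opaque) // //registry.example.com/repositories/foo/bar/tags/latest
}
```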
@@ -343,4 +343,4 @@ func TestWait(t *testing.T) {
     <-c
 }

 //*/
server.go (38 changes)

@@ -76,7 +76,7 @@ func (srv *Server) ContainerKill(name string) error {
         if err := container.Kill(); err != nil {
             return fmt.Errorf("Error killing container %s: %s", name, err)
         }
-        srv.LogEvent("kill", name)
+        srv.LogEvent("kill", container.ShortID(), srv.runtime.repositories.ImageName(container.Image))
     } else {
         return fmt.Errorf("No such container: %s", name)
     }
@@ -95,7 +95,7 @@ func (srv *Server) ContainerExport(name string, out io.Writer) error {
         if _, err := io.Copy(out, data); err != nil {
             return err
         }
-        srv.LogEvent("export", name)
+        srv.LogEvent("export", container.ShortID(), srv.runtime.repositories.ImageName(container.Image))
         return nil
     }
     return fmt.Errorf("No such container: %s", name)
@@ -241,6 +241,8 @@ func (srv *Server) Images(all bool, filter string) ([]APIImages, error) {
             outs = append(outs, out)
         }
     }
+
+    sortImagesByCreationAndTag(outs)
     return outs, nil
 }
@@ -828,11 +830,17 @@ func (srv *Server) ContainerCreate(config *Config) (string, error) {
     container, err := b.Create(config)
     if err != nil {
         if srv.runtime.graph.IsNotExist(err) {
-            return "", fmt.Errorf("No such image: %s", config.Image)
+
+            _, tag := utils.ParseRepositoryTag(config.Image)
+            if tag == "" {
+                tag = DEFAULTTAG
+            }
+
+            return "", fmt.Errorf("No such image: %s (tag: %s)", config.Image, tag)
         }
         return "", err
     }
-    srv.LogEvent("create", container.ShortID())
+    srv.LogEvent("create", container.ShortID(), srv.runtime.repositories.ImageName(container.Image))
     return container.ShortID(), nil
 }

@@ -841,7 +849,7 @@ func (srv *Server) ContainerRestart(name string, t int) error {
         if err := container.Restart(t); err != nil {
             return fmt.Errorf("Error restarting container %s: %s", name, err)
         }
-        srv.LogEvent("restart", name)
+        srv.LogEvent("restart", container.ShortID(), srv.runtime.repositories.ImageName(container.Image))
     } else {
         return fmt.Errorf("No such container: %s", name)
     }
@@ -861,7 +869,7 @@ func (srv *Server) ContainerDestroy(name string, removeVolume bool) error {
         if err := srv.runtime.Destroy(container); err != nil {
             return fmt.Errorf("Error destroying container %s: %s", name, err)
         }
-        srv.LogEvent("destroy", name)
+        srv.LogEvent("destroy", container.ShortID(), srv.runtime.repositories.ImageName(container.Image))

         if removeVolume {
             // Retrieve all volumes from all remaining containers
@@ -928,7 +936,7 @@ func (srv *Server) deleteImageAndChildren(id string, imgs *[]APIRmi) error {
             return err
         }
         *imgs = append(*imgs, APIRmi{Deleted: utils.TruncateID(id)})
-        srv.LogEvent("delete", utils.TruncateID(id))
+        srv.LogEvent("delete", utils.TruncateID(id), "")
         return nil
     }
     return nil
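The "No such image" error above now reports which tag was actually looked up, falling back to the default when the user gave none. Here is a standalone sketch of that fallback, with a local `parseRepositoryTag` helper and a `defaultTag` constant standing in for `utils.ParseRepositoryTag` and `DEFAULTTAG`; the names and the splitting rule are illustrative only:

```go
package main

import (
	"fmt"
	"strings"
)

const defaultTag = "latest" // stand-in for DEFAULTTAG

// parseRepositoryTag splits "repo:tag" into its two parts; the tag may be
// empty, and a colon that belongs to a registry host:port is left alone.
func parseRepositoryTag(repos string) (string, string) {
	if n := strings.LastIndex(repos, ":"); n >= 0 && !strings.Contains(repos[n+1:], "/") {
		return repos[:n], repos[n+1:]
	}
	return repos, ""
}

func main() {
	for _, image := range []string{"ubuntu", "ubuntu:12.04"} {
		_, tag := parseRepositoryTag(image)
		if tag == "" {
			tag = defaultTag
		}
		fmt.Printf("No such image: %s (tag: %s)\n", image, tag)
	}
}
```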
@@ -955,11 +963,11 @@ func (srv *Server) deleteImage(img *Image, repoName, tag string) ([]APIRmi, erro
     //If delete by id, see if the id belong only to one repository
     if strings.Contains(img.ID, repoName) && tag == "" {
         for _, repoAndTag := range srv.runtime.repositories.ByID()[img.ID] {
-            parsedRepo := strings.Split(repoAndTag, ":")[0]
+            parsedRepo, parsedTag := utils.ParseRepositoryTag(repoAndTag)
             if strings.Contains(img.ID, repoName) {
                 repoName = parsedRepo
-                if len(srv.runtime.repositories.ByID()[img.ID]) == 1 && len(strings.Split(repoAndTag, ":")) > 1 {
-                    tag = strings.Split(repoAndTag, ":")[1]
+                if len(srv.runtime.repositories.ByID()[img.ID]) == 1 && len(parsedTag) > 1 {
+                    tag = parsedTag
                 }
             } else if repoName != parsedRepo {
                 // the id belongs to multiple repos, like base:latest and user:test,
@@ -975,7 +983,7 @@ func (srv *Server) deleteImage(img *Image, repoName, tag string) ([]APIRmi, erro
         }
         if tagDeleted {
             imgs = append(imgs, APIRmi{Untagged: img.ShortID()})
-            srv.LogEvent("untag", img.ShortID())
+            srv.LogEvent("untag", img.ShortID(), "")
         }
         if len(srv.runtime.repositories.ByID()[img.ID]) == 0 {
             if err := srv.deleteImageAndChildren(img.ID, &imgs); err != nil {
@@ -1042,7 +1050,7 @@ func (srv *Server) ContainerStart(name string, hostConfig *HostConfig) error {
         if err := container.Start(hostConfig); err != nil {
             return fmt.Errorf("Error starting container %s: %s", name, err)
         }
-        srv.LogEvent("start", name)
+        srv.LogEvent("start", container.ShortID(), srv.runtime.repositories.ImageName(container.Image))
     } else {
         return fmt.Errorf("No such container: %s", name)
     }
@@ -1054,7 +1062,7 @@ func (srv *Server) ContainerStop(name string, t int) error {
         if err := container.Stop(t); err != nil {
             return fmt.Errorf("Error stopping container %s: %s", name, err)
         }
-        srv.LogEvent("stop", name)
+        srv.LogEvent("stop", container.ShortID(), srv.runtime.repositories.ImageName(container.Image))
     } else {
         return fmt.Errorf("No such container: %s", name)
     }
@@ -1222,9 +1230,9 @@ func (srv *Server) HTTPRequestFactory() *utils.HTTPRequestFactory {
     return srv.reqFactory
 }

-func (srv *Server) LogEvent(action, id string) {
+func (srv *Server) LogEvent(action, id, from string) {
     now := time.Now().Unix()
-    jm := utils.JSONMessage{Status: action, ID: id, Time: now}
+    jm := utils.JSONMessage{Status: action, ID: id, From: from, Time: now}
     srv.events = append(srv.events, jm)
     for _, c := range srv.listeners {
         select { // non blocking channel
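Every event emitter above now passes a third argument, so the event stream can say which image a container action applied to. A minimal standalone sketch of the widened signature and the non-blocking fan-out it feeds, using local types rather than the server's, might look like this:

```go
package main

import (
	"fmt"
	"time"
)

// event mirrors the shape of the JSON message: what happened, to which
// container, from which image, and when.
type event struct {
	Status string
	ID     string
	From   string
	Time   int64
}

type eventLogger struct {
	events    []event
	listeners []chan event
}

// logEvent records the event and notifies listeners without ever blocking,
// the same select/default pattern the server uses.
func (l *eventLogger) logEvent(action, id, from string) {
	jm := event{Status: action, ID: id, From: from, Time: time.Now().Unix()}
	l.events = append(l.events, jm)
	for _, c := range l.listeners {
		select {
		case c <- jm:
		default: // listener is busy; drop rather than block
		}
	}
}

func main() {
	l := &eventLogger{listeners: []chan event{make(chan event, 1)}}
	l.logEvent("start", "abcdef123456", "ubuntu:12.04")
	fmt.Printf("%+v\n", <-l.listeners[0])
}
```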
@@ -206,6 +206,7 @@ func TestRunWithTooLowMemoryLimit(t *testing.T) {
 }

 func TestContainerTop(t *testing.T) {
+    t.Skip("Fixme. Skipping test for now. Reported error: 'server_test.go:236: Expected 2 processes, found 1.'")
     runtime := mkRuntime(t)
     srv := &Server{runtime: runtime}
     defer nuke(runtime)
@@ -317,23 +318,23 @@ func TestLogEvent(t *testing.T) {
         listeners: make(map[string]chan utils.JSONMessage),
     }

-    srv.LogEvent("fakeaction", "fakeid")
+    srv.LogEvent("fakeaction", "fakeid", "fakeimage")

     listener := make(chan utils.JSONMessage)
     srv.Lock()
     srv.listeners["test"] = listener
     srv.Unlock()

-    srv.LogEvent("fakeaction2", "fakeid")
+    srv.LogEvent("fakeaction2", "fakeid", "fakeimage")

     if len(srv.events) != 2 {
         t.Fatalf("Expected 2 events, found %d", len(srv.events))
     }
     go func() {
         time.Sleep(200 * time.Millisecond)
-        srv.LogEvent("fakeaction3", "fakeid")
+        srv.LogEvent("fakeaction3", "fakeid", "fakeimage")
         time.Sleep(200 * time.Millisecond)
-        srv.LogEvent("fakeaction4", "fakeid")
+        srv.LogEvent("fakeaction4", "fakeid", "fakeimage")
     }()

     setTimeout(t, "Listening for events timed out", 2*time.Second, func() {
sorter.go (36 changes, new file)

@@ -0,0 +1,36 @@
+package docker
+
+import "sort"
+
+type imageSorter struct {
+    images []APIImages
+    by     func(i1, i2 *APIImages) bool // Closure used in the Less method.
+}
+
+// Len is part of sort.Interface.
+func (s *imageSorter) Len() int {
+    return len(s.images)
+}
+
+// Swap is part of sort.Interface.
+func (s *imageSorter) Swap(i, j int) {
+    s.images[i], s.images[j] = s.images[j], s.images[i]
+}
+
+// Less is part of sort.Interface. It is implemented by calling the "by" closure in the sorter.
+func (s *imageSorter) Less(i, j int) bool {
+    return s.by(&s.images[i], &s.images[j])
+}
+
+// Sort []ApiImages by most recent creation date and tag name.
+func sortImagesByCreationAndTag(images []APIImages) {
+    creationAndTag := func(i1, i2 *APIImages) bool {
+        return i1.Created > i2.Created || (i1.Created == i2.Created && i2.Tag > i1.Tag)
+    }
+
+    sorter := &imageSorter{
+        images: images,
+        by:     creationAndTag}
+
+    sort.Sort(sorter)
+}
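Wrapping a slice and a comparison closure to satisfy `sort.Interface` is a common Go idiom, and it is exactly what the new file does. A standalone sketch of the same ordering rule (newest first, ties broken by the alphabetically earlier tag) with a tiny local struct instead of `APIImages`:

```go
package main

import (
	"fmt"
	"sort"
)

type img struct {
	Tag     string
	Created int64
}

// byCreationAndTag wraps a slice plus the comparison closure, the same
// pattern sorter.go uses to satisfy sort.Interface.
type byCreationAndTag struct {
	imgs []img
	by   func(a, b *img) bool
}

func (s byCreationAndTag) Len() int           { return len(s.imgs) }
func (s byCreationAndTag) Swap(i, j int)      { s.imgs[i], s.imgs[j] = s.imgs[j], s.imgs[i] }
func (s byCreationAndTag) Less(i, j int) bool { return s.by(&s.imgs[i], &s.imgs[j]) }

func main() {
	imgs := []img{{"foo", 100}, {"bar", 200}, {"baz", 200}}
	sort.Sort(byCreationAndTag{imgs, func(a, b *img) bool {
		// Most recent first; on equal timestamps the smaller tag sorts first.
		return a.Created > b.Created || (a.Created == b.Created && b.Tag > a.Tag)
	}})
	fmt.Println(imgs) // [{bar 200} {baz 200} {foo 100}]
}
```

The tests that follow exercise the same two properties: ordering by creation date, then by tag when the dates are equal.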
sorter_test.go (57 changes, new file)

@@ -0,0 +1,57 @@
+package docker
+
+import (
+    "testing"
+)
+
+func TestServerListOrderedImagesByCreationDate(t *testing.T) {
+    runtime := mkRuntime(t)
+    defer nuke(runtime)
+
+    archive, err := fakeTar()
+    if err != nil {
+        t.Fatal(err)
+    }
+    _, err = runtime.graph.Create(archive, nil, "Testing", "", nil)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    srv := &Server{runtime: runtime}
+
+    images, err := srv.Images(true, "")
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    if images[0].Created < images[1].Created {
+        t.Error("Expected []APIImges to be ordered by most recent creation date.")
+    }
+}
+
+func TestServerListOrderedImagesByCreationDateAndTag(t *testing.T) {
+    runtime := mkRuntime(t)
+    defer nuke(runtime)
+
+    archive, err := fakeTar()
+    if err != nil {
+        t.Fatal(err)
+    }
+    image, err := runtime.graph.Create(archive, nil, "Testing", "", nil)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    srv := &Server{runtime: runtime}
+    srv.ContainerTag(image.ID, "repo", "foo", false)
+    srv.ContainerTag(image.ID, "repo", "bar", false)
+
+    images, err := srv.Images(true, "")
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    if images[0].Created != images[1].Created || images[0].Tag >= images[1].Tag {
+        t.Error("Expected []APIImges to be ordered by most recent creation date and tag name.")
+    }
+}
sysinit.go (11 changes)

@@ -22,6 +22,15 @@ func setupNetworking(gw string) {
     }
 }

+// Setup working directory
+func setupWorkingDirectory(workdir string) {
+    if workdir == "" {
+        return
+    }
+    syscall.Chdir(workdir)
+}
+
+
 // Takes care of dropping privileges to the desired user
 func changeUser(u string) {
     if u == "" {
@@ -83,6 +92,7 @@ func SysInit() {
     }
     var u = flag.String("u", "", "username or uid")
     var gw = flag.String("g", "", "gateway address")
+    var workdir = flag.String("w", "", "workdir")

     var flEnv ListOpts
     flag.Var(&flEnv, "e", "Set environment variables")
@@ -91,6 +101,7 @@ func SysInit() {

     cleanupEnv(flEnv)
     setupNetworking(*gw)
+    setupWorkingDirectory(*workdir)
     changeUser(*u)
     executeProgram(flag.Arg(0), flag.Args())
 }
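The new `-w` flag is the init-side half of the `WORKDIR` instruction documented earlier: the init process chdirs into the configured directory before dropping privileges and exec'ing the command. A tiny standalone sketch of that flag-to-chdir step (Linux only, and not dockerinit itself):

```go
package main

import (
	"flag"
	"fmt"
	"os"
	"syscall"
)

// setupWorkingDirectory mirrors the new helper: an empty -w value is a no-op.
func setupWorkingDirectory(workdir string) {
	if workdir == "" {
		return
	}
	syscall.Chdir(workdir)
}

func main() {
	workdir := flag.String("w", "", "workdir")
	flag.Parse()

	setupWorkingDirectory(*workdir)

	wd, _ := os.Getwd()
	fmt.Println("running in", wd)
}
```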
testing/Vagrantfile (7 changes, vendored)

@@ -22,7 +22,10 @@ Vagrant::Config.run do |config|

   # Deploy buildbot and its dependencies if it was not done
   if Dir.glob("#{File.dirname(__FILE__)}/.vagrant/machines/default/*/id").empty?
-    pkg_cmd = "apt-get update -qq; apt-get install -q -y linux-image-generic-lts-raring; "
+    # Add memory limitation capabilities
+    pkg_cmd = 'sed -Ei \'s/^(GRUB_CMDLINE_LINUX_DEFAULT)=.+/\\1="cgroup_enable=memory swapaccount=1 quiet"/\' /etc/default/grub; '
+    # Install new kernel
+    pkg_cmd << "apt-get update -qq; apt-get install -q -y linux-image-generic-lts-raring; "
     # Deploy buildbot CI
     pkg_cmd << "apt-get install -q -y python-dev python-pip supervisor; " \
       "pip install -r #{CFG_PATH}/requirements.txt; " \
@@ -35,7 +38,7 @@ Vagrant::Config.run do |config|
     # Install docker dependencies
     pkg_cmd << "apt-get install -q -y python-software-properties; " \
       "add-apt-repository -y ppa:dotcloud/docker-golang/ubuntu; apt-get update -qq; " \
-      "DEBIAN_FRONTEND=noninteractive apt-get install -q -y lxc git golang-stable aufs-tools make; "
+      "DEBIAN_FRONTEND=noninteractive apt-get install -q -y lxc git mercurial golang-stable aufs-tools make; "
     # Activate new kernel
     pkg_cmd << "shutdown -r +1; "
     config.vm.provision :shell, :inline => pkg_cmd
@@ -66,8 +66,9 @@ c['builders'] = [BuilderConfig(name=BUILDER_NAME,slavenames=['buildworker'],
 # Docker coverage test
 coverage_cmd = ('GOPATH=`pwd` go get -d github.com/dotcloud/docker\n'
     'GOPATH=`pwd` go get github.com/axw/gocov/gocov\n'
-    'sudo -E GOPATH=`pwd` ./bin/gocov test github.com/dotcloud/docker | '
-    './bin/gocov report')
+    'sudo -E GOPATH=`pwd` ./bin/gocov test -deps -exclude-goroot -v'
+    ' -exclude github.com/gorilla/context,github.com/gorilla/mux,github.com/kr/pty,'
+    'code.google.com/p/go.net/websocket github.com/dotcloud/docker | ./bin/gocov report')
 factory = BuildFactory()
 factory.addStep(ShellCommand(description='Coverage',logEnviron=False,usePTY=True,
     command=coverage_cmd))
utils.go (3 changes)

@@ -132,6 +132,9 @@ func MergeConfig(userConf, imageConf *Config) {
     if userConf.Entrypoint == nil || len(userConf.Entrypoint) == 0 {
         userConf.Entrypoint = imageConf.Entrypoint
     }
+    if userConf.WorkingDir == "" {
+        userConf.WorkingDir = imageConf.WorkingDir
+    }
     if userConf.VolumesFrom == "" {
         userConf.VolumesFrom = imageConf.VolumesFrom
     }
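The merge rule for the new field is the same as for its neighbours: the user-supplied value wins, and the image's recorded value is only a fallback when the user left it empty. A minimal sketch of that fallback with a stripped-down config struct (not the real `Config`):

```go
package main

import "fmt"

type config struct {
	WorkingDir  string
	VolumesFrom string
}

// mergeConfig fills only the fields the user left empty; the image value is
// a fallback, never an override, which is the rule MergeConfig applies here.
func mergeConfig(userConf, imageConf *config) {
	if userConf.WorkingDir == "" {
		userConf.WorkingDir = imageConf.WorkingDir
	}
	if userConf.VolumesFrom == "" {
		userConf.VolumesFrom = imageConf.VolumesFrom
	}
}

func main() {
	user := &config{}                        // user did not set a working directory
	image := &config{WorkingDir: "/usr/src"} // image was built with WORKDIR /usr/src
	mergeConfig(user, image)
	fmt.Println(user.WorkingDir) // /usr/src
}
```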
@@ -1,11 +1,11 @@
 package utils

 import (
-    "archive/tar"
     "bytes"
     "compress/gzip"
     "crypto/sha256"
     "encoding/hex"
+    "github.com/dotcloud/tar"
     "hash"
     "io"
     "sort"
@@ -622,6 +622,7 @@ type JSONMessage struct {
     Progress     string     `json:"progress,omitempty"`
     ErrorMessage string     `json:"error,omitempty"` //deprecated
     ID           string     `json:"id,omitempty"`
+    From         string     `json:"from,omitempty"`
     Time         int64      `json:"time,omitempty"`
     Error        *JSONError `json:"errorDetail,omitempty"`
 }
@@ -644,14 +645,17 @@ func (jm *JSONMessage) Display(out io.Writer) error {
         }
         return jm.Error
     }
+    fmt.Fprintf(out, "%c[2K\r", 27)
     if jm.Time != 0 {
         fmt.Fprintf(out, "[%s] ", time.Unix(jm.Time, 0))
     }
     if jm.ID != "" {
         fmt.Fprintf(out, "%s: ", jm.ID)
     }
+    if jm.From != "" {
+        fmt.Fprintf(out, "(from %s) ", jm.From)
+    }
     if jm.Progress != "" {
-        fmt.Fprintf(out, "%c[2K", 27)
         fmt.Fprintf(out, "%s %s\r", jm.Status, jm.Progress)
     } else {
         fmt.Fprintf(out, "%s\r\n", jm.Status)
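With the new field, an event line can show the source image right after the container ID. A standalone sketch of how a populated message renders, using a local copy of the relevant Display logic with the terminal erase-line escape left out for readability (so this is an illustration, not the utils package):

```go
package main

import (
	"fmt"
	"io"
	"os"
	"time"
)

type jsonMessage struct {
	Status string
	ID     string
	From   string
	Time   int64
}

// display writes "[time] id: (from image) status", the same field order the
// updated Display method uses (minus the ANSI escape sequence).
func (jm *jsonMessage) display(out io.Writer) {
	if jm.Time != 0 {
		fmt.Fprintf(out, "[%s] ", time.Unix(jm.Time, 0))
	}
	if jm.ID != "" {
		fmt.Fprintf(out, "%s: ", jm.ID)
	}
	if jm.From != "" {
		fmt.Fprintf(out, "(from %s) ", jm.From)
	}
	fmt.Fprintf(out, "%s\n", jm.Status)
}

func main() {
	jm := &jsonMessage{Status: "start", ID: "abcdef123456", From: "ubuntu:12.04", Time: time.Now().Unix()}
	jm.display(os.Stdout)
}
```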