
rebase master

Victor Vieux 2013-07-19 14:48:32 +00:00
commit 2e0e455fa6
36 changed files with 1204 additions and 355 deletions


@ -1,5 +1,18 @@
# Changelog
## 0.5.0 (2013-07-17)
+ Runtime: List all processes running inside a container with 'docker top'
+ Runtime: Host directories can be mounted as volumes with 'docker run -v'
+ Runtime: Containers can expose public UDP ports (e.g. '-p 123/udp')
+ Runtime: Optionally specify an exact public port (e.g. '-p 80:4500')
+ Registry: New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries
+ Builder: ENTRYPOINT instruction sets a default binary entry point to a container
+ Builder: VOLUME instruction marks a part of the container as persistent data
* Builder: 'docker build' displays the full output of a build by default
* Runtime: 'docker login' supports additional options
- Runtime: Don't save a container's hostname when committing an image.
- Registry: Fix issues when uploading images to a private registry
## 0.4.8 (2013-07-01)
+ Builder: New build operation ENTRYPOINT adds an executable entry point to the container.
- Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID.


@ -49,6 +49,7 @@ whichrelease:
release: $(BINRELEASE)
s3cmd -P put $(BINRELEASE) s3://get.docker.io/builds/`uname -s`/`uname -m`/docker-$(RELEASE_VERSION).tgz
s3cmd -P put docker-latest.tgz s3://get.docker.io/builds/`uname -s`/`uname -m`/docker-latest.tgz
s3cmd -P put $(SRCRELEASE)/bin/docker s3://get.docker.io/builds/`uname -s`/`uname -m`/docker
srcrelease: $(SRCRELEASE)
deps: $(DOCKER_DIR)

api.go

@ -47,21 +47,22 @@ func parseMultipartForm(r *http.Request) error {
}
func httpError(w http.ResponseWriter, err error) {
statusCode := http.StatusInternalServerError
if strings.HasPrefix(err.Error(), "No such") {
http.Error(w, err.Error(), http.StatusNotFound)
statusCode = http.StatusNotFound
} else if strings.HasPrefix(err.Error(), "Bad parameter") {
http.Error(w, err.Error(), http.StatusBadRequest)
statusCode = http.StatusBadRequest
} else if strings.HasPrefix(err.Error(), "Conflict") {
http.Error(w, err.Error(), http.StatusConflict)
statusCode = http.StatusConflict
} else if strings.HasPrefix(err.Error(), "Impossible") {
http.Error(w, err.Error(), http.StatusNotAcceptable)
statusCode = http.StatusNotAcceptable
} else if strings.HasPrefix(err.Error(), "Wrong login/password") {
http.Error(w, err.Error(), http.StatusUnauthorized)
statusCode = http.StatusUnauthorized
} else if strings.Contains(err.Error(), "hasn't been activated") {
http.Error(w, err.Error(), http.StatusForbidden)
} else {
http.Error(w, err.Error(), http.StatusInternalServerError)
statusCode = http.StatusForbidden
}
utils.Debugf("[error %d] %s", statusCode, err)
http.Error(w, err.Error(), statusCode)
}
func writeJSON(w http.ResponseWriter, b []byte) {
@ -250,6 +251,23 @@ func getContainersChanges(srv *Server, version float64, w http.ResponseWriter, r
return nil
}
func getContainersTop(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
name := vars["name"]
procsStr, err := srv.ContainerTop(name)
if err != nil {
return err
}
b, err := json.Marshal(procsStr)
if err != nil {
return err
}
writeJSON(w, b)
return nil
}
func getContainersJSON(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
@ -756,6 +774,7 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
}
remoteURL := r.FormValue("remote")
repoName := r.FormValue("t")
rawSuppressOutput := r.FormValue("q")
tag := ""
if strings.Contains(repoName, ":") {
remoteParts := strings.Split(repoName, ":")
@ -802,7 +821,13 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
}
context = c
}
b := NewBuildFile(srv, utils.NewWriteFlusher(w))
suppressOutput, err := getBoolParam(rawSuppressOutput)
if err != nil {
return err
}
b := NewBuildFile(srv, utils.NewWriteFlusher(w), !suppressOutput)
id, err := b.Build(context)
if err != nil {
fmt.Fprintf(w, "Error build: %s\n", err)
@ -842,6 +867,7 @@ func createRouter(srv *Server, logging bool) (*mux.Router, error) {
"/containers/{name:.*}/export": getContainersExport,
"/containers/{name:.*}/changes": getContainersChanges,
"/containers/{name:.*}/json": getContainersByName,
"/containers/{name:.*}/top": getContainersTop,
},
"POST": {
"/auth": postAuth,


@ -26,6 +26,13 @@ type APIInfo struct {
SwapLimit bool `json:",omitempty"`
}
type APITop struct {
PID string
Tty string
Time string
Cmd string
}
type APIRmi struct {
Deleted string `json:",omitempty"`
Untagged string `json:",omitempty"`


@ -410,6 +410,61 @@ func TestGetContainersChanges(t *testing.T) {
}
}
func TestGetContainersTop(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{runtime: runtime}
builder := NewBuilder(runtime)
container, err := builder.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/sh", "-c", "sleep 2"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
hostConfig := &HostConfig{}
if err := container.Start(hostConfig); err != nil {
t.Fatal(err)
}
// Give some time to the process to start
container.WaitTimeout(500 * time.Millisecond)
if !container.State.Running {
t.Errorf("Container should be running")
}
r := httptest.NewRecorder()
if err := getContainersTop(srv, APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil {
t.Fatal(err)
}
procs := []APITop{}
if err := json.Unmarshal(r.Body.Bytes(), &procs); err != nil {
t.Fatal(err)
}
if len(procs) != 2 {
t.Fatalf("Expected 2 processes, found %d.", len(procs))
}
if procs[0].Cmd != "sh" && procs[0].Cmd != "busybox" {
t.Fatalf("Expected `busybox` or `sh`, found %s.", procs[0].Cmd)
}
if procs[1].Cmd != "sh" && procs[1].Cmd != "busybox" {
t.Fatalf("Expected `busybox` or `sh`, found %s.", procs[1].Cmd)
}
}
func TestGetContainersByName(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)


@ -28,8 +28,8 @@ type buildFile struct {
maintainer string
config *Config
context string
verbose bool
lastContainer *Container
tmpContainers map[string]struct{}
tmpImages map[string]struct{}
@ -173,6 +173,27 @@ func (b *buildFile) CmdEntrypoint(args string) error {
return nil
}
func (b *buildFile) CmdVolume(args string) error {
if args == "" {
return fmt.Errorf("Volume cannot be empty")
}
var volume []string
if err := json.Unmarshal([]byte(args), &volume); err != nil {
volume = []string{args}
}
if b.config.Volumes == nil {
b.config.Volumes = NewPathOpts()
}
for _, v := range volume {
b.config.Volumes[v] = struct{}{}
}
if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil {
return err
}
return nil
}
func (b *buildFile) addRemote(container *Container, orig, dest string) error {
file, err := utils.Download(orig, ioutil.Discard)
if err != nil {
@ -233,7 +254,6 @@ func (b *buildFile) CmdAdd(args string) error {
return err
}
b.tmpContainers[container.ID] = struct{}{}
b.lastContainer = container
if err := container.EnsureMounted(); err != nil {
return err
@ -269,7 +289,6 @@ func (b *buildFile) run() (string, error) {
return "", err
}
b.tmpContainers[c.ID] = struct{}{}
b.lastContainer = c
fmt.Fprintf(b.out, " ---> Running in %s\n", utils.TruncateID(c.ID))
// override the entry point that may have been picked up from the base image
@ -282,6 +301,13 @@ func (b *buildFile) run() (string, error) {
return "", err
}
if b.verbose {
err = <-c.Attach(nil, nil, b.out, b.out)
if err != nil {
return "", err
}
}
// Wait for it to finish
if ret := c.Wait(); ret != 0 {
return "", fmt.Errorf("The command %v returned a non-zero code: %d", b.config.Cmd, ret)
@ -316,7 +342,6 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
return err
}
b.tmpContainers[container.ID] = struct{}{}
b.lastContainer = container
fmt.Fprintf(b.out, " ---> Running in %s\n", utils.TruncateID(container.ID))
id = container.ID
if err := container.EnsureMounted(); err != nil {
@ -344,29 +369,6 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
}
func (b *buildFile) Build(context io.Reader) (string, error) {
defer func() {
// If we have an error and a container, the display the logs
if b.lastContainer != nil {
fmt.Fprintf(b.out, "******** Logs from last container (%s) *******\n", b.lastContainer.ShortID())
cLog, err := b.lastContainer.ReadLog("stdout")
if err != nil {
utils.Debugf("Error reading logs (stdout): %s", err)
}
if _, err := io.Copy(b.out, cLog); err != nil {
utils.Debugf("Error streaming logs (stdout): %s", err)
}
cLog, err = b.lastContainer.ReadLog("stderr")
if err != nil {
utils.Debugf("Error reading logs (stderr): %s", err)
}
if _, err := io.Copy(b.out, cLog); err != nil {
utils.Debugf("Error streaming logs (stderr): %s", err)
}
fmt.Fprintf(b.out, "************* End of logs for %s *************\n", b.lastContainer.ShortID())
}
}()
// FIXME: @creack any reason for using /tmp instead of ""?
// FIXME: @creack "name" is a terrible variable name
name, err := ioutil.TempDir("/tmp", "docker-build")
@ -419,7 +421,6 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
return "", ret.(error)
}
b.lastContainer = nil
fmt.Fprintf(b.out, " ---> %v\n", utils.TruncateID(b.image))
}
if b.image != "" {
@ -429,7 +430,7 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
return "", fmt.Errorf("An error occured during the build\n")
}
func NewBuildFile(srv *Server, out io.Writer) BuildFile {
func NewBuildFile(srv *Server, out io.Writer, verbose bool) BuildFile {
return &buildFile{
builder: NewBuilder(srv.runtime),
runtime: srv.runtime,
@ -438,5 +439,6 @@ func NewBuildFile(srv *Server, out io.Writer) BuildFile {
out: out,
tmpContainers: make(map[string]struct{}),
tmpImages: make(map[string]struct{}),
verbose: verbose,
}
}


@ -87,6 +87,15 @@ run [ "$FOO" = "BAR" ]
from %s
ENTRYPOINT /bin/echo
CMD Hello world
`,
nil,
},
{
`
from %s
VOLUME /test
CMD Hello world
`,
nil,
},
@ -96,18 +105,112 @@ CMD Hello world
func TestBuild(t *testing.T) {
for _, ctx := range testContexts {
runtime := mkRuntime(t)
defer nuke(runtime)
buildImage(ctx, t)
}
}
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
func buildImage(context testContextTemplate, t *testing.T) *Image {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
buildfile := NewBuildFile(srv, ioutil.Discard)
if _, err := buildfile.Build(mkTestContext(ctx.dockerfile, ctx.files, t)); err != nil {
t.Fatal(err)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
buildfile := NewBuildFile(srv, ioutil.Discard, false)
id, err := buildfile.Build(mkTestContext(context.dockerfile, context.files, t))
if err != nil {
t.Fatal(err)
}
img, err := srv.ImageInspect(id)
if err != nil {
t.Fatal(err)
}
return img
}
func TestVolume(t *testing.T) {
img := buildImage(testContextTemplate{`
from %s
volume /test
cmd Hello world
`, nil}, t)
if len(img.Config.Volumes) == 0 {
t.Fail()
}
for key := range img.Config.Volumes {
if key != "/test" {
t.Fail()
}
}
}
func TestBuildMaintainer(t *testing.T) {
img := buildImage(testContextTemplate{`
from %s
maintainer dockerio
`, nil}, t)
if img.Author != "dockerio" {
t.Fail()
}
}
func TestBuildEnv(t *testing.T) {
img := buildImage(testContextTemplate{`
from %s
env port 4243
`,
nil}, t)
if img.Config.Env[0] != "port=4243" {
t.Fail()
}
}
func TestBuildCmd(t *testing.T) {
img := buildImage(testContextTemplate{`
from %s
cmd ["/bin/echo", "Hello World"]
`,
nil}, t)
if img.Config.Cmd[0] != "/bin/echo" {
t.Log(img.Config.Cmd[0])
t.Fail()
}
if img.Config.Cmd[1] != "Hello World" {
t.Log(img.Config.Cmd[1])
t.Fail()
}
}
func TestBuildExpose(t *testing.T) {
img := buildImage(testContextTemplate{`
from %s
expose 4243
`,
nil}, t)
if img.Config.PortSpecs[0] != "4243" {
t.Fail()
}
}
func TestBuildEntrypoint(t *testing.T) {
img := buildImage(testContextTemplate{`
from %s
entrypoint ["/bin/echo"]
`,
nil}, t)
if img.Config.Entrypoint[0] != "/bin/echo" {
t.Log(img.Config.Entrypoint[0])
t.Fail()
}
}


@ -27,7 +27,7 @@ import (
"unicode"
)
const VERSION = "0.4.8"
const VERSION = "0.5.0-dev"
var (
GITCOMMIT string
@ -89,12 +89,13 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
{"login", "Register or Login to the docker registry server"},
{"logs", "Fetch the logs of a container"},
{"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"},
{"top", "Lookup the running processes of a container"},
{"ps", "List containers"},
{"pull", "Pull an image or a repository from the docker registry server"},
{"push", "Push an image or a repository to the docker registry server"},
{"restart", "Restart a running container"},
{"rm", "Remove a container"},
{"rmi", "Remove an image"},
{"rm", "Remove one or more containers"},
{"rmi", "Remove one or more images"},
{"run", "Run a command in a new container"},
{"search", "Search for an image in the docker index"},
{"start", "Start a stopped container"},
@ -157,6 +158,8 @@ func mkBuildContext(dockerfile string, files [][2]string) (Archive, error) {
func (cli *DockerCli) CmdBuild(args ...string) error {
cmd := Subcmd("build", "[OPTIONS] PATH | URL | -", "Build a new container image from the source code at PATH")
tag := cmd.String("t", "", "Tag to be applied to the resulting image in case of success")
suppressOutput := cmd.Bool("q", false, "Suppress verbose build output")
if err := cmd.Parse(args); err != nil {
return nil
}
@ -194,6 +197,10 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
// Upload the build context
v := &url.Values{}
v.Set("t", *tag)
if *suppressOutput {
v.Set("q", "1")
}
if isRemote {
v.Set("remote", cmd.Arg(0))
}
@ -279,47 +286,66 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
return readStringOnRawTerminal(stdin, stdout, false)
}
oldState, err := term.SetRawTerminal(cli.terminalFd)
cmd := Subcmd("login", "[OPTIONS]", "Register or Login to the docker registry server")
flUsername := cmd.String("u", "", "username")
flPassword := cmd.String("p", "", "password")
flEmail := cmd.String("e", "", "email")
err := cmd.Parse(args)
if err != nil {
return err
}
defer term.RestoreTerminal(cli.terminalFd, oldState)
cmd := Subcmd("login", "", "Register or Login to the docker registry server")
if err := cmd.Parse(args); err != nil {
return nil
}
var oldState *term.State
if *flUsername == "" || *flPassword == "" || *flEmail == "" {
oldState, err = term.SetRawTerminal(cli.terminalFd)
if err != nil {
return err
}
defer term.RestoreTerminal(cli.terminalFd, oldState)
}
var (
username string
password string
email string
)
fmt.Fprintf(cli.out, "Username (%s):", cli.authConfig.Username)
username = readAndEchoString(cli.in, cli.out)
if username == "" {
username = cli.authConfig.Username
if *flUsername == "" {
fmt.Fprintf(cli.out, "Username (%s): ", cli.authConfig.Username)
username = readAndEchoString(cli.in, cli.out)
if username == "" {
username = cli.authConfig.Username
}
} else {
username = *flUsername
}
if username != cli.authConfig.Username {
fmt.Fprintf(cli.out, "Password: ")
password = readString(cli.in, cli.out)
if password == "" {
return fmt.Errorf("Error : Password Required")
if *flPassword == "" {
fmt.Fprintf(cli.out, "Password: ")
password = readString(cli.in, cli.out)
if password == "" {
return fmt.Errorf("Error : Password Required")
}
} else {
password = *flPassword
}
fmt.Fprintf(cli.out, "Email (%s): ", cli.authConfig.Email)
email = readAndEchoString(cli.in, cli.out)
if email == "" {
email = cli.authConfig.Email
if *flEmail == "" {
fmt.Fprintf(cli.out, "Email (%s): ", cli.authConfig.Email)
email = readAndEchoString(cli.in, cli.out)
if email == "" {
email = cli.authConfig.Email
}
} else {
email = *flEmail
}
} else {
password = cli.authConfig.Password
email = cli.authConfig.Email
}
term.RestoreTerminal(cli.terminalFd, oldState)
if oldState != nil {
term.RestoreTerminal(cli.terminalFd, oldState)
}
cli.authConfig.Username = username
cli.authConfig.Password = password
cli.authConfig.Email = email
@ -449,7 +475,7 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
func (cli *DockerCli) CmdStop(args ...string) error {
cmd := Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container")
nSeconds := cmd.Int("t", 10, "wait t seconds before killing the container")
nSeconds := cmd.Int("t", 10, "Number of seconds to try to stop for before killing the container. Default=10")
if err := cmd.Parse(args); err != nil {
return nil
}
@ -474,7 +500,7 @@ func (cli *DockerCli) CmdStop(args ...string) error {
func (cli *DockerCli) CmdRestart(args ...string) error {
cmd := Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a running container")
nSeconds := cmd.Int("t", 10, "wait t seconds before killing the container")
nSeconds := cmd.Int("t", 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10")
if err := cmd.Parse(args); err != nil {
return nil
}
@ -554,6 +580,33 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
return nil
}
func (cli *DockerCli) CmdTop(args ...string) error {
cmd := Subcmd("top", "CONTAINER", "Lookup the running processes of a container")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
body, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top", nil)
if err != nil {
return err
}
var procs []APITop
err = json.Unmarshal(body, &procs)
if err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
fmt.Fprintln(w, "PID\tTTY\tTIME\tCMD")
for _, proc := range procs {
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", proc.PID, proc.Tty, proc.Time, proc.Cmd)
}
w.Flush()
return nil
}
func (cli *DockerCli) CmdPort(args ...string) error {
cmd := Subcmd("port", "CONTAINER PRIVATE_PORT", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT")
if err := cmd.Parse(args); err != nil {
@ -564,6 +617,13 @@ func (cli *DockerCli) CmdPort(args ...string) error {
return nil
}
port := cmd.Arg(1)
proto := "Tcp"
parts := strings.SplitN(port, "/", 2)
if len(parts) == 2 && len(parts[1]) != 0 {
port = parts[0]
proto = strings.ToUpper(parts[1][:1]) + strings.ToLower(parts[1][1:])
}
body, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil)
if err != nil {
return err
@ -574,7 +634,7 @@ func (cli *DockerCli) CmdPort(args ...string) error {
return err
}
if frontend, exists := out.NetworkSettings.PortMapping[cmd.Arg(1)]; exists {
if frontend, exists := out.NetworkSettings.PortMapping[proto][port]; exists {
fmt.Fprintf(cli.out, "%s\n", frontend)
} else {
return fmt.Errorf("Error: No private port '%s' allocated on %s", cmd.Arg(1), cmd.Arg(0))
@ -584,7 +644,7 @@ func (cli *DockerCli) CmdPort(args ...string) error {
// 'docker rmi IMAGE' removes all images with the name IMAGE
func (cli *DockerCli) CmdRmi(args ...string) error {
cmd := Subcmd("rmi", "IMAGE [IMAGE...]", "Remove an image")
cmd := Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images")
if err := cmd.Parse(args); err != nil {
return nil
}
@ -649,7 +709,7 @@ func (cli *DockerCli) CmdHistory(args ...string) error {
}
func (cli *DockerCli) CmdRm(args ...string) error {
cmd := Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove a container")
cmd := Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove one or more containers")
v := cmd.Bool("v", false, "Remove the volumes associated to the container")
if err := cmd.Parse(args); err != nil {
return nil
@ -719,7 +779,7 @@ func (cli *DockerCli) CmdImport(args ...string) error {
}
func (cli *DockerCli) CmdPush(args ...string) error {
cmd := Subcmd("push", "[OPTION] NAME", "Push an image or a repository to the registry")
cmd := Subcmd("push", "NAME", "Push an image or a repository to the registry")
if err := cmd.Parse(args); err != nil {
return nil
}
@ -767,7 +827,9 @@ func (cli *DockerCli) CmdPull(args ...string) error {
}
remote, parsedTag := utils.ParseRepositoryTag(cmd.Arg(0))
*tag = parsedTag
if *tag == "" {
*tag = parsedTag
}
v := url.Values{}
v.Set("fromImage", remote)
@ -1043,10 +1105,7 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
return nil
}
if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?logs=1&stdout=1", false, nil, cli.out); err != nil {
return err
}
if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?logs=1&stderr=1", false, nil, cli.err); err != nil {
if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?logs=1&stdout=1&stderr=1", false, nil, cli.out); err != nil {
return err
}
return nil
@ -1193,10 +1252,22 @@ func (opts PathOpts) String() string {
}
func (opts PathOpts) Set(val string) error {
if !filepath.IsAbs(val) {
return fmt.Errorf("%s is not an absolute path", val)
var containerPath string
splited := strings.SplitN(val, ":", 2)
if len(splited) == 1 {
containerPath = splited[0]
val = filepath.Clean(splited[0])
} else {
containerPath = splited[1]
val = fmt.Sprintf("%s:%s", splited[0], filepath.Clean(splited[1]))
}
opts[filepath.Clean(val)] = struct{}{}
if !filepath.IsAbs(containerPath) {
utils.Debugf("%s is not an absolute path", containerPath)
return fmt.Errorf("%s is not an absolute path", containerPath)
}
opts[val] = struct{}{}
return nil
}
@ -1237,6 +1308,18 @@ func (cli *DockerCli) CmdRun(args ...string) error {
return nil
}
var containerIDFile *os.File
if len(hostConfig.ContainerIDFile) > 0 {
if _, err := ioutil.ReadFile(hostConfig.ContainerIDFile); err == nil {
return fmt.Errorf("cid file found, make sure the other container isn't running or delete %s", hostConfig.ContainerIDFile)
}
containerIDFile, err = os.Create(hostConfig.ContainerIDFile)
if err != nil {
return fmt.Errorf("failed to create the container ID file: %s", err)
}
defer containerIDFile.Close()
}
//create the container
body, statusCode, err := cli.call("POST", "/containers/create", config)
//if image not found try to pull it
@ -1267,6 +1350,11 @@ func (cli *DockerCli) CmdRun(args ...string) error {
for _, warning := range runResult.Warnings {
fmt.Fprintf(cli.err, "WARNING: %s\n", warning)
}
if len(hostConfig.ContainerIDFile) > 0 {
if _, err = containerIDFile.WriteString(runResult.ID); err != nil {
return fmt.Errorf("failed to write the container ID to the file: %s", err)
}
}
//start the container
if _, _, err = cli.call("POST", "/containers/"+runResult.ID+"/start", hostConfig); err != nil {


@ -59,7 +59,6 @@ func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error
return nil
}
// TestRunHostname checks that 'docker run -h' correctly sets a custom hostname
func TestRunHostname(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
@ -91,7 +90,6 @@ func TestRunHostname(t *testing.T) {
}
// TestAttachStdin checks attaching to stdin without stdout and stderr.
// 'docker run -i -a stdin' should send the client's stdin to the command,
// then detach from it and print the container id.
@ -144,15 +142,17 @@ func TestRunAttachStdin(t *testing.T) {
})
// Check logs
if cmdLogs, err := container.ReadLog("stdout"); err != nil {
if cmdLogs, err := container.ReadLog("json"); err != nil {
t.Fatal(err)
} else {
if output, err := ioutil.ReadAll(cmdLogs); err != nil {
t.Fatal(err)
} else {
expectedLog := "hello\nhi there\n"
if string(output) != expectedLog {
t.Fatalf("Unexpected logs: should be '%s', not '%s'\n", expectedLog, output)
expectedLogs := []string{"{\"log\":\"hello\\n\",\"stream\":\"stdout\"", "{\"log\":\"hi there\\n\",\"stream\":\"stdout\""}
for _, expectedLog := range expectedLogs {
if !strings.Contains(string(output), expectedLog) {
t.Fatalf("Unexpected logs: should contains '%s', it is not '%s'\n", expectedLog, output)
}
}
}
}


@ -80,7 +80,8 @@ type Config struct {
}
type HostConfig struct {
Binds []string
Binds []string
ContainerIDFile string
}
type BindMap struct {
@ -103,6 +104,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
flStdin := cmd.Bool("i", false, "Keep stdin open even if not attached")
flTty := cmd.Bool("t", false, "Allocate a pseudo-tty")
flMemory := cmd.Int64("m", 0, "Memory limit (in bytes)")
flContainerIDFile := cmd.String("cidfile", "", "Write the container ID to the file")
if capabilities != nil && *flMemory > 0 && !capabilities.MemoryLimit {
//fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
@ -121,14 +123,11 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
cmd.Var(&flDns, "dns", "Set custom dns servers")
flVolumes := NewPathOpts()
cmd.Var(flVolumes, "v", "Attach a data volume")
cmd.Var(flVolumes, "v", "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")
flVolumesFrom := cmd.String("volumes-from", "", "Mount volumes from the specified container")
flEntrypoint := cmd.String("entrypoint", "", "Overwrite the default entrypoint of the image")
var flBinds ListOpts
cmd.Var(&flBinds, "b", "Bind mount a volume from the host (e.g. -b /host:/container)")
if err := cmd.Parse(args); err != nil {
return nil, nil, cmd, err
}
@ -146,11 +145,17 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
}
}
var binds []string
// add any bind targets to the list of container volumes
for _, bind := range flBinds {
for bind := range flVolumes {
arr := strings.Split(bind, ":")
dstDir := arr[1]
flVolumes[dstDir] = struct{}{}
if len(arr) > 1 {
dstDir := arr[1]
flVolumes[dstDir] = struct{}{}
binds = append(binds, bind)
delete(flVolumes, bind)
}
}
parsedArgs := cmd.Args()
@ -187,7 +192,8 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
Entrypoint: entrypoint,
}
hostConfig := &HostConfig{
Binds: flBinds,
Binds: binds,
ContainerIDFile: *flContainerIDFile,
}
if capabilities != nil && *flMemory > 0 && !capabilities.SwapLimit {
@ -270,6 +276,26 @@ func (container *Container) ToDisk() (err error) {
return ioutil.WriteFile(container.jsonPath(), data, 0666)
}
func (container *Container) ReadHostConfig() (*HostConfig, error) {
data, err := ioutil.ReadFile(container.hostConfigPath())
if err != nil {
return &HostConfig{}, err
}
hostConfig := &HostConfig{}
if err := json.Unmarshal(data, hostConfig); err != nil {
return &HostConfig{}, err
}
return hostConfig, nil
}
func (container *Container) SaveHostConfig(hostConfig *HostConfig) (err error) {
data, err := json.Marshal(hostConfig)
if err != nil {
return
}
return ioutil.WriteFile(container.hostConfigPath(), data, 0666)
}
func (container *Container) generateLXCConfig() error {
fo, err := os.Create(container.lxcConfigPath())
if err != nil {
@ -474,6 +500,10 @@ func (container *Container) Start(hostConfig *HostConfig) error {
container.State.Lock()
defer container.State.Unlock()
if len(hostConfig.Binds) == 0 {
hostConfig, _ = container.ReadHostConfig()
}
if container.State.Running {
return fmt.Errorf("The container %s is already running.", container.ID)
}
@ -493,8 +523,6 @@ func (container *Container) Start(hostConfig *HostConfig) error {
log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
container.Config.MemorySwap = -1
}
container.Volumes = make(map[string]string)
container.VolumesRW = make(map[string]bool)
// Create the requested bind mounts
binds := make(map[string]BindMap)
@ -534,30 +562,35 @@ func (container *Container) Start(hostConfig *HostConfig) error {
// FIXME: evaluate volumes-from before individual volumes, so that the latter can override the former.
// Create the requested volumes
for volPath := range container.Config.Volumes {
volPath = path.Clean(volPath)
// If an external bind is defined for this volume, use that as a source
if bindMap, exists := binds[volPath]; exists {
container.Volumes[volPath] = bindMap.SrcPath
if strings.ToLower(bindMap.Mode) == "rw" {
container.VolumesRW[volPath] = true
if container.Volumes == nil || len(container.Volumes) == 0 {
container.Volumes = make(map[string]string)
container.VolumesRW = make(map[string]bool)
for volPath := range container.Config.Volumes {
volPath = path.Clean(volPath)
// If an external bind is defined for this volume, use that as a source
if bindMap, exists := binds[volPath]; exists {
container.Volumes[volPath] = bindMap.SrcPath
if strings.ToLower(bindMap.Mode) == "rw" {
container.VolumesRW[volPath] = true
}
// Otherwise create a directory in $ROOT/volumes/ and use that
} else {
c, err := container.runtime.volumes.Create(nil, container, "", "", nil)
if err != nil {
return err
}
srcPath, err := c.layer()
if err != nil {
return err
}
container.Volumes[volPath] = srcPath
container.VolumesRW[volPath] = true // RW by default
}
// Otherwise create a directory in $ROOT/volumes/ and use that
} else {
c, err := container.runtime.volumes.Create(nil, container, "", "", nil)
if err != nil {
return err
// Create the mountpoint
if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
return err
}
srcPath, err := c.layer()
if err != nil {
return err
}
container.Volumes[volPath] = srcPath
container.VolumesRW[volPath] = true // RW by default
}
// Create the mountpoint
if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
return nil
}
}
@ -574,6 +607,9 @@ func (container *Container) Start(hostConfig *HostConfig) error {
return nil
}
container.Volumes[volPath] = id
if isRW, exists := c.VolumesRW[volPath]; exists {
container.VolumesRW[volPath] = isRW
}
}
}
@ -617,10 +653,10 @@ func (container *Container) Start(hostConfig *HostConfig) error {
container.cmd = exec.Command("lxc-start", params...)
// Setup logging of stdout and stderr to disk
if err := container.runtime.LogToDisk(container.stdout, container.logPath("stdout")); err != nil {
if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
return err
}
if err := container.runtime.LogToDisk(container.stderr, container.logPath("stderr")); err != nil {
if err := container.runtime.LogToDisk(container.stderr, container.logPath("json"), "stderr"); err != nil {
return err
}
@ -641,6 +677,7 @@ func (container *Container) Start(hostConfig *HostConfig) error {
container.waitLock = make(chan struct{})
container.ToDisk()
container.SaveHostConfig(hostConfig)
go container.monitor()
return nil
}
@ -678,13 +715,13 @@ func (container *Container) StdinPipe() (io.WriteCloser, error) {
func (container *Container) StdoutPipe() (io.ReadCloser, error) {
reader, writer := io.Pipe()
container.stdout.AddWriter(writer)
container.stdout.AddWriter(writer, "")
return utils.NewBufReader(reader), nil
}
func (container *Container) StderrPipe() (io.ReadCloser, error) {
reader, writer := io.Pipe()
container.stderr.AddWriter(writer)
container.stderr.AddWriter(writer, "")
return utils.NewBufReader(reader), nil
}
@ -979,6 +1016,10 @@ func (container *Container) ReadLog(name string) (io.Reader, error) {
return os.Open(container.logPath(name))
}
func (container *Container) hostConfigPath() string {
return path.Join(container.root, "hostconfig.json")
}
func (container *Container) jsonPath() string {
return path.Join(container.root, "config.json")
}


@ -758,6 +758,23 @@ func TestUser(t *testing.T) {
if !strings.Contains(string(output), "uid=1(daemon) gid=1(daemon)") {
t.Error(string(output))
}
// Test a wrong username
container, err = builder.Create(&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"id"},
User: "unkownuser",
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
output, err = container.Output()
if container.State.ExitCode == 0 {
t.Fatal("Starting container with wrong uid should fail but it passed.")
}
}
func TestMultipleContainers(t *testing.T) {
@ -1123,18 +1140,114 @@ func TestBindMounts(t *testing.T) {
writeFile(path.Join(tmpDir, "touch-me"), "", t)
// Test reading from a read-only bind mount
stdout, _ := runContainer(r, []string{"-b", fmt.Sprintf("%s:/tmp:ro", tmpDir), "_", "ls", "/tmp"}, t)
stdout, _ := runContainer(r, []string{"-v", fmt.Sprintf("%s:/tmp:ro", tmpDir), "_", "ls", "/tmp"}, t)
if !strings.Contains(stdout, "touch-me") {
t.Fatal("Container failed to read from bind mount")
}
// test writing to bind mount
runContainer(r, []string{"-b", fmt.Sprintf("%s:/tmp:rw", tmpDir), "_", "touch", "/tmp/holla"}, t)
runContainer(r, []string{"-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "_", "touch", "/tmp/holla"}, t)
readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist
// test mounting to an illegal destination directory
if _, err := runContainer(r, []string{"-b", fmt.Sprintf("%s:.", tmpDir), "ls", "."}, nil); err == nil {
if _, err := runContainer(r, []string{"-v", fmt.Sprintf("%s:.", tmpDir), "ls", "."}, nil); err == nil {
t.Fatal("Container bind mounted illegal directory")
}
}
// Test that VolumesRW values are copied to the new container. Regression test for #1201
func TestVolumesFromReadonlyMount(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"},
Volumes: map[string]struct{}{"/test": {}},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
_, err = container.Output()
if err != nil {
t.Fatal(err)
}
if !container.VolumesRW["/test"] {
t.Fail()
}
container2, err := NewBuilder(runtime).Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"},
VolumesFrom: container.ID,
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container2)
_, err = container2.Output()
if err != nil {
t.Fatal(err)
}
if container.Volumes["/test"] != container2.Volumes["/test"] {
t.Fail()
}
actual, exists := container2.VolumesRW["/test"]
if !exists {
t.Fail()
}
if container.VolumesRW["/test"] != actual {
t.Fail()
}
}
// Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819.
func TestRestartWithVolumes(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"echo", "-n", "foobar"},
Volumes: map[string]struct{}{"/test": {}},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
for key := range container.Config.Volumes {
if key != "/test" {
t.Fail()
}
}
_, err = container.Output()
if err != nil {
t.Fatal(err)
}
expected := container.Volumes["/test"]
if expected == "" {
t.Fail()
}
// Run the container again to verify the volume path persists
_, err = container.Output()
if err != nil {
t.Fatal(err)
}
actual := container.Volumes["/test"]
if expected != actual {
t.Fatalf("Expected volume path: %s Actual path: %s", expected, actual)
}
}


@ -29,6 +29,11 @@ You can still call an old version of the api using /v1.0/images/<name>/insert
What's new
----------
Listing processes (/top):
- List the processes inside a container
Builder (/build):
- Simplify the upload of the build context


@ -220,6 +220,46 @@ Inspect a container
:statuscode 500: server error
List processes running inside a container
*****************************************
.. http:get:: /containers/(id)/top
List processes running inside the container ``id``
**Example request**:
.. sourcecode:: http
GET /containers/4fa6e0f0c678/top HTTP/1.1
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
[
{
"PID":"11935",
"Tty":"pts/2",
"Time":"00:00:00",
"Cmd":"sh"
},
{
"PID":"12140",
"Tty":"pts/2",
"Time":"00:00:00",
"Cmd":"sleep"
}
]
:statuscode 200: no error
:statuscode 404: no such container
:statuscode 500: server error
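
For quick reference, a minimal ``curl`` sketch of this call; it assumes a daemon reachable over TCP on port 4243, and the container ID is the one from the example above (adjust host, port and ID to your setup):

.. code-block:: bash

    # illustrative only: host, port and container ID are assumptions
    curl http://localhost:4243/containers/4fa6e0f0c678/top
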
Inspect changes on a container's filesystem
*******************************************
@ -881,6 +921,7 @@ Build an image from Dockerfile via stdin
The Content-type header should be set to "application/tar".
:query t: tag to be applied to the resulting image in case of success
:query q: suppress verbose build output
:statuscode 200: no error
:statuscode 500: server error
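
As an illustration only (the ``/build`` path, host/port and the ``context.tar`` file are assumptions based on this changeset, not confirmed by the excerpt above), a quiet build could be triggered like this:

.. code-block:: bash

    # POST a tar of the build context; 't' tags the result, 'q=1' suppresses verbose output
    # endpoint path, host/port and context.tar are placeholders
    curl -X POST -H "Content-Type: application/tar" \
         --data-binary @context.tar \
         "http://localhost:4243/build?t=myrepo/myimage&q=1"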


@ -52,5 +52,6 @@ Available Commands
command/start
command/stop
command/tag
command/top
command/version
command/wait


@ -11,6 +11,7 @@
Usage: docker build [OPTIONS] PATH | URL | -
Build a new container image from the source code at PATH
-t="": Tag to be applied to the resulting image in case of success.
-q=false: Suppress verbose build output.
When a single Dockerfile is given as the URL, no context is set. When a git repository is set as the URL, the repository is used as the context.
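
A brief sketch of the new flag in use (the repository name is a placeholder):

.. code-block:: bash

    # build from the current directory, tag the result and suppress verbose output
    docker build -q -t myrepo/myimage .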


@ -8,6 +8,10 @@
::
Usage: docker login
Usage: docker login [OPTIONS]
Register or Login to the docker registry server
-e="": email
-p="": password
-u="": username


@ -10,4 +10,4 @@
Usage: docker rm [OPTIONS] CONTAINER
Remove a container
Remove one or more containers


@ -8,6 +8,6 @@
::
Usage: docker rmimage [OPTIONS] IMAGE
Usage: docker rmi IMAGE [IMAGE...]
Remove an image
Remove one or more images


@ -14,6 +14,7 @@
-a=map[]: Attach to stdin, stdout or stderr.
-c=0: CPU shares (relative weight)
-cidfile="": Write the container ID to the file
-d=false: Detached mode: leave the container running in the background
-e=[]: Set environment variables
-h="": Container host name
@ -23,7 +24,16 @@
-t=false: Allocate a pseudo-tty
-u="": Username or UID
-dns=[]: Set custom dns servers for the container
-v=[]: Creates a new volume and mounts it at the specified path.
-v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "host-dir" is missing, then docker creates a new volume.
-volumes-from="": Mount all volumes from the given container.
-b=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]
-entrypoint="": Overwrite the default entrypoint set by the image.
Examples
--------
.. code-block:: bash
docker run -cidfile /tmp/docker_test.cid ubuntu echo "test"
| This will create a container and print "test" to the console. The cidfile flag makes docker attempt to create a new file and write the container ID to it. If the file exists already, docker will return an error. Docker will close this file when docker run exits.
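
The reworked -v flag can be sketched the same way; the paths and image name are illustrative:

.. code-block:: bash

    # bind mount a host directory read-only and create a fresh volume at /scratch
    docker run -v /home/user/data:/data:ro -v /scratch ubuntu ls /data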


@ -0,0 +1,13 @@
:title: Top Command
:description: Lookup the running processes of a container
:keywords: top, docker, container, documentation
=======================================================
``top`` -- Lookup the running processes of a container
=======================================================
::
Usage: docker top CONTAINER
Lookup the running processes of a container
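
A short illustration; the container ID and the process list mirror the Remote API example and will differ on your system:

.. code-block:: bash

    docker top 4fa6e0f0c678

    # example output, illustrative only
    PID        TTY       TIME        CMD
    11935      pts/2     00:00:00    sh
    12140      pts/2     00:00:00    sleep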


@ -1,25 +1,27 @@
:title: Docker Builder
:title: Dockerfile Builder
:description: Docker Builder specifies a simple DSL which allows you to automate the steps you would normally manually take to create an image.
:keywords: builder, docker, Docker Builder, automation, image creation
==============
Docker Builder
==============
==================
Dockerfile Builder
==================
**Docker can act as a builder** and read instructions from a text
Dockerfile to automate the steps you would otherwise make manually to
create an image. Executing ``docker build`` will run your steps and
commit them along the way, giving you a final image.
.. contents:: Table of Contents
Docker Builder specifes a simple DSL which allows you to automate the steps you
would normally manually take to create an image. Docker Build will run your
steps and commit them along the way, giving you a final image.
1. Usage
========
To build an image from a source repository, create a description file called `Dockerfile`
at the root of your repository. This file will describe the steps to assemble
the image.
To build an image from a source repository, create a description file
called ``Dockerfile`` at the root of your repository. This file will
describe the steps to assemble the image.
Then call `docker build` with the path of your source repository as argument:
Then call ``docker build`` with the path of your source repository as
argument:
``docker build .``
@ -36,131 +38,164 @@ before finally outputting the ID of your new image.
The Dockerfile format is quite simple:
``instruction arguments``
::
# Comment
INSTRUCTION arguments
The Instruction is not case-sensitive, however convention is for them to be
UPPERCASE in order to distinguish them from arguments more easily.
Dockerfiles are evaluated in order, therefore the first instruction must be
`FROM` in order to specify the base image from which you are building.
Docker evaluates the instructions in a Dockerfile in order. **The first
instruction must be `FROM`** in order to specify the base image from
which you are building.
Docker will ignore lines in Dockerfiles prefixed with "`#`", so you may add
comment lines. A comment marker in the rest of the line will be treated as an
argument.
Docker will ignore **comment lines** *beginning* with ``#``. A comment
marker anywhere in the rest of the line will be treated as an argument.
2. Instructions
3. Instructions
===============
Docker builder comes with a set of instructions, described below.
Here is the set of instructions you can use in a ``Dockerfile`` for
building images.
2.1 FROM
3.1 FROM
--------
``FROM <image>``
The `FROM` instruction sets the base image for subsequent instructions. As such,
a valid Dockerfile must have it as its first instruction.
The ``FROM`` instruction sets the :ref:`base_image_def` for subsequent
instructions. As such, a valid Dockerfile must have ``FROM`` as its
first instruction.
`FROM` can be included multiple times within a single Dockerfile in order to
create multiple images. Simply make a note of the last image id output by the
commit before each new `FROM` command.
``FROM`` must be the first non-comment instruction in the
``Dockerfile``.
2.2 MAINTAINER
``FROM`` can appear multiple times within a single Dockerfile in order
to create multiple images. Simply make a note of the last image id
output by the commit before each new ``FROM`` command.
3.2 MAINTAINER
--------------
``MAINTAINER <name>``
The `MAINTAINER` instruction allows you to set the Author field of the generated
images.
The ``MAINTAINER`` instruction allows you to set the *Author* field of
the generated images.
2.3 RUN
3.3 RUN
-------
``RUN <command>``
The `RUN` instruction will execute any commands on the current image and commit
the results. The resulting committed image will be used for the next step in the
Dockerfile.
The ``RUN`` instruction will execute any commands on the current image
and commit the results. The resulting committed image will be used for
the next step in the Dockerfile.
Layering `RUN` instructions and generating commits conforms to the
core concepts of Docker where commits are cheap and containers can be created
from any point in an image's history, much like source control.
Layering ``RUN`` instructions and generating commits conforms to the
core concepts of Docker where commits are cheap and containers can be
created from any point in an image's history, much like source
control.
2.4 CMD
3.4 CMD
-------
``CMD <command>``
The `CMD` instruction sets the command to be executed when running the image.
This is functionally equivalent to running
`docker commit -run '{"Cmd": <command>}'` outside the builder.
The ``CMD`` instruction sets the command to be executed when running
the image. This is functionally equivalent to running ``docker commit
-run '{"Cmd": <command>}'`` outside the builder.
.. note::
Don't confuse `RUN` with `CMD`. `RUN` actually runs a command and commits
the result; `CMD` does not execute anything at build time, but specifies the
intended command for the image.
.. note::
Don't confuse `RUN` with `CMD`. `RUN` actually runs a
command and commits the result; `CMD` does not execute anything at
build time, but specifies the intended command for the image.
2.5 EXPOSE
3.5 EXPOSE
----------
``EXPOSE <port> [<port>...]``
The `EXPOSE` instruction sets ports to be publicly exposed when running the
image. This is functionally equivalent to running
`docker commit -run '{"PortSpecs": ["<port>", "<port2>"]}'` outside the builder.
The ``EXPOSE`` instruction sets ports to be publicly exposed when
running the image. This is functionally equivalent to running ``docker
commit -run '{"PortSpecs": ["<port>", "<port2>"]}'`` outside the
builder.
2.6 ENV
3.6 ENV
-------
``ENV <key> <value>``
The `ENV` instruction sets the environment variable `<key>` to the value
`<value>`. This value will be passed to all future ``RUN`` instructions. This is
functionally equivalent to prefixing the command with `<key>=<value>`
The ``ENV`` instruction sets the environment variable ``<key>`` to the
value ``<value>``. This value will be passed to all future ``RUN``
instructions. This is functionally equivalent to prefixing the command
with ``<key>=<value>``
.. note::
The environment variables will persist when a container is run from the resulting image.
.. note::
The environment variables will persist when a container is run
from the resulting image.
2.7 ADD
3.7 ADD
-------
``ADD <src> <dest>``
The `ADD` instruction will copy new files from <src> and add them to the container's filesystem at path `<dest>`.
The ``ADD`` instruction will copy new files from <src> and add them to
the container's filesystem at path ``<dest>``.
`<src>` must be the path to a file or directory relative to the source directory being built (also called the
context of the build) or a remote file URL.
``<src>`` must be the path to a file or directory relative to the
source directory being built (also called the *context* of the build) or
a remote file URL.
`<dest>` is the path at which the source will be copied in the destination container.
``<dest>`` is the path at which the source will be copied in the
destination container.
The copy obeys the following rules:
If `<src>` is a directory, the entire directory is copied, including filesystem metadata.
* If ``<src>`` is a directory, the entire directory is copied,
including filesystem metadata.
* If ``<src>``` is a tar archive in a recognized compression format
(identity, gzip, bzip2 or xz), it is unpacked as a directory.
If `<src>` is a tar archive in a recognized compression format (identity, gzip, bzip2 or xz), it
is unpacked as a directory.
When a directory is copied or unpacked, it has the same behavior as
``tar -x``: the result is the union of
When a directory is copied or unpacked, it has the same behavior as 'tar -x': the result is the union of
a) whatever existed at the destination path and b) the contents of the source tree, with conflicts resolved
in favor of b on a file-by-file basis.
1. whatever existed at the destination path and
2. the contents of the source tree,
If `<src>` is any other kind of file, it is copied individually along with its metadata. In this case,
if `<dst>` ends with a trailing slash '/', it will be considered a directory and the contents of `<src>`
will be written at `<dst>/base(<src>)`.
If `<dst>` does not end with a trailing slash, it will be considered a regular file and the contents
of `<src>` will be written at `<dst>`.
with conflicts resolved in favor of 2) on a file-by-file basis.
If `<dest>` doesn't exist, it is created along with all missing directories in its path. All new
files and directories are created with mode 0700, uid and gid 0.
* If ``<src>`` is any other kind of file, it is copied individually
along with its metadata. In this case, if ``<dst>`` ends with a
trailing slash ``/``, it will be considered a directory and the
contents of ``<src>`` will be written at ``<dst>/base(<src>)``.
* If ``<dst>`` does not end with a trailing slash, it will be
considered a regular file and the contents of ``<src>`` will be
written at ``<dst>``.
* If ``<dest>`` doesn't exist, it is created along with all missing
directories in its path. All new files and directories are created
with mode 0700, uid and gid 0.
2.8 ENTRYPOINT
3.8 ENTRYPOINT
--------------
``ENTRYPOINT /bin/echo``
The `ENTRYPOINT` instruction adds an entry command that will not be overwritten when arguments are passed to docker run, unlike the behavior of `CMD`. This allows arguments to be passed to the entrypoint. i.e. `docker run <image> -d` will pass the "-d" argument to the entrypoint.
The ``ENTRYPOINT`` instruction adds an entry command that will not be
overwritten when arguments are passed to docker run, unlike the
behavior of ``CMD``. This allows arguments to be passed to the
entrypoint. i.e. ``docker run <image> -d`` will pass the "-d" argument
to the entrypoint.
3. Dockerfile Examples
3.9 VOLUME
----------
``VOLUME ["/data"]``
The ``VOLUME`` instruction will add one or more new volumes to any
container created from the image.
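
Putting several of the instructions above together, here is a minimal illustrative Dockerfile (the base image name is a placeholder; fuller examples follow in the next section):

.. code-block:: bash

    # <base-image> is a placeholder, comments start with '#'
    FROM <base-image>
    MAINTAINER dockerio
    ENV PORT 4243
    EXPOSE 4243
    VOLUME ["/data"]
    ENTRYPOINT /bin/echo
    CMD Hello world
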
4. Dockerfile Examples
======================
.. code-block:: bash


@ -7,21 +7,69 @@
Working with Repositories
=========================
A *repository* is a hosted collection of tagged :ref:`images
<image_def>` that together create the file system for a container. The
repository's name is a tag that indicates the provenance of the
repository, i.e. who created it and where the original copy is
located.
Top-level repositories and user repositories
--------------------------------------------
You can find one or more repositories hosted on a *registry*. There
can be an implicit or explicit host name as part of the repository
tag. The implicit registry is located at ``index.docker.io``, the home
of "top-level" repositories and the Central Index. This registry may
also include public "user" repositories.
Generally, there are two types of repositories: Top-level repositories
which are controlled by the people behind Docker, and user
repositories.
So Docker is not only a tool for creating and managing your own
:ref:`containers <container_def>` -- **Docker is also a tool for
sharing**. The Docker project provides a Central Registry to host
public repositories, namespaced by user, and a Central Index which
provides user authentication and search over all the public
repositories. You can host your own Registry too! Docker acts as a
client for these services via ``docker search, pull, login`` and
``push``.
* Top-level repositories can easily be recognized by not having a ``/`` (slash) in their name. These repositories can generally be trusted.
* User repositories always come in the form of ``<username>/<repo_name>``. This is what your published images will look like.
* User images are not checked, it is therefore up to you whether or not you trust the creator of this image.
Top-level, User, and Your Own Repositories
------------------------------------------
There are two types of public repositories: *top-level* repositories
which are controlled by the Docker team, and *user* repositories
created by individual contributors.
Find public images available on the index
-----------------------------------------
* Top-level repositories can easily be recognized by **not** having a
``/`` (slash) in their name. These repositories can generally be
trusted.
* User repositories always come in the form of
``<username>/<repo_name>``. This is what your published images will
look like if you push to the public Central Registry.
* Only the authenticated user can push to their *username* namespace
on the Central Registry.
* User images are not checked, it is therefore up to you whether or
not you trust the creator of this image.
Right now (version 0.5), private repositories are only possible by
hosting `your own registry
<https://github.com/dotcloud/docker-registry>`_. To push or pull to a
repository on your own registry, you must prefix the tag with the
address of the registry's host, like this:
.. code-block:: bash
# Tag to create a repository with the full registry location.
# The location (e.g. localhost.localdomain:5000) becomes
# a permanent part of the repository name
docker tag 0u812deadbeef localhost.localdomain:5000/repo_name
# Push the new repository to its home location on localhost
docker push localhost.localdomain:5000/repo_name
Once a repository has your registry's host name as part of the tag,
you can push and pull it like any other repository, but it will
**not** be searchable (or indexed at all) in the Central Index, and
there will be no user name checking performed. Your registry will
function completely independently from the Central Index.
Find public images available on the Central Index
-------------------------------------------------
Search by name, namespace or description
@ -37,68 +85,48 @@ Download them simply by their name
docker pull <value>
Very similarly you can search for and browse the index online on https://index.docker.io
Very similarly you can search for and browse the index online on
https://index.docker.io
Connecting to the repository
----------------------------
Connecting to the Central Registry
----------------------------------
You can create a user on the central docker repository online, or by running
You can create a user on the central Docker Index online, or by running
.. code-block:: bash
docker login
This will prompt you for a username, which will become a public
namespace for your public repositories.
If your username does not exist it will prompt you to also enter a password and your e-mail address. It will then
automatically log you in.
If your username does not exist it will prompt you to also enter a
password and your e-mail address. It will then automatically log you
in.
Committing a container to a named image
---------------------------------------
In order to commit to the repository it is required to have committed your container to an image with your namespace.
In order to commit to the repository it is required to have committed
your container to an image within your username namespace.
.. code-block:: bash
# for example docker commit $CONTAINER_ID dhrp/kickassapp
docker commit <container_id> <your username>/<some_name>
docker commit <container_id> <username>/<repo_name>
Pushing a container to the repository
-----------------------------------------
Pushing a container to its repository
------------------------------------
In order to push an image to the repository you need to have committed your container to a named image (see above)
In order to push an image to its repository you need to have committed
your container to a named image (see above)
Now you can commit this image to the repository
.. code-block:: bash
# for example docker push dhrp/kickassapp
docker push <image-name>
Changing the server to connect to
----------------------------------
When you are running your own index and/or registry, You can change the server the docker client will connect to.
Variable
^^^^^^^^
.. code-block:: sh
DOCKER_INDEX_URL
Setting this environment variable on the docker server will change the URL docker index.
This address is used in commands such as ``docker login``, ``docker push`` and ``docker pull``.
The docker daemon doesn't need to be restarted for this parameter to take effect.
Example
^^^^^^^
.. code-block:: sh
docker -d &
export DOCKER_INDEX_URL="https://index.docker.io"
docker push <username>/<repo_name>


@ -30,11 +30,12 @@ up-to-date.
* CATEGORY should describe which part of the project is affected.
Valid categories are:
* Runtime
* Remote API
* Builder
* Documentation
* Hack
* Packaging
* Remote API
* Runtime
* DESCRIPTION: a concise description of the change that is relevant to the end-user,
using the present tense.
@ -53,6 +54,10 @@ up-to-date.
### 4. Run all tests
```bash
$ make test
```
### 5. Commit and create a pull request
```bash
@ -109,11 +114,20 @@ up-to-date.
### 9. Publish Ubuntu packages
If everything went well in the previous step, you can finalize the release by submitting the Ubuntu packages.
If everything went well in the previous step, you can finalize the release by submitting the Ubuntu
packages.
```bash
$ RELEASE_IMAGE=image_provided_by_infrastructure_maintainers
$ docker run -e RELEASE_PPA=1 $RELEASE_IMAGE
```
If that goes well, congratulations! You're done.
If that goes well, the Ubuntu Precise package is on its way. It will take anywhere from 0.5 to 30 hours
for the builders to complete their job, depending on builder demand at the time. At this point, the Quantal
and Raring packages need to be created using the Launchpad interface:
https://launchpad.net/~dotcloud/+archive/lxc-docker/+packages
Notify [the packager maintainers](https://github.com/dotcloud/docker/blob/master/packaging/MAINTAINERS)
who will ensure PPA is ready.
Congratulations! You're done


@ -68,6 +68,7 @@ func (proxy *TCPProxy) clientLoop(client *net.TCPConn, quit chan bool) {
from.CloseWrite()
}
}
to.CloseRead()
event <- written
}
utils.Debugf("Forwarding traffic between tcp/%v and tcp/%v", client.RemoteAddr(), backend.RemoteAddr())
@ -13,8 +13,8 @@ PKG_NAME=lxc-docker
ROOT_PATH=$(shell git rev-parse --show-toplevel)
GITHUB_PATH=github.com/dotcloud/docker
BUILD_SRC=build_src
VERSION_TAG?=v$(shell sed -E 's/.+\((.+)-.+\).+/\1/;q' changelog)
VERSION=$(shell echo ${VERSION_TAG} | cut -c2-)
VERSION=$(shell sed -En '0,/^\#\# /{s/^\#\# ([^ ]+).+/\1/p}' ../../CHANGELOG.md)
VERSION_TAG?=v${VERSION}
DOCKER_VERSION=${PKG_NAME}_${VERSION}
all:
@ -28,7 +28,6 @@ install:
mkdir -p $(DESTDIR)/usr/share/doc/lxc-docker
install -m 0755 src/${GITHUB_PATH}/docker/docker $(DESTDIR)/usr/bin/lxc-docker
cp debian/lxc-docker.1 $(DESTDIR)/usr/share/man/man1
cp debian/CHANGELOG.md $(DESTDIR)/usr/share/doc/lxc-docker/changelog
debian:
# Prepare docker source from revision ${VERSION_TAG}
@ -41,6 +40,7 @@ debian:
cp -r `ls | grep -v ${BUILD_SRC}` ${BUILD_SRC}/debian
cp ${ROOT_PATH}/README.md ${BUILD_SRC}
cp ${ROOT_PATH}/CHANGELOG.md ${BUILD_SRC}/debian
./parse_changelog.py < ../../CHANGELOG.md > ${BUILD_SRC}/debian/changelog
# Cleanup
rm -rf `find . -name '.git*'`
rm -f ${DOCKER_VERSION}*
@ -13,6 +13,9 @@ Vagrant::Config.run do |config|
# Install debian packaging dependencies and create debian packages
pkg_cmd = "apt-get -qq update; DEBIAN_FRONTEND=noninteractive apt-get install -qq -y #{PKG_DEP}; " \
"curl -s -o /go.tar.gz https://go.googlecode.com/files/go1.1.1.linux-amd64.tar.gz; " \
"tar -C /usr/local -xzf /go.tar.gz; rm /usr/bin/go; " \
"ln -s /usr/local/go/bin/go /usr/bin; "\
"export GPG_KEY='#{ENV['GPG_KEY']}'; cd /data/docker/packaging/debian; make debian"
config.vm.provision :shell, :inline => pkg_cmd
end
@ -0,0 +1,23 @@
#!/usr/bin/env python
'Parse main CHANGELOG.md from stdin outputing on stdout the debian changelog'
import sys, re, datetime

on_block = False
for line in sys.stdin.readlines():
    line = line.strip()
    if line.startswith('# ') or len(line) == 0:
        continue
    if line.startswith('## '):
        if on_block:
            print '\n -- dotCloud <ops@dotcloud.com> {0}\n'.format(date)
        version, date = line[3:].split()
        date = datetime.datetime.strptime(date, '(%Y-%m-%d)').strftime(
            '%a, %d %b %Y 00:00:00 -0700')
        on_block = True
        print 'lxc-docker ({0}-1) precise; urgency=low'.format(version)
        continue
    if on_block:
        print ' ' + line
print '\n -- dotCloud <ops@dotcloud.com> {0}'.format(date)
@ -141,7 +141,6 @@ func (runtime *Runtime) Register(container *Container) error {
utils.Debugf("Restarting")
container.State.Ghost = false
container.State.setStopped(0)
// assume empty host config
hostConfig := &HostConfig{}
if err := container.Start(hostConfig); err != nil {
return err
@ -168,12 +167,12 @@ func (runtime *Runtime) Register(container *Container) error {
return nil
}
func (runtime *Runtime) LogToDisk(src *utils.WriteBroadcaster, dst string) error {
func (runtime *Runtime) LogToDisk(src *utils.WriteBroadcaster, dst, stream string) error {
log, err := os.OpenFile(dst, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
if err != nil {
return err
}
src.AddWriter(log)
src.AddWriter(log, stream)
return nil
}
server.go
@ -1,6 +1,8 @@
package docker
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"github.com/dotcloud/docker/auth"
@ -12,6 +14,7 @@ import (
"net/http"
"net/url"
"os"
"os/exec"
"path"
"runtime"
"strings"
@ -98,7 +101,7 @@ func (srv *Server) ImageInsert(name, url, path string, out io.Writer, sf *utils.
return "", err
}
if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf.FormatProgress("Downloading", "%v/%v (%v)"), sf), path); err != nil {
if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf.FormatProgress("Downloading", "%8v/%v (%v)"), sf), path); err != nil {
return "", err
}
// FIXME: Handle custom repo, tag comment, author
@ -247,6 +250,40 @@ func (srv *Server) ImageHistory(name string) ([]APIHistory, error) {
}
func (srv *Server) ContainerTop(name string) ([]APITop, error) {
	if container := srv.runtime.Get(name); container != nil {
		output, err := exec.Command("lxc-ps", "--name", container.ID).CombinedOutput()
		if err != nil {
			return nil, fmt.Errorf("Error trying to use lxc-ps: %s (%s)", err, output)
		}
		var procs []APITop
		for i, line := range strings.Split(string(output), "\n") {
			if i == 0 || len(line) == 0 {
				continue
			}
			proc := APITop{}
			scanner := bufio.NewScanner(strings.NewReader(line))
			scanner.Split(bufio.ScanWords)
			if !scanner.Scan() {
				return nil, fmt.Errorf("Error trying to use lxc-ps")
			}
			// no scanner.Text because we skip container id
			scanner.Scan()
			proc.PID = scanner.Text()
			scanner.Scan()
			proc.Tty = scanner.Text()
			scanner.Scan()
			proc.Time = scanner.Text()
			scanner.Scan()
			proc.Cmd = scanner.Text()
			procs = append(procs, proc)
		}
		return procs, nil
	}
	return nil, fmt.Errorf("No such container: %s", name)
}
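For context on how this new function is consumed, below is a minimal client-side sketch that is not part of this change. It assumes the handler is reachable at a `/containers/<name>/top` route on a daemon listening on TCP port 4243 (both are assumptions here; the daemon may also listen on a unix socket), and it mirrors the four fields populated above (`PID`, `Tty`, `Time`, `Cmd`) in a local, hypothetical `topEntry` struct:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// topEntry mirrors the fields filled in by ContainerTop above.
type topEntry struct {
	PID  string
	Tty  string
	Time string
	Cmd  string
}

func main() {
	// Assumed address and route; adjust to your daemon's configuration.
	resp, err := http.Get("http://127.0.0.1:4243/containers/mycontainer/top")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var procs []topEntry
	if err := json.NewDecoder(resp.Body).Decode(&procs); err != nil {
		panic(err)
	}
	for _, p := range procs {
		fmt.Printf("%s\t%s\t%s\t%s\n", p.PID, p.Tty, p.Time, p.Cmd)
	}
}
```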
func (srv *Server) ContainerChanges(name string) ([]Change, error) {
if container := srv.runtime.Get(name); container != nil {
return container.Changes()
@ -343,7 +380,7 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin
return err
}
defer layer.Close()
if err := srv.runtime.graph.Register(utils.ProgressReader(layer, imgSize, out, sf.FormatProgress("Downloading", "%v/%v (%v)"), sf), false, img); err != nil {
if err := srv.runtime.graph.Register(utils.ProgressReader(layer, imgSize, out, sf.FormatProgress("Downloading", "%8v/%v (%v)"), sf), false, img); err != nil {
return err
}
}
@ -666,7 +703,7 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID,
}
// Send the layer
if err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf.FormatProgress("Pushing", "%v/%v (%v)"), sf), ep, token); err != nil {
if err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf.FormatProgress("Pushing", "%8v/%v (%v)"), sf), ep, token); err != nil {
return err
}
return nil
@ -736,7 +773,7 @@ func (srv *Server) ImageImport(src, repo, tag string, in io.Reader, out io.Write
if err != nil {
return err
}
archive = utils.ProgressReader(resp.Body, int(resp.ContentLength), out, sf.FormatProgress("Importing", "%v/%v (%v)"), sf)
archive = utils.ProgressReader(resp.Body, int(resp.ContentLength), out, sf.FormatProgress("Importing", "%8v/%v (%v)"), sf)
}
img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "", nil)
if err != nil {
@ -834,7 +871,6 @@ func (srv *Server) deleteImageAndChildren(id string, imgs *[]APIRmi) error {
if len(srv.runtime.repositories.ByID()[id]) != 0 {
return ErrImageReferenced
}
// If the image is not referenced but has children, go recursive
referenced := false
byParents, err := srv.runtime.graph.ByParent()
@ -888,8 +924,22 @@ func (srv *Server) deleteImageParents(img *Image, imgs *[]APIRmi) error {
}
func (srv *Server) deleteImage(img *Image, repoName, tag string) ([]APIRmi, error) {
//Untag the current image
imgs := []APIRmi{}
//If delete by id, see if the id belong only to one repository
if strings.Contains(img.ID, repoName) && tag == "" {
for _, repoAndTag := range srv.runtime.repositories.ByID()[img.ID] {
parsedRepo := strings.Split(repoAndTag, ":")[0]
if strings.Contains(img.ID, repoName) {
repoName = parsedRepo
} else if repoName != parsedRepo {
// the id belongs to multiple repos, like base:latest and user:test,
// in that case return conflict
return imgs, nil
}
}
}
//Untag the current image
tagDeleted, err := srv.runtime.repositories.Delete(repoName, tag)
if err != nil {
return nil, err
@ -1006,20 +1056,41 @@ func (srv *Server) ContainerAttach(name string, logs, stream, stdin, stdout, std
}
//logs
if logs {
if stdout {
cLog, err := container.ReadLog("stdout")
if err != nil {
utils.Debugf("Error reading logs (stdout): %s", err)
} else if _, err := io.Copy(out, cLog); err != nil {
utils.Debugf("Error streaming logs (stdout): %s", err)
cLog, err := container.ReadLog("json")
if err != nil && os.IsNotExist(err) {
// Legacy logs
utils.Debugf("Old logs format")
if stdout {
cLog, err := container.ReadLog("stdout")
if err != nil {
utils.Debugf("Error reading logs (stdout): %s", err)
} else if _, err := io.Copy(out, cLog); err != nil {
utils.Debugf("Error streaming logs (stdout): %s", err)
}
}
}
if stderr {
cLog, err := container.ReadLog("stderr")
if err != nil {
utils.Debugf("Error reading logs (stderr): %s", err)
} else if _, err := io.Copy(out, cLog); err != nil {
utils.Debugf("Error streaming logs (stderr): %s", err)
if stderr {
cLog, err := container.ReadLog("stderr")
if err != nil {
utils.Debugf("Error reading logs (stderr): %s", err)
} else if _, err := io.Copy(out, cLog); err != nil {
utils.Debugf("Error streaming logs (stderr): %s", err)
}
}
} else if err != nil {
utils.Debugf("Error reading logs (json): %s", err)
} else {
dec := json.NewDecoder(cLog)
for {
var l utils.JSONLog
if err := dec.Decode(&l); err == io.EOF {
break
} else if err != nil {
utils.Debugf("Error streaming logs: %s", err)
break
}
if (l.Stream == "stdout" && stdout) || (l.Stream == "stderr" && stderr) {
fmt.Fprintf(out, "%s", l.Log)
}
}
}
}
@ -3,10 +3,10 @@ package docker
import (
"flag"
"fmt"
"github.com/dotcloud/docker/utils"
"log"
"os"
"os/exec"
"os/user"
"strconv"
"strings"
"syscall"
@ -27,10 +27,7 @@ func changeUser(u string) {
if u == "" {
return
}
userent, err := user.LookupId(u)
if err != nil {
userent, err = user.Lookup(u)
}
userent, err := utils.UserLookup(u)
if err != nil {
log.Fatalf("Unable to find user %v: %v", u, err)
}
@ -204,15 +204,15 @@ func (store *TagStore) GetImage(repoName, tagOrID string) (*Image, error) {
} else if repo == nil {
return nil, nil
}
//go through all the tags, to see if tag is in fact an ID
if revision, exists := repo[tagOrID]; exists {
return store.graph.Get(revision)
}
// If no matching tag is found, search through images for a matching image id
for _, revision := range repo {
if strings.HasPrefix(revision, tagOrID) {
return store.graph.Get(revision)
}
}
if revision, exists := repo[tagOrID]; exists {
return store.graph.Get(revision)
}
return nil, nil
}
@ -2,6 +2,7 @@ import os
from buildbot.buildslave import BuildSlave
from buildbot.schedulers.forcesched import ForceScheduler
from buildbot.schedulers.basic import SingleBranchScheduler
from buildbot.schedulers.timed import Nightly
from buildbot.changes import filter
from buildbot.config import BuilderConfig
from buildbot.process.factory import BuildFactory
@ -40,12 +41,16 @@ c['db'] = {'db_url':"sqlite:///state.sqlite"}
c['slaves'] = [BuildSlave('buildworker', BUILDBOT_PWD)]
c['slavePortnum'] = PORT_MASTER
c['schedulers'] = [ForceScheduler(name='trigger',builderNames=[BUILDER_NAME])]
c['schedulers'].append(SingleBranchScheduler(name="all",
change_filter=filter.ChangeFilter(branch='master'),treeStableTimer=None,
builderNames=[BUILDER_NAME]))
# Schedulers
c['schedulers'] = [ForceScheduler(name='trigger', builderNames=[BUILDER_NAME,
'coverage'])]
c['schedulers'] += [SingleBranchScheduler(name="all",
change_filter=filter.ChangeFilter(branch='master'), treeStableTimer=None,
builderNames=[BUILDER_NAME])]
c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=['coverage'],
hour=0, minute=30)]
# Builder
# Builders
factory = BuildFactory()
factory.addStep(ShellCommand(description='Docker',logEnviron=False,usePTY=True,
command=["sh", "-c", Interpolate("cd ..; rm -rf build; export GOPATH={0}; "
@ -53,6 +58,16 @@ factory.addStep(ShellCommand(description='Docker',logEnviron=False,usePTY=True,
"go test -v".format(BUILDER_PATH,GITHUB_DOCKER,DOCKER_BUILD_PATH))]))
c['builders'] = [BuilderConfig(name=BUILDER_NAME,slavenames=['buildworker'],
factory=factory)]
# Docker coverage test
coverage_cmd = ('GOPATH=`pwd` go get -d github.com/dotcloud/docker\n'
'GOPATH=`pwd` go get github.com/axw/gocov/gocov\n'
'sudo -E GOPATH=`pwd` ./bin/gocov test github.com/dotcloud/docker | '
'./bin/gocov report')
factory = BuildFactory()
factory.addStep(ShellCommand(description='Coverage',logEnviron=False,usePTY=True,
command=coverage_cmd))
c['builders'] += [BuilderConfig(name='coverage',slavenames=['buildworker'],
factory=factory)]
# Status
authz_cfg = authz.Authz(auth=auth.BasicAuth([(TEST_USER, TEST_PWD)]),
@ -1,5 +1,9 @@
package docker
import (
"strings"
)
// Compare two Config struct. Do not compare the "Image" nor "Hostname" fields
// If OpenStdin is set, then it differs
func CompareConfig(a, b *Config) bool {
@ -20,7 +24,8 @@ func CompareConfig(a, b *Config) bool {
if len(a.Cmd) != len(b.Cmd) ||
len(a.Dns) != len(b.Dns) ||
len(a.Env) != len(b.Env) ||
len(a.PortSpecs) != len(b.PortSpecs) {
len(a.PortSpecs) != len(b.PortSpecs) ||
len(a.Entrypoint) != len(b.Entrypoint) {
return false
}
@ -67,6 +72,20 @@ func MergeConfig(userConf, imageConf *Config) {
}
if userConf.PortSpecs == nil || len(userConf.PortSpecs) == 0 {
userConf.PortSpecs = imageConf.PortSpecs
} else {
for _, imagePortSpec := range imageConf.PortSpecs {
found := false
imageNat, _ := parseNat(imagePortSpec)
for _, userPortSpec := range userConf.PortSpecs {
userNat, _ := parseNat(userPortSpec)
if imageNat.Proto == userNat.Proto && imageNat.Frontend == userNat.Frontend {
found = true
}
}
if !found {
userConf.PortSpecs = append(userConf.PortSpecs, imagePortSpec)
}
}
}
if !userConf.Tty {
userConf.Tty = imageConf.Tty
@ -79,14 +98,38 @@ func MergeConfig(userConf, imageConf *Config) {
}
if userConf.Env == nil || len(userConf.Env) == 0 {
userConf.Env = imageConf.Env
} else {
for _, imageEnv := range imageConf.Env {
found := false
imageEnvKey := strings.Split(imageEnv, "=")[0]
for _, userEnv := range userConf.Env {
userEnvKey := strings.Split(userEnv, "=")[0]
if imageEnvKey == userEnvKey {
found = true
}
}
if !found {
userConf.Env = append(userConf.Env, imageEnv)
}
}
}
if userConf.Cmd == nil || len(userConf.Cmd) == 0 {
userConf.Cmd = imageConf.Cmd
}
if userConf.Dns == nil || len(userConf.Dns) == 0 {
userConf.Dns = imageConf.Dns
} else {
//duplicates aren't an issue here
userConf.Dns = append(userConf.Dns, imageConf.Dns...)
}
if userConf.Entrypoint == nil || len(userConf.Entrypoint) == 0 {
userConf.Entrypoint = imageConf.Entrypoint
}
if userConf.Volumes == nil || len(userConf.Volumes) == 0 {
userConf.Volumes = imageConf.Volumes
} else {
for k, v := range imageConf.Volumes {
userConf.Volumes[k] = v
}
}
}
@ -14,6 +14,7 @@ import (
"net/http"
"os"
"os/exec"
"os/user"
"path/filepath"
"runtime"
"strconv"
@ -87,7 +88,7 @@ func (r *progressReader) Read(p []byte) (n int, err error) {
}
if r.readProgress-r.lastUpdate > updateEvery || err != nil {
if r.readTotal > 0 {
fmt.Fprintf(r.output, r.template, HumanSize(int64(r.readProgress)), HumanSize(int64(r.readTotal)), fmt.Sprintf("%2.0f%%", float64(r.readProgress)/float64(r.readTotal)*100))
fmt.Fprintf(r.output, r.template, HumanSize(int64(r.readProgress)), HumanSize(int64(r.readTotal)), fmt.Sprintf("%.0f%%", float64(r.readProgress)/float64(r.readTotal)*100))
} else {
fmt.Fprintf(r.output, r.template, r.readProgress, "?", "n/a")
}
@ -106,7 +107,7 @@ func (r *progressReader) Close() error {
func ProgressReader(r io.ReadCloser, size int, output io.Writer, template []byte, sf *StreamFormatter) *progressReader {
tpl := string(template)
if tpl == "" {
tpl = string(sf.FormatProgress("", "%v/%v (%v)"))
tpl = string(sf.FormatProgress("", "%8v/%v (%v)"))
}
return &progressReader{r, NewWriteFlusher(output), size, 0, 0, tpl, sf}
}
@ -147,7 +148,7 @@ func HumanSize(size int64) string {
sizef = sizef / 1000.0
i++
}
return fmt.Sprintf("%5.4g %s", sizef, units[i])
return fmt.Sprintf("%.4g %s", sizef, units[i])
}
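A small, self-contained illustration of the tightened format verbs used above (the values are arbitrary and only serve to show the difference in padding):

```go
package main

import "fmt"

func main() {
	// "%5.4g" pads the number to a minimum width of 5 characters; "%.4g" does not.
	fmt.Printf("[%5.4g %s]\n", 1.5, "kB") // [  1.5 kB]
	fmt.Printf("[%.4g %s]\n", 1.5, "kB")  // [1.5 kB]

	// Likewise "%2.0f%%" pads the percentage to 2 characters, "%.0f%%" does not.
	fmt.Printf("[%2.0f%%]\n", 7.0) // [ 7%]
	fmt.Printf("[%.0f%%]\n", 7.0)  // [7%]
}
```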
func Trunc(s string, maxlen int) string {
@ -247,30 +248,54 @@ func (r *bufReader) Close() error {
type WriteBroadcaster struct {
sync.Mutex
writers map[io.WriteCloser]struct{}
buf *bytes.Buffer
writers map[StreamWriter]bool
}
func (w *WriteBroadcaster) AddWriter(writer io.WriteCloser) {
type StreamWriter struct {
wc io.WriteCloser
stream string
}
func (w *WriteBroadcaster) AddWriter(writer io.WriteCloser, stream string) {
w.Lock()
w.writers[writer] = struct{}{}
sw := StreamWriter{wc: writer, stream: stream}
w.writers[sw] = true
w.Unlock()
}
// FIXME: Is that function used?
// FIXME: This relies on the concrete writer type used having equality operator
func (w *WriteBroadcaster) RemoveWriter(writer io.WriteCloser) {
w.Lock()
delete(w.writers, writer)
w.Unlock()
type JSONLog struct {
Log string `json:"log,omitempty"`
Stream string `json:"stream,omitempty"`
Created time.Time `json:"time"`
}
func (w *WriteBroadcaster) Write(p []byte) (n int, err error) {
w.Lock()
defer w.Unlock()
for writer := range w.writers {
if n, err := writer.Write(p); err != nil || n != len(p) {
w.buf.Write(p)
for sw := range w.writers {
lp := p
if sw.stream != "" {
lp = nil
for {
line, err := w.buf.ReadString('\n')
if err != nil {
w.buf.Write([]byte(line))
break
}
b, err := json.Marshal(&JSONLog{Log: line, Stream: sw.stream, Created: time.Now()})
if err != nil {
// On error, evict the writer
delete(w.writers, sw)
continue
}
lp = append(lp, b...)
}
}
if n, err := sw.wc.Write(lp); err != nil || n != len(lp) {
// On error, evict the writer
delete(w.writers, writer)
delete(w.writers, sw)
}
}
return len(p), nil
@ -279,15 +304,15 @@ func (w *WriteBroadcaster) Write(p []byte) (n int, err error) {
func (w *WriteBroadcaster) CloseWriters() error {
w.Lock()
defer w.Unlock()
for writer := range w.writers {
writer.Close()
for sw := range w.writers {
sw.wc.Close()
}
w.writers = make(map[io.WriteCloser]struct{})
w.writers = make(map[StreamWriter]bool)
return nil
}
func NewWriteBroadcaster() *WriteBroadcaster {
return &WriteBroadcaster{writers: make(map[io.WriteCloser]struct{})}
return &WriteBroadcaster{writers: make(map[StreamWriter]bool), buf: bytes.NewBuffer(nil)}
}
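To make the new stream-tagging behaviour concrete, here is a minimal sketch that is not part of the change. It assumes the `utils` package from this tree is importable, and it uses a throwaway, hypothetical `bufCloser` wrapper to satisfy `io.WriteCloser`:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/dotcloud/docker/utils"
)

// bufCloser is a hypothetical helper that turns a bytes.Buffer into an
// io.WriteCloser so it can be registered on the broadcaster.
type bufCloser struct{ bytes.Buffer }

func (b *bufCloser) Close() error { return nil }

func main() {
	w := utils.NewWriteBroadcaster()
	out := &bufCloser{}
	// A non-empty stream name makes the broadcaster wrap every complete
	// line in a JSONLog record with "log", "stream" and "time" fields.
	w.AddWriter(out, "stdout")
	w.Write([]byte("hello world\n"))
	fmt.Print(out.String()) // e.g. {"log":"hello world\n","stream":"stdout","time":"..."}
}
```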
func GetTotalUsedFds() int {
@ -700,3 +725,26 @@ func ParseRepositoryTag(repos string) (string, string) {
}
return repos, ""
}
// UserLookup checks if the given username or uid is present in /etc/passwd
// and returns the user struct.
// If the username is not found, an error is returned.
func UserLookup(uid string) (*user.User, error) {
	file, err := ioutil.ReadFile("/etc/passwd")
	if err != nil {
		return nil, err
	}
	for _, line := range strings.Split(string(file), "\n") {
		data := strings.Split(line, ":")
		if len(data) > 5 && (data[0] == uid || data[2] == uid) {
			return &user.User{
				Uid:      data[2],
				Gid:      data[3],
				Username: data[0],
				Name:     data[4],
				HomeDir:  data[5],
			}, nil
		}
	}
	return nil, fmt.Errorf("User not found in /etc/passwd")
}
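A quick usage sketch for the new helper (not part of the change); it assumes the `utils` package from this tree is importable and that uid `0` exists in the local `/etc/passwd`:

```go
package main

import (
	"fmt"

	"github.com/dotcloud/docker/utils"
)

func main() {
	// UserLookup accepts either a username or a numeric uid, since both
	// the name and uid columns of /etc/passwd are compared.
	u, err := utils.UserLookup("0")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Username, u.Uid, u.Gid, u.HomeDir)
}
```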
@ -5,6 +5,7 @@ import (
"errors"
"io"
"io/ioutil"
"strings"
"testing"
)
@ -59,9 +60,9 @@ func TestWriteBroadcaster(t *testing.T) {
// Test 1: Both bufferA and bufferB should contain "foo"
bufferA := &dummyWriter{}
writer.AddWriter(bufferA)
writer.AddWriter(bufferA, "")
bufferB := &dummyWriter{}
writer.AddWriter(bufferB)
writer.AddWriter(bufferB, "")
writer.Write([]byte("foo"))
if bufferA.String() != "foo" {
@ -75,7 +76,7 @@ func TestWriteBroadcaster(t *testing.T) {
// Test2: bufferA and bufferB should contain "foobar",
// while bufferC should only contain "bar"
bufferC := &dummyWriter{}
writer.AddWriter(bufferC)
writer.AddWriter(bufferC, "")
writer.Write([]byte("bar"))
if bufferA.String() != "foobar" {
@ -90,35 +91,22 @@ func TestWriteBroadcaster(t *testing.T) {
t.Errorf("Buffer contains %v", bufferC.String())
}
// Test3: Test removal
writer.RemoveWriter(bufferB)
writer.Write([]byte("42"))
if bufferA.String() != "foobar42" {
t.Errorf("Buffer contains %v", bufferA.String())
}
if bufferB.String() != "foobar" {
t.Errorf("Buffer contains %v", bufferB.String())
}
if bufferC.String() != "bar42" {
t.Errorf("Buffer contains %v", bufferC.String())
}
// Test4: Test eviction on failure
// Test3: Test eviction on failure
bufferA.failOnWrite = true
writer.Write([]byte("fail"))
if bufferA.String() != "foobar42" {
if bufferA.String() != "foobar" {
t.Errorf("Buffer contains %v", bufferA.String())
}
if bufferC.String() != "bar42fail" {
if bufferC.String() != "barfail" {
t.Errorf("Buffer contains %v", bufferC.String())
}
// Even though we reset the flag, no more writes should go in there
bufferA.failOnWrite = false
writer.Write([]byte("test"))
if bufferA.String() != "foobar42" {
if bufferA.String() != "foobar" {
t.Errorf("Buffer contains %v", bufferA.String())
}
if bufferC.String() != "bar42failtest" {
if bufferC.String() != "barfailtest" {
t.Errorf("Buffer contains %v", bufferC.String())
}
@ -140,7 +128,7 @@ func TestRaceWriteBroadcaster(t *testing.T) {
writer := NewWriteBroadcaster()
c := make(chan bool)
go func() {
writer.AddWriter(devNullCloser(0))
writer.AddWriter(devNullCloser(0), "")
c <- true
}()
writer.Write([]byte("hello"))
@ -264,14 +252,16 @@ func TestCompareKernelVersion(t *testing.T) {
func TestHumanSize(t *testing.T) {
size1000 := HumanSize(1000)
if size1000 != " 1 kB" {
t.Errorf("1000 -> expected 1 kB, got %s", size1000)
size := strings.Trim(HumanSize(1000), " \t")
expect := "1 kB"
if size != expect {
t.Errorf("1000 -> expected '%s', got '%s'", expect, size)
}
size1024 := HumanSize(1024)
if size1024 != "1.024 kB" {
t.Errorf("1024 -> expected 1.024 kB, got %s", size1024)
size = strings.Trim(HumanSize(1024), " \t")
expect = "1.024 kB"
if size != expect {
t.Errorf("1024 -> expected '%s', got '%s'", expect, size)
}
}
@ -1,13 +1,13 @@
package docker
import (
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
"os"
"path"
"strings"
"testing"
"github.com/dotcloud/docker/utils"
)
// This file contains utility functions for docker's unit test suite.
@ -139,3 +139,61 @@ func runContainer(r *Runtime, args []string, t *testing.T) (output string, err e
output = string(data)
return
}
func TestMergeConfig(t *testing.T) {
	volumesImage := make(map[string]struct{})
	volumesImage["/test1"] = struct{}{}
	volumesImage["/test2"] = struct{}{}
	configImage := &Config{
		Dns:       []string{"1.1.1.1", "2.2.2.2"},
		PortSpecs: []string{"1111:1111", "2222:2222"},
		Env:       []string{"VAR1=1", "VAR2=2"},
		Volumes:   volumesImage,
	}

	volumesUser := make(map[string]struct{})
	volumesUser["/test3"] = struct{}{}
	configUser := &Config{
		Dns:       []string{"3.3.3.3"},
		PortSpecs: []string{"2222:3333", "3333:3333"},
		Env:       []string{"VAR2=3", "VAR3=3"},
		Volumes:   volumesUser,
	}

	MergeConfig(configUser, configImage)

	if len(configUser.Dns) != 3 {
		t.Fatalf("Expected 3 dns, 1.1.1.1, 2.2.2.2 and 3.3.3.3, found %d", len(configUser.Dns))
	}
	for _, dns := range configUser.Dns {
		if dns != "1.1.1.1" && dns != "2.2.2.2" && dns != "3.3.3.3" {
			t.Fatalf("Expected 1.1.1.1 or 2.2.2.2 or 3.3.3.3, found %s", dns)
		}
	}

	if len(configUser.PortSpecs) != 3 {
		t.Fatalf("Expected 3 portSpecs, 1111:1111, 2222:3333 and 3333:3333, found %d", len(configUser.PortSpecs))
	}
	for _, portSpecs := range configUser.PortSpecs {
		if portSpecs != "1111:1111" && portSpecs != "2222:3333" && portSpecs != "3333:3333" {
			t.Fatalf("Expected 1111:1111 or 2222:3333 or 3333:3333, found %s", portSpecs)
		}
	}

	if len(configUser.Env) != 3 {
		t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env))
	}
	for _, env := range configUser.Env {
		if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" {
			t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env)
		}
	}

	if len(configUser.Volumes) != 3 {
		t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes))
	}
	for v := range configUser.Volumes {
		if v != "/test1" && v != "/test2" && v != "/test3" {
			t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v)
		}
	}
}