
Merge branch 'master' into new_logs

Victor Vieux 2013-07-15 13:57:54 +00:00
commit 5756ba9bc4
27 changed files with 489 additions and 758 deletions

api.go

@ -47,21 +47,22 @@ func parseMultipartForm(r *http.Request) error {
}
func httpError(w http.ResponseWriter, err error) {
statusCode := http.StatusInternalServerError
if strings.HasPrefix(err.Error(), "No such") {
http.Error(w, err.Error(), http.StatusNotFound)
statusCode = http.StatusNotFound
} else if strings.HasPrefix(err.Error(), "Bad parameter") {
http.Error(w, err.Error(), http.StatusBadRequest)
statusCode = http.StatusBadRequest
} else if strings.HasPrefix(err.Error(), "Conflict") {
http.Error(w, err.Error(), http.StatusConflict)
statusCode = http.StatusConflict
} else if strings.HasPrefix(err.Error(), "Impossible") {
http.Error(w, err.Error(), http.StatusNotAcceptable)
statusCode = http.StatusNotAcceptable
} else if strings.HasPrefix(err.Error(), "Wrong login/password") {
http.Error(w, err.Error(), http.StatusUnauthorized)
statusCode = http.StatusUnauthorized
} else if strings.Contains(err.Error(), "hasn't been activated") {
http.Error(w, err.Error(), http.StatusForbidden)
} else {
http.Error(w, err.Error(), http.StatusInternalServerError)
statusCode = http.StatusForbidden
}
utils.Debugf("[error %d] %s", statusCode, err)
http.Error(w, err.Error(), statusCode)
}
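The branches above now share a single statusCode variable, one utils.Debugf log line and one http.Error call. A small standalone sketch, not part of the commit, showing how the consolidated mapping behaves for two of the prefixes:

package main

import (
    "errors"
    "fmt"
    "net/http"
    "net/http/httptest"
    "strings"
)

// httpErrorSketch mirrors two branches of the mapping above; the real
// handler covers the full list of error prefixes.
func httpErrorSketch(w http.ResponseWriter, err error) {
    statusCode := http.StatusInternalServerError
    if strings.HasPrefix(err.Error(), "No such") {
        statusCode = http.StatusNotFound
    } else if strings.HasPrefix(err.Error(), "Bad parameter") {
        statusCode = http.StatusBadRequest
    }
    http.Error(w, err.Error(), statusCode)
}

func main() {
    r := httptest.NewRecorder()
    httpErrorSketch(r, errors.New("No such container: foo"))
    fmt.Println(r.Code) // 404
}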
func writeJSON(w http.ResponseWriter, b []byte) {
@ -250,6 +251,23 @@ func getContainersChanges(srv *Server, version float64, w http.ResponseWriter, r
return nil
}
func getContainersTop(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
name := vars["name"]
procsStr, err := srv.ContainerTop(name)
if err != nil {
return err
}
b, err := json.Marshal(procsStr)
if err != nil {
return err
}
writeJSON(w, b)
return nil
}
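getContainersTop simply marshals whatever srv.ContainerTop returns. A client-side sketch, not part of the commit, of consuming the new endpoint; the host, port and container id are placeholders:

package main

import (
    "encoding/json"
    "fmt"
    "net/http"
)

// APITop matches the struct added to the API parameters in this commit.
type APITop struct {
    PID  string
    Tty  string
    Time string
    Cmd  string
}

func main() {
    resp, err := http.Get("http://localhost:4243/containers/4fa6e0f0c678/top")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    var procs []APITop
    if err := json.NewDecoder(resp.Body).Decode(&procs); err != nil {
        panic(err)
    }
    for _, proc := range procs {
        fmt.Println(proc.PID, proc.Tty, proc.Time, proc.Cmd)
    }
}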
func getContainersJSON(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
@ -842,6 +860,7 @@ func createRouter(srv *Server, logging bool) (*mux.Router, error) {
"/containers/{name:.*}/export": getContainersExport,
"/containers/{name:.*}/changes": getContainersChanges,
"/containers/{name:.*}/json": getContainersByName,
"/containers/{name:.*}/top": getContainersTop,
},
"POST": {
"/auth": postAuth,


@ -26,6 +26,13 @@ type APIInfo struct {
SwapLimit bool `json:",omitempty"`
}
type APITop struct {
PID string
Tty string
Time string
Cmd string
}
type APIRmi struct {
Deleted string `json:",omitempty"`
Untagged string `json:",omitempty"`


@ -41,10 +41,8 @@ func TestGetBoolParam(t *testing.T) {
}
func TestGetVersion(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
var err error
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -65,10 +63,7 @@ func TestGetVersion(t *testing.T) {
}
func TestGetInfo(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -95,10 +90,7 @@ func TestGetInfo(t *testing.T) {
}
func TestGetImagesJSON(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -220,10 +212,7 @@ func TestGetImagesJSON(t *testing.T) {
}
func TestGetImagesViz(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -248,10 +237,7 @@ func TestGetImagesViz(t *testing.T) {
}
func TestGetImagesHistory(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -272,10 +258,7 @@ func TestGetImagesHistory(t *testing.T) {
}
func TestGetImagesByName(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -295,10 +278,7 @@ func TestGetImagesByName(t *testing.T) {
}
func TestGetContainersJSON(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -334,10 +314,7 @@ func TestGetContainersJSON(t *testing.T) {
}
func TestGetContainersExport(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -389,10 +366,7 @@ func TestGetContainersExport(t *testing.T) {
}
func TestGetContainersChanges(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -436,7 +410,7 @@ func TestGetContainersChanges(t *testing.T) {
}
}
func TestGetContainersByName(t *testing.T) {
func TestGetContainersTop(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
@ -447,6 +421,58 @@ func TestGetContainersByName(t *testing.T) {
builder := NewBuilder(runtime)
container, err := builder.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/sh", "-c", "sleep 2"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
hostConfig := &HostConfig{}
if err := container.Start(hostConfig); err != nil {
t.Fatal(err)
}
// Give the process some time to start
container.WaitTimeout(500 * time.Millisecond)
if !container.State.Running {
t.Errorf("Container should be running")
}
r := httptest.NewRecorder()
if err := getContainersTop(srv, APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil {
t.Fatal(err)
}
procs := []APITop{}
if err := json.Unmarshal(r.Body.Bytes(), &procs); err != nil {
t.Fatal(err)
}
if len(procs) != 2 {
t.Fatalf("Expected 2 processes, found %d.", len(procs))
}
if procs[0].Cmd != "sh" && procs[0].Cmd != "busybox" {
t.Fatalf("Expected `busybox` or `sh`, found %s.", procs[0].Cmd)
}
if procs[1].Cmd != "sh" && procs[1].Cmd != "busybox" {
t.Fatalf("Expected `busybox` or `sh`, found %s.", procs[1].Cmd)
}
}
func TestGetContainersByName(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
builder := NewBuilder(runtime)
// Create a container and remove a file
container, err := builder.Create(
&Config{
@ -473,10 +499,7 @@ func TestGetContainersByName(t *testing.T) {
}
func TestPostCommit(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -521,249 +544,8 @@ func TestPostCommit(t *testing.T) {
}
}
func TestPostImagesCreate(t *testing.T) {
// FIXME: Use the staging in order to perform tests
// runtime, err := newTestRuntime()
// if err != nil {
// t.Fatal(err)
// }
// defer nuke(runtime)
// srv := &Server{runtime: runtime}
// stdin, stdinPipe := io.Pipe()
// stdout, stdoutPipe := io.Pipe()
// c1 := make(chan struct{})
// go func() {
// defer close(c1)
// r := &hijackTester{
// ResponseRecorder: httptest.NewRecorder(),
// in: stdin,
// out: stdoutPipe,
// }
// req, err := http.NewRequest("POST", "/images/create?fromImage="+unitTestImageName, bytes.NewReader([]byte{}))
// if err != nil {
// t.Fatal(err)
// }
// body, err := postImagesCreate(srv, r, req, nil)
// if err != nil {
// t.Fatal(err)
// }
// if body != nil {
// t.Fatalf("No body expected, received: %s\n", body)
// }
// }()
// // Acknowledge hijack
// setTimeout(t, "hijack acknowledge timed out", 2*time.Second, func() {
// stdout.Read([]byte{})
// stdout.Read(make([]byte, 4096))
// })
// setTimeout(t, "Waiting for imagesCreate output", 5*time.Second, func() {
// reader := bufio.NewReader(stdout)
// line, err := reader.ReadString('\n')
// if err != nil {
// t.Fatal(err)
// }
// if !strings.HasPrefix(line, "Pulling repository d from") {
// t.Fatalf("Expected Pulling repository docker-ut from..., found %s", line)
// }
// })
// // Close pipes (client disconnects)
// if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
// t.Fatal(err)
// }
// // Wait for imagesCreate to finish; the client disconnected, so Create has finished its job
// setTimeout(t, "Waiting for imagesCreate timed out", 10*time.Second, func() {
// <-c1
// })
}
func TestPostImagesInsert(t *testing.T) {
// runtime, err := newTestRuntime()
// if err != nil {
// t.Fatal(err)
// }
// defer nuke(runtime)
// srv := &Server{runtime: runtime}
// stdin, stdinPipe := io.Pipe()
// stdout, stdoutPipe := io.Pipe()
// // Attach to it
// c1 := make(chan struct{})
// go func() {
// defer close(c1)
// r := &hijackTester{
// ResponseRecorder: httptest.NewRecorder(),
// in: stdin,
// out: stdoutPipe,
// }
// req, err := http.NewRequest("POST", "/images/"+unitTestImageName+"/insert?path=%2Ftest&url=https%3A%2F%2Fraw.github.com%2Fdotcloud%2Fdocker%2Fmaster%2FREADME.md", bytes.NewReader([]byte{}))
// if err != nil {
// t.Fatal(err)
// }
// if err := postContainersCreate(srv, r, req, nil); err != nil {
// t.Fatal(err)
// }
// }()
// // Acknowledge hijack
// setTimeout(t, "hijack acknowledge timed out", 5*time.Second, func() {
// stdout.Read([]byte{})
// stdout.Read(make([]byte, 4096))
// })
// id := ""
// setTimeout(t, "Waiting for imagesInsert output", 10*time.Second, func() {
// for {
// reader := bufio.NewReader(stdout)
// id, err = reader.ReadString('\n')
// if err != nil {
// t.Fatal(err)
// }
// }
// })
// // Close pipes (client disconnects)
// if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
// t.Fatal(err)
// }
// // Wait for attach to finish; the client disconnected, so Attach has finished its job
// setTimeout(t, "Waiting for CmdAttach timed out", 2*time.Second, func() {
// <-c1
// })
// img, err := srv.runtime.repositories.LookupImage(id)
// if err != nil {
// t.Fatalf("New image %s expected but not found", id)
// }
// layer, err := img.layer()
// if err != nil {
// t.Fatal(err)
// }
// if _, err := os.Stat(path.Join(layer, "test")); err != nil {
// t.Fatalf("The test file has not been found")
// }
// if err := srv.runtime.graph.Delete(img.ID); err != nil {
// t.Fatal(err)
// }
}
func TestPostImagesPush(t *testing.T) {
//FIXME: Use staging in order to perform tests
// runtime, err := newTestRuntime()
// if err != nil {
// t.Fatal(err)
// }
// defer nuke(runtime)
// srv := &Server{runtime: runtime}
// stdin, stdinPipe := io.Pipe()
// stdout, stdoutPipe := io.Pipe()
// c1 := make(chan struct{})
// go func() {
// r := &hijackTester{
// ResponseRecorder: httptest.NewRecorder(),
// in: stdin,
// out: stdoutPipe,
// }
// req, err := http.NewRequest("POST", "/images/docker-ut/push", bytes.NewReader([]byte{}))
// if err != nil {
// t.Fatal(err)
// }
// body, err := postImagesPush(srv, r, req, map[string]string{"name": "docker-ut"})
// close(c1)
// if err != nil {
// t.Fatal(err)
// }
// if body != nil {
// t.Fatalf("No body expected, received: %s\n", body)
// }
// }()
// // Acknowledge hijack
// setTimeout(t, "hijack acknowledge timed out", 2*time.Second, func() {
// stdout.Read([]byte{})
// stdout.Read(make([]byte, 4096))
// })
// setTimeout(t, "Waiting for imagesCreate output", 5*time.Second, func() {
// reader := bufio.NewReader(stdout)
// line, err := reader.ReadString('\n')
// if err != nil {
// t.Fatal(err)
// }
// if !strings.HasPrefix(line, "Processing checksum") {
// t.Fatalf("Processing checksum..., found %s", line)
// }
// })
// // Close pipes (client disconnects)
// if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
// t.Fatal(err)
// }
// // Wait for imagesPush to finish; the client disconnected, so Push has finished its job
// setTimeout(t, "Waiting for imagesPush timed out", 10*time.Second, func() {
// <-c1
// })
}
func TestPostImagesTag(t *testing.T) {
// FIXME: Use staging in order to perform tests
// runtime, err := newTestRuntime()
// if err != nil {
// t.Fatal(err)
// }
// defer nuke(runtime)
// srv := &Server{runtime: runtime}
// r := httptest.NewRecorder()
// req, err := http.NewRequest("POST", "/images/docker-ut/tag?repo=testrepo&tag=testtag", bytes.NewReader([]byte{}))
// if err != nil {
// t.Fatal(err)
// }
// body, err := postImagesTag(srv, r, req, map[string]string{"name": "docker-ut"})
// if err != nil {
// t.Fatal(err)
// }
// if body != nil {
// t.Fatalf("No body expected, received: %s\n", body)
// }
// if r.Code != http.StatusCreated {
// t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code)
// }
}
func TestPostContainersCreate(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -814,10 +596,7 @@ func TestPostContainersCreate(t *testing.T) {
}
func TestPostContainersKill(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -859,10 +638,7 @@ func TestPostContainersKill(t *testing.T) {
}
func TestPostContainersRestart(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -916,10 +692,7 @@ func TestPostContainersRestart(t *testing.T) {
}
func TestPostContainersStart(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -969,10 +742,7 @@ func TestPostContainersStart(t *testing.T) {
}
func TestPostContainersStop(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -1019,10 +789,7 @@ func TestPostContainersStop(t *testing.T) {
}
func TestPostContainersWait(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -1064,10 +831,7 @@ func TestPostContainersWait(t *testing.T) {
}
func TestPostContainersAttach(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -1153,10 +917,7 @@ func TestPostContainersAttach(t *testing.T) {
// FIXME: Test deleting container with volume
// FIXME: Test deleting volume in use by other container
func TestDeleteContainers(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -1196,10 +957,7 @@ func TestDeleteContainers(t *testing.T) {
}
func TestOptionsRoute(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime, enableCors: true}
@ -1222,10 +980,7 @@ func TestOptionsRoute(t *testing.T) {
}
func TestGetEnabledCors(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime, enableCors: true}
@ -1263,10 +1018,7 @@ func TestGetEnabledCors(t *testing.T) {
}
func TestDeleteImages(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}


@ -173,6 +173,27 @@ func (b *buildFile) CmdEntrypoint(args string) error {
return nil
}
func (b *buildFile) CmdVolume(args string) error {
if args == "" {
return fmt.Errorf("Volume cannot be empty")
}
var volume []string
if err := json.Unmarshal([]byte(args), &volume); err != nil {
volume = []string{args}
}
if b.config.Volumes == nil {
b.config.Volumes = NewPathOpts()
}
for _, v := range volume {
b.config.Volumes[v] = struct{}{}
}
if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil {
return err
}
return nil
}
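CmdVolume accepts either a JSON array or a plain path: json.Unmarshal is tried first and any parse error falls back to treating the whole argument as a single volume. A standalone sketch of that fallback, not part of the commit:

package main

import (
    "encoding/json"
    "fmt"
)

// parseVolumeArg reproduces the fallback used by CmdVolume above.
func parseVolumeArg(args string) []string {
    var volume []string
    if err := json.Unmarshal([]byte(args), &volume); err != nil {
        volume = []string{args}
    }
    return volume
}

func main() {
    fmt.Println(parseVolumeArg(`/data`))             // [/data]
    fmt.Println(parseVolumeArg(`["/data","/logs"]`)) // [/data /logs]
}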
func (b *buildFile) addRemote(container *Container, orig, dest string) error {
file, err := utils.Download(orig, ioutil.Discard)
if err != nil {


@ -87,6 +87,15 @@ run [ "$FOO" = "BAR" ]
from %s
ENTRYPOINT /bin/echo
CMD Hello world
`,
nil,
},
{
`
from %s
VOLUME /test
CMD Hello world
`,
nil,
},
@ -96,10 +105,7 @@ CMD Hello world
func TestBuild(t *testing.T) {
for _, ctx := range testContexts {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
@ -114,3 +120,39 @@ func TestBuild(t *testing.T) {
}
}
}
func TestVolume(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
buildfile := NewBuildFile(srv, ioutil.Discard)
imgId, err := buildfile.Build(mkTestContext(`
from %s
VOLUME /test
CMD Hello world
`, nil, t))
if err != nil {
t.Fatal(err)
}
img, err := srv.ImageInspect(imgId)
if err != nil {
t.Fatal(err)
}
if len(img.Config.Volumes) == 0 {
t.Fail()
}
for key := range img.Config.Volumes {
if key != "/test" {
t.Fail()
}
}
}


@ -89,6 +89,7 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
{"login", "Register or Login to the docker registry server"},
{"logs", "Fetch the logs of a container"},
{"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"},
{"top", "Lookup the running processes of a container"},
{"ps", "List containers"},
{"pull", "Pull an image or a repository from the docker registry server"},
{"push", "Push an image or a repository to the docker registry server"},
@ -279,47 +280,66 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
return readStringOnRawTerminal(stdin, stdout, false)
}
oldState, err := term.SetRawTerminal(cli.terminalFd)
cmd := Subcmd("login", "[OPTIONS]", "Register or Login to the docker registry server")
flUsername := cmd.String("u", "", "username")
flPassword := cmd.String("p", "", "password")
flEmail := cmd.String("e", "", "email")
err := cmd.Parse(args)
if err != nil {
return err
}
defer term.RestoreTerminal(cli.terminalFd, oldState)
cmd := Subcmd("login", "", "Register or Login to the docker registry server")
if err := cmd.Parse(args); err != nil {
return nil
}
var oldState *term.State
if *flUsername == "" || *flPassword == "" || *flEmail == "" {
oldState, err = term.SetRawTerminal(cli.terminalFd)
if err != nil {
return err
}
defer term.RestoreTerminal(cli.terminalFd, oldState)
}
var (
username string
password string
email string
)
fmt.Fprintf(cli.out, "Username (%s):", cli.authConfig.Username)
username = readAndEchoString(cli.in, cli.out)
if username == "" {
username = cli.authConfig.Username
if *flUsername == "" {
fmt.Fprintf(cli.out, "Username (%s): ", cli.authConfig.Username)
username = readAndEchoString(cli.in, cli.out)
if username == "" {
username = cli.authConfig.Username
}
} else {
username = *flUsername
}
if username != cli.authConfig.Username {
fmt.Fprintf(cli.out, "Password: ")
password = readString(cli.in, cli.out)
if password == "" {
return fmt.Errorf("Error : Password Required")
if *flPassword == "" {
fmt.Fprintf(cli.out, "Password: ")
password = readString(cli.in, cli.out)
if password == "" {
return fmt.Errorf("Error : Password Required")
}
} else {
password = *flPassword
}
fmt.Fprintf(cli.out, "Email (%s): ", cli.authConfig.Email)
email = readAndEchoString(cli.in, cli.out)
if email == "" {
email = cli.authConfig.Email
if *flEmail == "" {
fmt.Fprintf(cli.out, "Email (%s): ", cli.authConfig.Email)
email = readAndEchoString(cli.in, cli.out)
if email == "" {
email = cli.authConfig.Email
}
} else {
email = *flEmail
}
} else {
password = cli.authConfig.Password
email = cli.authConfig.Email
}
term.RestoreTerminal(cli.terminalFd, oldState)
if oldState != nil {
term.RestoreTerminal(cli.terminalFd, oldState)
}
cli.authConfig.Username = username
cli.authConfig.Password = password
cli.authConfig.Email = email
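The rewritten CmdLogin only switches the terminal to raw mode and prompts when at least one of -u, -p or -e is left empty. A minimal sketch of that flag-or-prompt pattern, reduced to the username and without the raw-terminal handling:

package main

import (
    "bufio"
    "flag"
    "fmt"
    "os"
    "strings"
)

func main() {
    flUsername := flag.String("u", "", "username")
    flag.Parse()

    // Prompt only when the flag was not supplied, as in the change above.
    username := *flUsername
    if username == "" {
        fmt.Print("Username: ")
        line, _ := bufio.NewReader(os.Stdin).ReadString('\n')
        username = strings.TrimSpace(line)
    }
    fmt.Println("Logging in as", username)
}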
@ -554,6 +574,33 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
return nil
}
func (cli *DockerCli) CmdTop(args ...string) error {
cmd := Subcmd("top", "CONTAINER", "Lookup the running processes of a container")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
body, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top", nil)
if err != nil {
return err
}
var procs []APITop
err = json.Unmarshal(body, &procs)
if err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
fmt.Fprintln(w, "PID\tTTY\tTIME\tCMD")
for _, proc := range procs {
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", proc.PID, proc.Tty, proc.Time, proc.Cmd)
}
w.Flush()
return nil
}
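CmdTop renders the decoded APITop slice with text/tabwriter using the column settings shown above. A standalone sketch of the resulting layout, reusing the sample rows from the remote API documentation later in this diff:

package main

import (
    "fmt"
    "os"
    "text/tabwriter"
)

func main() {
    w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
    fmt.Fprintln(w, "PID\tTTY\tTIME\tCMD")
    fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", "11935", "pts/2", "00:00:00", "sh")
    fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", "12140", "pts/2", "00:00:00", "sleep")
    w.Flush()
}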
func (cli *DockerCli) CmdPort(args ...string) error {
cmd := Subcmd("port", "CONTAINER PRIVATE_PORT", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT")
if err := cmd.Parse(args); err != nil {
@ -564,6 +611,13 @@ func (cli *DockerCli) CmdPort(args ...string) error {
return nil
}
port := cmd.Arg(1)
proto := "Tcp"
parts := strings.SplitN(port, "/", 2)
if len(parts) == 2 && len(parts[1]) != 0 {
port = parts[0]
proto = strings.ToUpper(parts[1][:1]) + strings.ToLower(parts[1][1:])
}
body, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil)
if err != nil {
return err
@ -574,7 +628,7 @@ func (cli *DockerCli) CmdPort(args ...string) error {
return err
}
if frontend, exists := out.NetworkSettings.PortMapping[cmd.Arg(1)]; exists {
if frontend, exists := out.NetworkSettings.PortMapping[proto][port]; exists {
fmt.Fprintf(cli.out, "%s\n", frontend)
} else {
return fmt.Errorf("Error: No private port '%s' allocated on %s", cmd.Arg(1), cmd.Arg(0))
@ -767,7 +821,9 @@ func (cli *DockerCli) CmdPull(args ...string) error {
}
remote, parsedTag := utils.ParseRepositoryTag(cmd.Arg(0))
*tag = parsedTag
if *tag == "" {
*tag = parsedTag
}
v := url.Values{}
v.Set("fromImage", remote)


@ -59,79 +59,6 @@ func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error
return nil
}
/*TODO
func cmdImages(srv *Server, args ...string) (string, error) {
stdout, stdoutPipe := io.Pipe()
go func() {
if err := srv.CmdImages(nil, stdoutPipe, args...); err != nil {
return
}
// force the pipe closed, so that the code below gets an EOF
stdoutPipe.Close()
}()
output, err := ioutil.ReadAll(stdout)
if err != nil {
return "", err
}
// Cleanup pipes
return string(output), closeWrap(stdout, stdoutPipe)
}
// TestImages checks that 'docker images' displays information correctly
func TestImages(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{runtime: runtime}
output, err := cmdImages(srv)
if !strings.Contains(output, "REPOSITORY") {
t.Fatal("'images' should have a header")
}
if !strings.Contains(output, "docker-ut") {
t.Fatal("'images' should show the docker-ut image")
}
if !strings.Contains(output, "e9aa60c60128") {
t.Fatal("'images' should show the docker-ut image id")
}
output, err = cmdImages(srv, "-q")
if strings.Contains(output, "REPOSITORY") {
t.Fatal("'images -q' should not have a header")
}
if strings.Contains(output, "docker-ut") {
t.Fatal("'images' should not show the docker-ut image name")
}
if !strings.Contains(output, "e9aa60c60128") {
t.Fatal("'images' should show the docker-ut image id")
}
output, err = cmdImages(srv, "-viz")
if !strings.HasPrefix(output, "digraph docker {") {
t.Fatal("'images -v' should start with the dot header")
}
if !strings.HasSuffix(output, "}\n") {
t.Fatal("'images -v' should end with a '}'")
}
if !strings.Contains(output, "base -> \"e9aa60c60128\" [style=invis]") {
t.Fatal("'images -v' should have the docker-ut image id node")
}
// todo: add checks for -a
}
*/
// TestRunHostname checks that 'docker run -h' correctly sets a custom hostname
func TestRunHostname(t *testing.T) {
@ -164,163 +91,6 @@ func TestRunHostname(t *testing.T) {
}
/*
func TestRunExit(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{runtime: runtime}
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
c1 := make(chan struct{})
go func() {
srv.CmdRun(stdin, rcli.NewDockerLocalConn(stdoutPipe), "-i", GetTestImage(runtime).Id, "/bin/cat")
close(c1)
}()
setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
t.Fatal(err)
}
})
container := runtime.List()[0]
// Closing /bin/cat stdin, expect it to exit
p, err := container.StdinPipe()
if err != nil {
t.Fatal(err)
}
if err := p.Close(); err != nil {
t.Fatal(err)
}
// as the process exited, CmdRun must finish and unblock. Wait for it
setTimeout(t, "Waiting for CmdRun timed out", 2*time.Second, func() {
<-c1
cmdWait(srv, container)
})
// Make sure that the client has been disconnected
setTimeout(t, "The client should have been disconnected once the remote process exited.", 2*time.Second, func() {
// Expecting pipe i/o error, just check that read does not block
stdin.Read([]byte{})
})
// Cleanup pipes
if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
t.Fatal(err)
}
}
// Expected behaviour: the process dies when the client disconnects
func TestRunDisconnect(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{runtime: runtime}
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
c1 := make(chan struct{})
go func() {
// We're simulating a disconnect so the return value doesn't matter. What matters is the
// fact that CmdRun returns.
srv.CmdRun(stdin, rcli.NewDockerLocalConn(stdoutPipe), "-i", GetTestImage(runtime).Id, "/bin/cat")
close(c1)
}()
setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
t.Fatal(err)
}
})
// Close pipes (simulate disconnect)
if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
t.Fatal(err)
}
// as the pipes are closed, we expect the process to die,
// and therefore CmdRun to unblock. Wait for CmdRun
setTimeout(t, "Waiting for CmdRun timed out", 2*time.Second, func() {
<-c1
})
// Client disconnect after run -i should cause stdin to be closed, which should
// cause /bin/cat to exit.
setTimeout(t, "Waiting for /bin/cat to exit timed out", 2*time.Second, func() {
container := runtime.List()[0]
container.Wait()
if container.State.Running {
t.Fatalf("/bin/cat is still running after closing stdin")
}
})
}
// Expected behaviour: the process dies when the client disconnects
func TestRunDisconnectTty(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{runtime: runtime}
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
c1 := make(chan struct{})
go func() {
// We're simulating a disconnect so the return value doesn't matter. What matters is the
// fact that CmdRun returns.
srv.CmdRun(stdin, rcli.NewDockerLocalConn(stdoutPipe), "-i", "-t", GetTestImage(runtime).Id, "/bin/cat")
close(c1)
}()
setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
for {
// Client disconnect after run -i should keep stdin out in TTY mode
l := runtime.List()
if len(l) == 1 && l[0].State.Running {
break
}
time.Sleep(10 * time.Millisecond)
}
})
// Client disconnect after run -i should keep stdin out in TTY mode
container := runtime.List()[0]
setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
t.Fatal(err)
}
})
// Close pipes (simulate disconnect)
if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
t.Fatal(err)
}
// In tty mode, we expect the process to stay alive even after client's stdin closes.
// Do not wait for run to finish
// Give the monitor some time to do its thing
container.WaitTimeout(500 * time.Millisecond)
if !container.State.Running {
t.Fatalf("/bin/cat should still be running after closing stdin (tty mode)")
}
}
*/
// TestAttachStdin checks attaching to stdin without stdout and stderr.
// 'docker run -i -a stdin' should send the client's stdin to the command,
@ -387,74 +157,3 @@ func TestRunAttachStdin(t *testing.T) {
}
}
}
/*
// Expected behaviour: the process stays alive when the client disconnects
func TestAttachDisconnect(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{runtime: runtime}
container, err := NewBuilder(runtime).Create(
&Config{
Image: GetTestImage(runtime).Id,
CpuShares: 1000,
Memory: 33554432,
Cmd: []string{"/bin/cat"},
OpenStdin: true,
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
// Start the process
if err := container.Start(); err != nil {
t.Fatal(err)
}
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
// Attach to it
c1 := make(chan struct{})
go func() {
// We're simulating a disconnect so the return value doesn't matter. What matters is the
// fact that CmdAttach returns.
srv.CmdAttach(stdin, rcli.NewDockerLocalConn(stdoutPipe), container.Id)
close(c1)
}()
setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
t.Fatal(err)
}
})
// Close pipes (client disconnects)
if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
t.Fatal(err)
}
// Wait for attach to finish; the client disconnected, so Attach has finished its job
setTimeout(t, "Waiting for CmdAttach timed out", 2*time.Second, func() {
<-c1
})
// We closed stdin, expect /bin/cat to still be running
// Wait a little bit to make sure container.monitor() did its thing
err = container.WaitTimeout(500 * time.Millisecond)
if err == nil || !container.State.Running {
t.Fatalf("/bin/cat is not running after closing stdin")
}
// Try to avoid the timeout in destroy. Best effort, don't check the error
cStdin, _ := container.StdinPipe()
cStdin.Close()
container.Wait()
}
*/


@ -270,6 +270,26 @@ func (container *Container) ToDisk() (err error) {
return ioutil.WriteFile(container.jsonPath(), data, 0666)
}
func (container *Container) ReadHostConfig() (*HostConfig, error) {
data, err := ioutil.ReadFile(container.hostConfigPath())
if err != nil {
return &HostConfig{}, err
}
hostConfig := &HostConfig{}
if err := json.Unmarshal(data, hostConfig); err != nil {
return &HostConfig{}, err
}
return hostConfig, nil
}
func (container *Container) SaveHostConfig(hostConfig *HostConfig) (err error) {
data, err := json.Marshal(hostConfig)
if err != nil {
return
}
return ioutil.WriteFile(container.hostConfigPath(), data, 0666)
}
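ReadHostConfig and SaveHostConfig persist the host configuration as hostconfig.json next to config.json, which lets a later Start recover the binds when none are passed. A self-contained sketch of that round trip; the struct is reduced to one field for illustration:

package main

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "os"
    "path"
)

// HostConfig is trimmed down here; the real struct lives in the docker package.
type HostConfig struct {
    Binds []string
}

func main() {
    root, err := ioutil.TempDir("", "hostconfig-sketch")
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(root)
    p := path.Join(root, "hostconfig.json")

    // Save: marshal and write, as SaveHostConfig does.
    data, _ := json.Marshal(&HostConfig{Binds: []string{"/tmp:/tmp"}})
    ioutil.WriteFile(p, data, 0666)

    // Read: unmarshal back, falling back to an empty config on error.
    hostConfig := &HostConfig{}
    if read, err := ioutil.ReadFile(p); err == nil {
        json.Unmarshal(read, hostConfig)
    }
    fmt.Println(hostConfig.Binds) // [/tmp:/tmp]
}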
func (container *Container) generateLXCConfig() error {
fo, err := os.Create(container.lxcConfigPath())
if err != nil {
@ -473,6 +493,9 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
func (container *Container) Start(hostConfig *HostConfig) error {
container.State.Lock()
defer container.State.Unlock()
if len(hostConfig.Binds) == 0 {
hostConfig, _ = container.ReadHostConfig()
}
if container.State.Running {
return fmt.Errorf("The container %s is already running.", container.ID)
@ -649,6 +672,7 @@ func (container *Container) Start(hostConfig *HostConfig) error {
container.waitLock = make(chan struct{})
container.ToDisk()
container.SaveHostConfig(hostConfig)
go container.monitor()
return nil
}
@ -987,6 +1011,10 @@ func (container *Container) ReadLog(name string) (io.Reader, error) {
return os.Open(container.logPath(name))
}
func (container *Container) hostConfigPath() string {
return path.Join(container.root, "hostconfig.json")
}
func (container *Container) jsonPath() string {
return path.Join(container.root, "config.json")
}


@ -1046,10 +1046,7 @@ func TestEnv(t *testing.T) {
}
func TestEntrypoint(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(
&Config{
@ -1125,10 +1122,7 @@ func TestLXCConfig(t *testing.T) {
}
func BenchmarkRunSequencial(b *testing.B) {
runtime, err := newTestRuntime()
if err != nil {
b.Fatal(err)
}
runtime := mkRuntime(b)
defer nuke(runtime)
for i := 0; i < b.N; i++ {
container, err := NewBuilder(runtime).Create(&Config{
@ -1154,10 +1148,7 @@ func BenchmarkRunSequencial(b *testing.B) {
}
func BenchmarkRunParallel(b *testing.B) {
runtime, err := newTestRuntime()
if err != nil {
b.Fatal(err)
}
runtime := mkRuntime(b)
defer nuke(runtime)
var tasks []chan error


@ -29,6 +29,11 @@ You can still call an old version of the api using /v1.0/images/<name>/insert
What's new
----------
Listing processes (/top):
- List the processes inside a container
Builder (/build):
- Simplify the upload of the build context


@ -220,6 +220,46 @@ Inspect a container
:statuscode 500: server error
List processes running inside a container
*****************************************
.. http:get:: /containers/(id)/top
List processes running inside the container ``id``
**Example request**:
.. sourcecode:: http
GET /containers/4fa6e0f0c678/top HTTP/1.1
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
[
{
"PID":"11935",
"Tty":"pts/2",
"Time":"00:00:00",
"Cmd":"sh"
},
{
"PID":"12140",
"Tty":"pts/2",
"Time":"00:00:00",
"Cmd":"sleep"
}
]
:statuscode 200: no error
:statuscode 404: no such container
:statuscode 500: server error
Inspect changes on a container's filesystem
*******************************************


@ -52,5 +52,6 @@ Available Commands
command/start
command/stop
command/tag
command/top
command/version
command/wait


@ -8,6 +8,10 @@
::
Usage: docker login
Usage: docker login [OPTIONS]
Register or Login to the docker registry server
-e="": email
-p="": password
-u="": username


@ -0,0 +1,13 @@
:title: Top Command
:description: Lookup the running processes of a container
:keywords: top, docker, container, documentation
=======================================================
``top`` -- Lookup the running processes of a container
=======================================================
::
Usage: docker top CONTAINER
Lookup the running processes of a container


@ -48,12 +48,12 @@ Docker will ignore lines in Dockerfiles prefixed with "`#`", so you may add
comment lines. A comment marker in the rest of the line will be treated as an
argument.
2. Instructions
3. Instructions
===============
Docker builder comes with a set of instructions, described below.
2.1 FROM
3.1 FROM
--------
``FROM <image>``
@ -65,7 +65,7 @@ a valid Dockerfile must have it as its first instruction.
create multiple images. Simply make a note of the last image id output by the
commit before each new `FROM` command.
2.2 MAINTAINER
3.2 MAINTAINER
--------------
``MAINTAINER <name>``
@ -73,7 +73,7 @@ commit before each new `FROM` command.
The `MAINTAINER` instruction allows you to set the Author field of the generated
images.
2.3 RUN
3.3 RUN
-------
``RUN <command>``
@ -86,7 +86,7 @@ Layering `RUN` instructions and generating commits conforms to the
core concepts of Docker where commits are cheap and containers can be created
from any point in an image's history, much like source control.
2.4 CMD
3.4 CMD
-------
``CMD <command>``
@ -100,7 +100,7 @@ This is functionally equivalent to running
the result; `CMD` does not execute anything at build time, but specifies the
intended command for the image.
2.5 EXPOSE
3.5 EXPOSE
----------
``EXPOSE <port> [<port>...]``
@ -109,7 +109,7 @@ The `EXPOSE` instruction sets ports to be publicly exposed when running the
image. This is functionally equivalent to running
`docker commit -run '{"PortSpecs": ["<port>", "<port2>"]}'` outside the builder.
2.6 ENV
3.6 ENV
-------
``ENV <key> <value>``
@ -121,7 +121,7 @@ functionally equivalent to prefixing the command with `<key>=<value>`
.. note::
The environment variables will persist when a container is run from the resulting image.
2.7 ADD
3.7 ADD
-------
``ADD <src> <dest>``
@ -153,14 +153,21 @@ of `<src>` will be written at `<dst>`.
If `<dest>` doesn't exist, it is created along with all missing directories in its path. All new
files and directories are created with mode 0700, uid and gid 0.
2.8 ENTRYPOINT
3.8 ENTRYPOINT
-------------
``ENTRYPOINT /bin/echo``
The `ENTRYPOINT` instruction adds an entry command that will not be overwritten when arguments are passed to docker run, unlike the behavior of `CMD`. This allows arguments to be passed to the entrypoint. i.e. `docker run <image> -d` will pass the "-d" argument to the entrypoint.
3. Dockerfile Examples
3.9 VOLUME
----------
``VOLUME ["/data"]``
The `VOLUME` instruction will add one or more new volumes to any container created from the image.
4. Dockerfile Examples
======================
.. code-block:: bash


@ -13,8 +13,8 @@ PKG_NAME=lxc-docker
ROOT_PATH=$(shell git rev-parse --show-toplevel)
GITHUB_PATH=github.com/dotcloud/docker
BUILD_SRC=build_src
VERSION_TAG?=v$(shell sed -E 's/.+\((.+)-.+\).+/\1/;q' changelog)
VERSION=$(shell echo ${VERSION_TAG} | cut -c2-)
VERSION=$(shell sed -En '0,/^\#\# /{s/^\#\# ([^ ]+).+/\1/p}' ../../CHANGELOG.md)
VERSION_TAG?=v${VERSION}
DOCKER_VERSION=${PKG_NAME}_${VERSION}
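As a worked example with a hypothetical changelog: if the first second-level heading in CHANGELOG.md is "## 0.4.9 (2013-07-01)", the sed expression above extracts 0.4.9, so VERSION=0.4.9, VERSION_TAG defaults to v0.4.9 and DOCKER_VERSION becomes lxc-docker_0.4.9.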
all:
@ -28,7 +28,6 @@ install:
mkdir -p $(DESTDIR)/usr/share/doc/lxc-docker
install -m 0755 src/${GITHUB_PATH}/docker/docker $(DESTDIR)/usr/bin/lxc-docker
cp debian/lxc-docker.1 $(DESTDIR)/usr/share/man/man1
cp debian/CHANGELOG.md $(DESTDIR)/usr/share/doc/lxc-docker/changelog
debian:
# Prepare docker source from revision ${VERSION_TAG}
@ -41,6 +40,7 @@ debian:
cp -r `ls | grep -v ${BUILD_SRC}` ${BUILD_SRC}/debian
cp ${ROOT_PATH}/README.md ${BUILD_SRC}
cp ${ROOT_PATH}/CHANGELOG.md ${BUILD_SRC}/debian
./parse_changelog.py < ../../CHANGELOG.md > ${BUILD_SRC}/debian/changelog
# Cleanup
rm -rf `find . -name '.git*'`
rm -f ${DOCKER_VERSION}*


@ -13,6 +13,9 @@ Vagrant::Config.run do |config|
# Install debian packaging dependencies and create debian packages
pkg_cmd = "apt-get -qq update; DEBIAN_FRONTEND=noninteractive apt-get install -qq -y #{PKG_DEP}; " \
"curl -s -o /go.tar.gz https://go.googlecode.com/files/go1.1.1.linux-amd64.tar.gz; " \
"tar -C /usr/local -xzf /go.tar.gz; rm /usr/bin/go; " \
"ln -s /usr/local/go/bin/go /usr/bin; "\
"export GPG_KEY='#{ENV['GPG_KEY']}'; cd /data/docker/packaging/debian; make debian"
config.vm.provision :shell, :inline => pkg_cmd
end


@ -0,0 +1,23 @@
#!/usr/bin/env python
'Parse the main CHANGELOG.md from stdin, outputting the debian changelog on stdout'
import sys,re, datetime
on_block=False
for line in sys.stdin.readlines():
line = line.strip()
if line.startswith('# ') or len(line) == 0:
continue
if line.startswith('## '):
if on_block:
print '\n -- dotCloud <ops@dotcloud.com> {0}\n'.format(date)
version, date = line[3:].split()
date = datetime.datetime.strptime(date, '(%Y-%m-%d)').strftime(
'%a, %d %b %Y 00:00:00 -0700')
on_block = True
print 'lxc-docker ({0}-1) precise; urgency=low'.format(version)
continue
if on_block:
print ' ' + line
print '\n -- dotCloud <ops@dotcloud.com> {0}'.format(date)
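As a worked example with a hypothetical entry: a CHANGELOG.md block starting with "## 0.4.9 (2013-07-01)" followed by bullet lines becomes a debian changelog stanza of the form "lxc-docker (0.4.9-1) precise; urgency=low", the indented bullets, and a trailer line " -- dotCloud <ops@dotcloud.com> Mon, 01 Jul 2013 00:00:00 -0700".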


@ -141,7 +141,6 @@ func (runtime *Runtime) Register(container *Container) error {
utils.Debugf("Restarting")
container.State.Ghost = false
container.State.setStopped(0)
// assume empty host config
hostConfig := &HostConfig{}
if err := container.Start(hostConfig); err != nil {
return err


@ -114,26 +114,6 @@ func init() {
// FIXME: test that ImagePull(json=true) send correct json output
func newTestRuntime() (*Runtime, error) {
root, err := ioutil.TempDir("", "docker-test")
if err != nil {
return nil, err
}
if err := os.Remove(root); err != nil {
return nil, err
}
if err := utils.CopyDirectory(unitTestStoreBase, root); err != nil {
return nil, err
}
runtime, err := NewRuntimeFromDirectory(root, false)
if err != nil {
return nil, err
}
runtime.UpdateCapabilities(true)
return runtime, nil
}
func GetTestImage(runtime *Runtime) *Image {
imgs, err := runtime.graph.All()
if err != nil {
@ -148,10 +128,7 @@ func GetTestImage(runtime *Runtime) *Image {
}
func TestRuntimeCreate(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
// Make sure we start with 0 containers
@ -223,10 +200,7 @@ func TestRuntimeCreate(t *testing.T) {
}
func TestDestroy(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
container, err := NewBuilder(runtime).Create(&Config{
Image: GetTestImage(runtime).ID,
@ -270,10 +244,7 @@ func TestDestroy(t *testing.T) {
}
func TestGet(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
builder := NewBuilder(runtime)
@ -323,11 +294,8 @@ func TestGet(t *testing.T) {
}
func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container, string) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
var err error
runtime := mkRuntime(t)
port := 5554
var container *Container
var strPort string


@ -1,6 +1,7 @@
package docker
import (
"bufio"
"errors"
"fmt"
"github.com/dotcloud/docker/auth"
@ -12,6 +13,7 @@ import (
"net/http"
"net/url"
"os"
"os/exec"
"path"
"runtime"
"strings"
@ -98,7 +100,7 @@ func (srv *Server) ImageInsert(name, url, path string, out io.Writer, sf *utils.
return "", err
}
if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf.FormatProgress("Downloading", "%v/%v (%v)"), sf), path); err != nil {
if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf.FormatProgress("Downloading", "%8v/%v (%v)"), sf), path); err != nil {
return "", err
}
// FIXME: Handle custom repo, tag comment, author
@ -247,6 +249,40 @@ func (srv *Server) ImageHistory(name string) ([]APIHistory, error) {
}
func (srv *Server) ContainerTop(name string) ([]APITop, error) {
if container := srv.runtime.Get(name); container != nil {
output, err := exec.Command("lxc-ps", "--name", container.ID).CombinedOutput()
if err != nil {
return nil, fmt.Errorf("Error trying to use lxc-ps: %s (%s)", err, output)
}
var procs []APITop
for i, line := range strings.Split(string(output), "\n") {
if i == 0 || len(line) == 0 {
continue
}
proc := APITop{}
scanner := bufio.NewScanner(strings.NewReader(line))
scanner.Split(bufio.ScanWords)
if !scanner.Scan() {
return nil, fmt.Errorf("Error trying to use lxc-ps")
}
// no scanner.Text because we skip container id
scanner.Scan()
proc.PID = scanner.Text()
scanner.Scan()
proc.Tty = scanner.Text()
scanner.Scan()
proc.Time = scanner.Text()
scanner.Scan()
proc.Cmd = scanner.Text()
procs = append(procs, proc)
}
return procs, nil
}
return nil, fmt.Errorf("No such container: %s", name)
}
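ContainerTop shells out to lxc-ps and scans each line word by word, skipping the leading container field before filling PID, Tty, Time and Cmd. A minimal standalone sketch of that scanning; the sample line is illustrative:

package main

import (
    "bufio"
    "fmt"
    "strings"
)

func main() {
    // A line shaped like lxc-ps output: container name, then the ps columns.
    line := "4fa6e0f0c678 11935 pts/2 00:00:00 sh"
    scanner := bufio.NewScanner(strings.NewReader(line))
    scanner.Split(bufio.ScanWords)
    scanner.Scan() // skip the container field, as ContainerTop does
    var fields []string
    for scanner.Scan() {
        fields = append(fields, scanner.Text())
    }
    fmt.Println(fields) // [11935 pts/2 00:00:00 sh]
}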
func (srv *Server) ContainerChanges(name string) ([]Change, error) {
if container := srv.runtime.Get(name); container != nil {
return container.Changes()
@ -343,7 +379,7 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin
return err
}
defer layer.Close()
if err := srv.runtime.graph.Register(utils.ProgressReader(layer, imgSize, out, sf.FormatProgress("Downloading", "%v/%v (%v)"), sf), false, img); err != nil {
if err := srv.runtime.graph.Register(utils.ProgressReader(layer, imgSize, out, sf.FormatProgress("Downloading", "%8v/%v (%v)"), sf), false, img); err != nil {
return err
}
}
@ -666,7 +702,7 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID,
}
// Send the layer
if err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf.FormatProgress("Pushing", "%v/%v (%v)"), sf), ep, token); err != nil {
if err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf.FormatProgress("Pushing", "%8v/%v (%v)"), sf), ep, token); err != nil {
return err
}
return nil
@ -736,7 +772,7 @@ func (srv *Server) ImageImport(src, repo, tag string, in io.Reader, out io.Write
if err != nil {
return err
}
archive = utils.ProgressReader(resp.Body, int(resp.ContentLength), out, sf.FormatProgress("Importing", "%v/%v (%v)"), sf)
archive = utils.ProgressReader(resp.Body, int(resp.ContentLength), out, sf.FormatProgress("Importing", "%8v/%v (%v)"), sf)
}
img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "", nil)
if err != nil {


@ -5,10 +5,7 @@ import (
)
func TestContainerTagImageDelete(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -62,10 +59,7 @@ func TestContainerTagImageDelete(t *testing.T) {
}
func TestCreateRm(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -95,10 +89,7 @@ func TestCreateRm(t *testing.T) {
}
func TestCreateStartRestartStopStartKillRm(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
@ -154,11 +145,9 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) {
}
func TestRunWithTooLowMemoryLimit(t *testing.T) {
runtime, err := newTestRuntime()
var err error
runtime := mkRuntime(t)
srv := &Server{runtime: runtime}
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
// Try to create a container with a memory limit of 1 byte less than the minimum allowed limit.
_, err = srv.ContainerCreate(


@ -204,15 +204,15 @@ func (store *TagStore) GetImage(repoName, tagOrID string) (*Image, error) {
} else if repo == nil {
return nil, nil
}
//go through all the tags, to see if tag is in fact an ID
if revision, exists := repo[tagOrID]; exists {
return store.graph.Get(revision)
}
// If no matching tag is found, search through images for a matching image id
for _, revision := range repo {
if strings.HasPrefix(revision, tagOrID) {
return store.graph.Get(revision)
}
}
if revision, exists := repo[tagOrID]; exists {
return store.graph.Get(revision)
}
return nil, nil
}


@ -5,10 +5,7 @@ import (
)
func TestLookupImage(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)
if img, err := runtime.repositories.LookupImage(unitTestImageName); err != nil {


@ -20,7 +20,8 @@ func CompareConfig(a, b *Config) bool {
if len(a.Cmd) != len(b.Cmd) ||
len(a.Dns) != len(b.Dns) ||
len(a.Env) != len(b.Env) ||
len(a.PortSpecs) != len(b.PortSpecs) {
len(a.PortSpecs) != len(b.PortSpecs) ||
len(a.Entrypoint) != len(b.Entrypoint) {
return false
}
@ -89,4 +90,7 @@ func MergeConfig(userConf, imageConf *Config) {
if userConf.Entrypoint == nil || len(userConf.Entrypoint) == 0 {
userConf.Entrypoint = imageConf.Entrypoint
}
if userConf.Volumes == nil || len(userConf.Volumes) == 0 {
userConf.Volumes = imageConf.Volumes
}
}


@ -87,7 +87,7 @@ func (r *progressReader) Read(p []byte) (n int, err error) {
}
if r.readProgress-r.lastUpdate > updateEvery || err != nil {
if r.readTotal > 0 {
fmt.Fprintf(r.output, r.template, HumanSize(int64(r.readProgress)), HumanSize(int64(r.readTotal)), fmt.Sprintf("%2.0f%%", float64(r.readProgress)/float64(r.readTotal)*100))
fmt.Fprintf(r.output, r.template, HumanSize(int64(r.readProgress)), HumanSize(int64(r.readTotal)), fmt.Sprintf("%.0f%%", float64(r.readProgress)/float64(r.readTotal)*100))
} else {
fmt.Fprintf(r.output, r.template, r.readProgress, "?", "n/a")
}
@ -106,7 +106,7 @@ func (r *progressReader) Close() error {
func ProgressReader(r io.ReadCloser, size int, output io.Writer, template []byte, sf *StreamFormatter) *progressReader {
tpl := string(template)
if tpl == "" {
tpl = string(sf.FormatProgress("", "%v/%v (%v)"))
tpl = string(sf.FormatProgress("", "%8v/%v (%v)"))
}
return &progressReader{r, NewWriteFlusher(output), size, 0, 0, tpl, sf}
}
@ -147,7 +147,7 @@ func HumanSize(size int64) string {
sizef = sizef / 1000.0
i++
}
return fmt.Sprintf("%5.4g %s", sizef, units[i])
return fmt.Sprintf("%.4g %s", sizef, units[i])
}
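The fixed width moves from HumanSize's verb into the progress templates: %5.4g becomes %.4g, and the "%8v/%v (%v)" templates used for Downloading, Pushing and Importing now provide the column padding. A quick illustration of the three verbs:

package main

import "fmt"

func main() {
    fmt.Printf("[%5.4g]\n", 12.5)                // [ 12.5]
    fmt.Printf("[%.4g]\n", 12.5)                 // [12.5]
    fmt.Printf("[%8v/%v]\n", "12.5 MB", "50 MB") // [ 12.5 MB/50 MB]
}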
func Trunc(s string, maxlen int) string {


@ -7,6 +7,7 @@ import (
"path"
"strings"
"testing"
"github.com/dotcloud/docker/utils"
)
// This file contains utility functions for docker's unit test suite.
@ -15,14 +16,40 @@ import (
// Create a temporary runtime suitable for unit testing.
// Call t.Fatal() at the first error.
func mkRuntime(t *testing.T) *Runtime {
func mkRuntime(f Fataler) *Runtime {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
f.Fatal(err)
}
return runtime
}
// A common interface to access the Fatal method of
// both testing.B and testing.T.
type Fataler interface {
Fatal(args ...interface{})
}
func newTestRuntime() (*Runtime, error) {
root, err := ioutil.TempDir("", "docker-test")
if err != nil {
return nil, err
}
if err := os.Remove(root); err != nil {
return nil, err
}
if err := utils.CopyDirectory(unitTestStoreBase, root); err != nil {
return nil, err
}
runtime, err := NewRuntimeFromDirectory(root, false)
if err != nil {
return nil, err
}
runtime.UpdateCapabilities(true)
return runtime, nil
}
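Because Fatal(args ...interface{}) is the only method the helper needs, both *testing.T and *testing.B satisfy the new Fataler interface, which is what lets the benchmarks elsewhere in this diff call mkRuntime(b). A tiny self-contained sketch of the pattern, in a hypothetical _test.go file:

package sketch

import "testing"

// Fataler is the same single-method interface introduced above.
type Fataler interface {
    Fatal(args ...interface{})
}

// mkFixture stands in for mkRuntime; on error it would call f.Fatal(err)
// and abort either a test or a benchmark.
func mkFixture(f Fataler) string {
    return "fixture"
}

func TestFixture(t *testing.T)      { _ = mkFixture(t) }
func BenchmarkFixture(b *testing.B) { _ = mkFixture(b) }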
// Write `content` to the file at path `dst`, creating it if necessary,
// as well as any missing directories.
// The file is truncated if it already exists.