Integration tests: remove dependency on private APIs

Victor Vieux 2013-11-14 06:10:20 +00:00 committed by Victor Vieux
parent 359a6f49b9
commit c001a5af67
8 changed files with 528 additions and 1120 deletions
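In outline, the change converts tests that used to construct Server and Runtime values directly (which requires access to unexported fields such as runtime, pullingPool and pushingPool) into tests that create an engine and derive everything from it through exported helpers. A minimal, illustrative sketch of the new setup, using a hypothetical test name and the helper names that appear in the diffs below (NewTestEngine, mkServerFromEngine, mkRuntimeFromEngine, nuke):

// Illustrative only: the helpers are the integration-test fixtures shown in this diff.
func TestConvertedSetup(t *testing.T) {
	// Old pattern, only possible from inside package docker:
	//   runtime := mkRuntime(t)
	//   srv := &Server{runtime: runtime}
	// New pattern, built entirely from exported helpers:
	eng := NewTestEngine(t)
	defer nuke(mkRuntimeFromEngine(eng, t))
	srv := mkServerFromEngine(eng, t)

	// From here on the test only calls exported API, for example:
	if _, err := srv.Images(true, ""); err != nil {
		t.Fatal(err)
	}
}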

View File

@ -1,76 +1,35 @@
package docker
import (
"archive/tar"
"bufio"
"bytes"
_ "archive/tar"
_ "bufio"
_ "bytes"
"encoding/json"
"fmt"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/utils"
"io"
"net"
_ "net"
"net/http"
"net/http/httptest"
"os"
"path"
"strings"
_ "os"
_ "path"
_ "strings"
"testing"
"time"
)
func TestGetBoolParam(t *testing.T) {
if ret, err := getBoolParam("true"); err != nil || !ret {
t.Fatalf("true -> true, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("True"); err != nil || !ret {
t.Fatalf("True -> true, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("1"); err != nil || !ret {
t.Fatalf("1 -> true, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam(""); err != nil || ret {
t.Fatalf("\"\" -> false, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("false"); err != nil || ret {
t.Fatalf("false -> false, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("0"); err != nil || ret {
t.Fatalf("0 -> false, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("faux"); err == nil || ret {
t.Fatalf("faux -> false, err | got %t %s", ret, err)
}
}
func TesthttpError(t *testing.T) {
r := httptest.NewRecorder()
httpError(r, fmt.Errorf("No such method"))
if r.Code != http.StatusNotFound {
t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code)
}
httpError(r, fmt.Errorf("This accound hasn't been activated"))
if r.Code != http.StatusForbidden {
t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code)
}
httpError(r, fmt.Errorf("Some error"))
if r.Code != http.StatusInternalServerError {
t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code)
}
}
/*
func TestGetVersion(t *testing.T) {
var err error
runtime := mkRuntime(t)
runtime := mkRuntime()
defer nuke(runtime)
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
var err error
r := httptest.NewRecorder()
if err := getVersion(srv, APIVERSION, r, nil, nil); err != nil {
// FIXME getting the version should require an actual running Server
if err := getVersion(srv, docker.APIVERSION, r, nil, nil); err != nil {
t.Fatal(err)
}
@ -83,11 +42,29 @@ func TestGetVersion(t *testing.T) {
}
}
var err error
r := httptest.NewRecorder()
// FIXME getting the version should require an actual running Server
if err := getVersion(&docker.Server{}, docker.APIVERSION, r, nil, nil); err != nil {
t.Fatal(err)
}
v := &APIVersion{}
if err = json.Unmarshal(r.Body.Bytes(), v); err != nil {
t.Fatal(err)
}
if v.Version != VERSION {
t.Errorf("Expected version %s, %s found", VERSION, v.Version)
}
}
func TestGetInfo(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
initialImages, err := srv.runtime.graph.Map()
if err != nil {
@ -96,7 +73,7 @@ func TestGetInfo(t *testing.T) {
r := httptest.NewRecorder()
if err := getInfo(srv, APIVERSION, r, nil, nil); err != nil {
if err := getInfo(srv, docker.APIVERSION, r, nil, nil); err != nil {
t.Fatal(err)
}
@ -109,18 +86,25 @@ func TestGetInfo(t *testing.T) {
t.Errorf("Expected images: %d, %d found", len(initialImages), infos.Images)
}
}
*/
func TestGetEvents(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
// FIXME: we might not need runtime, why not simply nuke
// the engine?
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
events: make([]utils.JSONMessage, 0, 64),
listeners: make(map[string]chan utils.JSONMessage),
}
srv.LogEvent("fakeaction", "fakeid", "fakeimage")
srv.LogEvent("fakeaction2", "fakeid", "fakeimage")
var events []*utils.JSONMessage
for _, parts := range [][3]string{
{"fakeaction", "fakeid", "fakeimage"},
{"fakeaction2", "fakeid", "fakeimage"},
} {
action, id, from := parts[0], parts[1], parts[2]
ev := srv.LogEvent(action, id, from)
events = append(events, ev)
}
req, err := http.NewRequest("GET", "/events?since=1", nil)
if err != nil {
@ -129,7 +113,7 @@ func TestGetEvents(t *testing.T) {
r := httptest.NewRecorder()
setTimeout(t, "", 500*time.Millisecond, func() {
if err := getEvents(srv, APIVERSION, r, req, nil); err != nil {
if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
})
@ -142,18 +126,20 @@ func TestGetEvents(t *testing.T) {
} else if err != nil {
t.Fatal(err)
}
if jm != srv.events[i] {
if jm != *events[i] {
t.Fatalf("Event received it different than expected")
}
}
}
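One detail of the TestGetEvents conversion above is worth spelling out: srv.LogEvent returns the *utils.JSONMessage it records, so the test keeps those return values instead of reading the unexported srv.events slice, and the request is dispatched through the exported docker.ServeRequest rather than the unexported getEvents handler. A compressed sketch of that shape (srv and t are assumed to be in scope, as in the test above):

var events []*utils.JSONMessage
for _, parts := range [][3]string{
	{"fakeaction", "fakeid", "fakeimage"},
	{"fakeaction2", "fakeid", "fakeimage"},
} {
	// LogEvent returns the recorded message, so no access to the
	// private srv.events field is needed afterwards.
	events = append(events, srv.LogEvent(parts[0], parts[1], parts[2]))
}

req, err := http.NewRequest("GET", "/events?since=1", nil)
if err != nil {
	t.Fatal(err)
}
r := httptest.NewRecorder()
// ServeRequest is the exported router entry point used in place of getEvents.
if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil {
	t.Fatal(err)
}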
/*
func TestGetImagesJSON(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
// all=0
@ -169,7 +155,7 @@ func TestGetImagesJSON(t *testing.T) {
r := httptest.NewRecorder()
if err := getImagesJSON(srv, APIVERSION, r, req, nil); err != nil {
if err := getImagesJSON(srv, docker.APIVERSION, r, req, nil); err != nil {
t.Fatal(err)
}
@ -207,7 +193,7 @@ func TestGetImagesJSON(t *testing.T) {
t.Fatal(err)
}
if err := getImagesJSON(srv, APIVERSION, r2, req2, nil); err != nil {
if err := getImagesJSON(srv, docker.APIVERSION, r2, req2, nil); err != nil {
t.Fatal(err)
}
@ -239,7 +225,7 @@ func TestGetImagesJSON(t *testing.T) {
t.Fatal(err)
}
if err := getImagesJSON(srv, APIVERSION, r3, req3, nil); err != nil {
if err := getImagesJSON(srv, docker.APIVERSION, r3, req3, nil); err != nil {
t.Fatal(err)
}
@ -260,7 +246,7 @@ func TestGetImagesJSON(t *testing.T) {
t.Fatal(err)
}
err = getImagesJSON(srv, APIVERSION, r4, req4, nil)
err = getImagesJSON(srv, docker.APIVERSION, r4, req4, nil)
if err == nil {
t.Fatalf("Error expected, received none")
}
@ -279,11 +265,11 @@ func TestGetImagesHistory(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
r := httptest.NewRecorder()
if err := getImagesHistory(srv, APIVERSION, r, nil, map[string]string{"name": unitTestImageName}); err != nil {
if err := getImagesHistory(srv, docker.APIVERSION, r, nil, map[string]string{"name": unitTestImageName}); err != nil {
t.Fatal(err)
}
@ -300,14 +286,14 @@ func TestGetImagesByName(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
r := httptest.NewRecorder()
if err := getImagesByName(srv, APIVERSION, r, nil, map[string]string{"name": unitTestImageName}); err != nil {
if err := getImagesByName(srv, docker.APIVERSION, r, nil, map[string]string{"name": unitTestImageName}); err != nil {
t.Fatal(err)
}
img := &Image{}
img := &docker.Image{}
if err := json.Unmarshal(r.Body.Bytes(), img); err != nil {
t.Fatal(err)
}
@ -320,7 +306,7 @@ func TestGetContainersJSON(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
beginLen := runtime.containers.Len()
@ -339,7 +325,7 @@ func TestGetContainersJSON(t *testing.T) {
}
r := httptest.NewRecorder()
if err := getContainersJSON(srv, APIVERSION, r, req, nil); err != nil {
if err := getContainersJSON(srv, docker.APIVERSION, r, req, nil); err != nil {
t.Fatal(err)
}
containers := []APIContainers{}
@ -358,7 +344,7 @@ func TestGetContainersExport(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
// Create a container and remove a file
container, _, err := runtime.Create(
@ -378,7 +364,7 @@ func TestGetContainersExport(t *testing.T) {
}
r := httptest.NewRecorder()
if err = getContainersExport(srv, APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil {
if err = getContainersExport(srv, docker.APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil {
t.Fatal(err)
}
@ -409,7 +395,7 @@ func TestGetContainersChanges(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
// Create a container and remove a file
container, _, err := runtime.Create(
@ -429,7 +415,7 @@ func TestGetContainersChanges(t *testing.T) {
}
r := httptest.NewRecorder()
if err := getContainersChanges(srv, APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil {
if err := getContainersChanges(srv, docker.APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil {
t.Fatal(err)
}
changes := []Change{}
@ -454,7 +440,7 @@ func TestGetContainersTop(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
container, _, err := runtime.Create(
&Config{
@ -505,7 +491,7 @@ func TestGetContainersTop(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if err := getContainersTop(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil {
if err := getContainersTop(srv, docker.APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil {
t.Fatal(err)
}
procs := APITop{}
@ -535,7 +521,7 @@ func TestGetContainersByName(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
// Create a container and remove a file
container, _, err := runtime.Create(
@ -551,10 +537,10 @@ func TestGetContainersByName(t *testing.T) {
defer runtime.Destroy(container)
r := httptest.NewRecorder()
if err := getContainersByName(srv, APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil {
if err := getContainersByName(srv, docker.APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil {
t.Fatal(err)
}
outContainer := &Container{}
outContainer := &docker.Container{}
if err := json.Unmarshal(r.Body.Bytes(), outContainer); err != nil {
t.Fatal(err)
}
@ -567,7 +553,7 @@ func TestPostCommit(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
// Create a container and remove a file
container, _, err := runtime.Create(
@ -592,7 +578,7 @@ func TestPostCommit(t *testing.T) {
}
r := httptest.NewRecorder()
if err := postCommit(srv, APIVERSION, r, req, nil); err != nil {
if err := postCommit(srv, docker.APIVERSION, r, req, nil); err != nil {
t.Fatal(err)
}
if r.Code != http.StatusCreated {
@ -629,7 +615,7 @@ func TestPostContainersCreate(t *testing.T) {
}
r := httptest.NewRecorder()
if err := postContainersCreate(srv, APIVERSION, r, req, nil); err != nil {
if err := postContainersCreate(srv, docker.APIVERSION, r, req, nil); err != nil {
t.Fatal(err)
}
if r.Code != http.StatusCreated {
@ -663,7 +649,7 @@ func TestPostContainersKill(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
container, _, err := runtime.Create(
&Config{
@ -690,7 +676,7 @@ func TestPostContainersKill(t *testing.T) {
}
r := httptest.NewRecorder()
if err := postContainersKill(srv, APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil {
if err := postContainersKill(srv, docker.APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil {
t.Fatal(err)
}
if r.Code != http.StatusNoContent {
@ -705,7 +691,7 @@ func TestPostContainersRestart(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
container, _, err := runtime.Create(
&Config{
@ -736,7 +722,7 @@ func TestPostContainersRestart(t *testing.T) {
t.Fatal(err)
}
r := httptest.NewRecorder()
if err := postContainersRestart(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil {
if err := postContainersRestart(srv, docker.APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil {
t.Fatal(err)
}
if r.Code != http.StatusNoContent {
@ -780,7 +766,7 @@ func TestPostContainersStart(t *testing.T) {
req.Header.Set("Content-Type", "application/json")
r := httptest.NewRecorder()
if err := postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": id}); err != nil {
if err := postContainersStart(srv, docker.APIVERSION, r, req, map[string]string{"name": id}); err != nil {
t.Fatal(err)
}
if r.Code != http.StatusNoContent {
@ -799,7 +785,7 @@ func TestPostContainersStart(t *testing.T) {
}
r = httptest.NewRecorder()
if err = postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": id}); err == nil {
if err = postContainersStart(srv, docker.APIVERSION, r, req, map[string]string{"name": id}); err == nil {
t.Fatalf("A running container should be able to be started")
}
@ -812,7 +798,7 @@ func TestPostContainersStop(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
container, _, err := runtime.Create(
&Config{
@ -844,7 +830,7 @@ func TestPostContainersStop(t *testing.T) {
t.Fatal(err)
}
r := httptest.NewRecorder()
if err := postContainersStop(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil {
if err := postContainersStop(srv, docker.APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil {
t.Fatal(err)
}
if r.Code != http.StatusNoContent {
@ -859,7 +845,7 @@ func TestPostContainersWait(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
container, _, err := runtime.Create(
&Config{
@ -880,7 +866,7 @@ func TestPostContainersWait(t *testing.T) {
setTimeout(t, "Wait timed out", 3*time.Second, func() {
r := httptest.NewRecorder()
if err := postContainersWait(srv, APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil {
if err := postContainersWait(srv, docker.APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil {
t.Fatal(err)
}
apiWait := &APIWait{}
@ -901,7 +887,7 @@ func TestPostContainersAttach(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
container, _, err := runtime.Create(
&Config{
@ -946,7 +932,7 @@ func TestPostContainersAttach(t *testing.T) {
t.Fatal(err)
}
if err := postContainersAttach(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil {
if err := postContainersAttach(srv, docker.APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil {
t.Fatal(err)
}
}()
@ -990,7 +976,7 @@ func TestPostContainersAttachStderr(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
container, _, err := runtime.Create(
&Config{
@ -1035,7 +1021,7 @@ func TestPostContainersAttachStderr(t *testing.T) {
t.Fatal(err)
}
if err := postContainersAttach(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil {
if err := postContainersAttach(srv, docker.APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil {
t.Fatal(err)
}
}()
@ -1082,7 +1068,7 @@ func TestDeleteContainers(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
container, _, err := runtime.Create(&Config{
Image: GetTestImage(runtime).ID,
@ -1102,7 +1088,7 @@ func TestDeleteContainers(t *testing.T) {
t.Fatal(err)
}
r := httptest.NewRecorder()
if err := deleteContainers(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil {
if err := deleteContainers(srv, docker.APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil {
t.Fatal(err)
}
if r.Code != http.StatusNoContent {
@ -1123,7 +1109,7 @@ func TestOptionsRoute(t *testing.T) {
defer nuke(runtime)
runtime.config.EnableCors = true
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
r := httptest.NewRecorder()
router, err := createRouter(srv, false)
@ -1147,7 +1133,7 @@ func TestGetEnabledCors(t *testing.T) {
defer nuke(runtime)
runtime.config.EnableCors = true
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
r := httptest.NewRecorder()
@ -1185,7 +1171,7 @@ func TestDeleteImages(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
initialImages, err := srv.Images(false, "")
if err != nil {
@ -1211,7 +1197,7 @@ func TestDeleteImages(t *testing.T) {
}
r := httptest.NewRecorder()
if err := deleteImages(srv, APIVERSION, r, req, map[string]string{"name": unitTestImageID}); err == nil {
if err := deleteImages(srv, docker.APIVERSION, r, req, map[string]string{"name": unitTestImageID}); err == nil {
t.Fatalf("Expected conflict error, got none")
}
@ -1221,7 +1207,7 @@ func TestDeleteImages(t *testing.T) {
}
r2 := httptest.NewRecorder()
if err := deleteImages(srv, APIVERSION, r2, req2, map[string]string{"name": "test:test"}); err != nil {
if err := deleteImages(srv, docker.APIVERSION, r2, req2, map[string]string{"name": "test:test"}); err != nil {
t.Fatal(err)
}
if r2.Code != http.StatusOK {
@ -1243,14 +1229,6 @@ func TestDeleteImages(t *testing.T) {
if len(images[0].RepoTags) != len(initialImages[0].RepoTags) {
t.Errorf("Expected %d image, %d found", len(initialImages), len(images))
}
/* if c := runtime.Get(container.Id); c != nil {
t.Fatalf("The container as not been deleted")
}
if _, err := os.Stat(path.Join(container.rwPath(), "test")); err == nil {
t.Fatalf("The test file has not been deleted")
} */
}
func TestJsonContentType(t *testing.T) {
@ -1271,7 +1249,7 @@ func TestPostContainersCopy(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
srv := &docker.Server{runtime: runtime}
// Create a container and remove a file
container, _, err := runtime.Create(
@ -1303,7 +1281,7 @@ func TestPostContainersCopy(t *testing.T) {
t.Fatal(err)
}
req.Header.Add("Content-Type", "application/json")
if err = postContainersCopy(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil {
if err = postContainersCopy(srv, docker.APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil {
t.Fatal(err)
}
@ -1356,3 +1334,4 @@ func (t *hijackTester) Hijack() (net.Conn, *bufio.ReadWriter, error) {
}
return conn, bufrw, nil
}
*/

View File

@ -2,7 +2,9 @@ package docker
import (
"fmt"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/engine"
"io/ioutil"
"net"
"net/http"
@ -14,7 +16,7 @@ import (
// mkTestContext generates a build context from the contents of the provided dockerfile.
// This context is suitable for use as an argument to BuildFile.Build()
func mkTestContext(dockerfile string, files [][2]string, t *testing.T) archive.Archive {
context, err := mkBuildContext(dockerfile, files)
context, err := docker.MkBuildContext(dockerfile, files)
if err != nil {
t.Fatal(err)
}
@ -228,17 +230,15 @@ func TestBuild(t *testing.T) {
}
}
func buildImage(context testContextTemplate, t *testing.T, srv *Server, useCache bool) *Image {
if srv == nil {
runtime := mkRuntime(t)
func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, useCache bool) *docker.Image {
if eng == nil {
eng = NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
// FIXME: we might not need runtime, why not simply nuke
// the engine?
defer nuke(runtime)
srv = &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
}
srv := mkServerFromEngine(eng, t)
httpServer, err := mkTestingFileServer(context.remoteFiles)
if err != nil {
@ -252,10 +252,17 @@ func buildImage(context testContextTemplate, t *testing.T, srv *Server, useCache
}
port := httpServer.URL[idx+1:]
ip := srv.runtime.networkManager.bridgeNetwork.IP
iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP")
if iIP == nil {
t.Fatal("Legacy bridgeIP field not set in engine")
}
ip, ok := iIP.(net.IP)
if !ok {
panic("Legacy bridgeIP field in engine does not cast to net.IP")
}
dockerfile := constructDockerfile(context.dockerfile, ip, port)
buildfile := NewBuildFile(srv, ioutil.Discard, false, useCache, false)
buildfile := docker.NewBuildFile(srv, ioutil.Discard, false, useCache, false)
id, err := buildfile.Build(mkTestContext(dockerfile, context.files, t))
if err != nil {
t.Fatal(err)
@ -368,20 +375,14 @@ func TestBuildEntrypoint(t *testing.T) {
// testing #1405 - config.Cmd does not get cleaned up if
// utilizing cache
func TestBuildEntrypointRunCleanup(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
img := buildImage(testContextTemplate{`
from {IMAGE}
run echo "hello"
`,
nil, nil}, t, srv, true)
nil, nil}, t, eng, true)
img = buildImage(testContextTemplate{`
from {IMAGE}
@ -389,7 +390,7 @@ func TestBuildEntrypointRunCleanup(t *testing.T) {
add foo /foo
entrypoint ["/bin/echo"]
`,
[][2]string{{"foo", "HEYO"}}, nil}, t, srv, true)
[][2]string{{"foo", "HEYO"}}, nil}, t, eng, true)
if len(img.Config.Cmd) != 0 {
t.Fail()
@ -397,14 +398,8 @@ func TestBuildEntrypointRunCleanup(t *testing.T) {
}
func TestBuildImageWithCache(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
template := testContextTemplate{`
from {IMAGE}
@ -412,11 +407,11 @@ func TestBuildImageWithCache(t *testing.T) {
`,
nil, nil}
img := buildImage(template, t, srv, true)
img := buildImage(template, t, eng, true)
imageId := img.ID
img = nil
img = buildImage(template, t, srv, true)
img = buildImage(template, t, eng, true)
if imageId != img.ID {
t.Logf("Image ids should match: %s != %s", imageId, img.ID)
@ -425,14 +420,8 @@ func TestBuildImageWithCache(t *testing.T) {
}
func TestBuildImageWithoutCache(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
template := testContextTemplate{`
from {IMAGE}
@ -440,11 +429,11 @@ func TestBuildImageWithoutCache(t *testing.T) {
`,
nil, nil}
img := buildImage(template, t, srv, true)
img := buildImage(template, t, eng, true)
imageId := img.ID
img = nil
img = buildImage(template, t, srv, false)
img = buildImage(template, t, eng, false)
if imageId == img.ID {
t.Logf("Image ids should not match: %s == %s", imageId, img.ID)
@ -453,14 +442,9 @@ func TestBuildImageWithoutCache(t *testing.T) {
}
func TestForbiddenContextPath(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
srv := mkServerFromEngine(eng, t)
context := testContextTemplate{`
from {IMAGE}
@ -481,10 +465,17 @@ func TestForbiddenContextPath(t *testing.T) {
}
port := httpServer.URL[idx+1:]
ip := srv.runtime.networkManager.bridgeNetwork.IP
iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP")
if iIP == nil {
t.Fatal("Legacy bridgeIP field not set in engine")
}
ip, ok := iIP.(net.IP)
if !ok {
panic("Legacy bridgeIP field in engine does not cast to net.IP")
}
dockerfile := constructDockerfile(context.dockerfile, ip, port)
buildfile := NewBuildFile(srv, ioutil.Discard, false, true, false)
buildfile := docker.NewBuildFile(srv, ioutil.Discard, false, true, false)
_, err = buildfile.Build(mkTestContext(dockerfile, context.files, t))
if err == nil {
@ -499,14 +490,8 @@ func TestForbiddenContextPath(t *testing.T) {
}
func TestBuildADDFileNotFound(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
context := testContextTemplate{`
from {IMAGE}
@ -526,10 +511,17 @@ func TestBuildADDFileNotFound(t *testing.T) {
}
port := httpServer.URL[idx+1:]
ip := srv.runtime.networkManager.bridgeNetwork.IP
iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP")
if iIP == nil {
t.Fatal("Legacy bridgeIP field not set in engine")
}
ip, ok := iIP.(net.IP)
if !ok {
panic("Legacy bridgeIP field in engine does not cast to net.IP")
}
dockerfile := constructDockerfile(context.dockerfile, ip, port)
buildfile := NewBuildFile(srv, ioutil.Discard, false, true, false)
buildfile := docker.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, false, true, false)
_, err = buildfile.Build(mkTestContext(dockerfile, context.files, t))
if err == nil {
@ -544,26 +536,20 @@ func TestBuildADDFileNotFound(t *testing.T) {
}
func TestBuildInheritance(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
img := buildImage(testContextTemplate{`
from {IMAGE}
expose 4243
`,
nil, nil}, t, srv, true)
nil, nil}, t, eng, true)
img2 := buildImage(testContextTemplate{fmt.Sprintf(`
from %s
entrypoint ["/bin/echo"]
`, img.ID),
nil, nil}, t, srv, true)
nil, nil}, t, eng, true)
// from child
if img2.Config.Entrypoint[0] != "/bin/echo" {

View File

@ -3,6 +3,8 @@ package docker
import (
"bufio"
"fmt"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
@ -66,8 +68,8 @@ func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error
func TestRunHostname(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {
@ -111,8 +113,8 @@ func TestRunHostname(t *testing.T) {
func TestRunWorkdir(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {
@ -156,8 +158,8 @@ func TestRunWorkdir(t *testing.T) {
func TestRunWorkdirExists(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {
@ -201,8 +203,8 @@ func TestRunExit(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c1 := make(chan struct{})
go func() {
@ -254,8 +256,8 @@ func TestRunDisconnect(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c1 := make(chan struct{})
go func() {
@ -299,8 +301,8 @@ func TestRunDisconnectTty(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c1 := make(chan struct{})
go func() {
@ -356,8 +358,8 @@ func TestRunAttachStdin(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
ch := make(chan struct{})
go func() {
@ -420,8 +422,8 @@ func TestRunDetach(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
ch := make(chan struct{})
go func() {
@ -466,8 +468,8 @@ func TestAttachDetach(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
ch := make(chan struct{})
go func() {
@ -477,7 +479,7 @@ func TestAttachDetach(t *testing.T) {
}
}()
var container *Container
var container *docker.Container
setTimeout(t, "Reading container's id timed out", 10*time.Second, func() {
buf := make([]byte, 1024)
@ -498,7 +500,7 @@ func TestAttachDetach(t *testing.T) {
stdin, stdinPipe = io.Pipe()
stdout, stdoutPipe = io.Pipe()
cli = NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
cli = docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
ch = make(chan struct{})
go func() {
@ -546,8 +548,8 @@ func TestAttachDetachTruncatedID(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
go stdout.Read(make([]byte, 1024))
setTimeout(t, "Starting container timed out", 2*time.Second, func() {
@ -560,7 +562,7 @@ func TestAttachDetachTruncatedID(t *testing.T) {
stdin, stdinPipe = io.Pipe()
stdout, stdoutPipe = io.Pipe()
cli = NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
cli = docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
ch := make(chan struct{})
go func() {
@ -608,8 +610,8 @@ func TestAttachDisconnect(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
go func() {
// Start a process in daemon mode
@ -677,8 +679,8 @@ func TestAttachDisconnect(t *testing.T) {
func TestRunAutoRemove(t *testing.T) {
t.Skip("Fixme. Skipping test for now, race condition")
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {
@ -712,8 +714,8 @@ func TestRunAutoRemove(t *testing.T) {
}
func TestCmdLogs(t *testing.T) {
cli := NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
if err := cli.CmdRun(unitTestImageID, "sh", "-c", "ls -l"); err != nil {
t.Fatal(err)
@ -730,8 +732,8 @@ func TestCmdLogs(t *testing.T) {
// Expected behaviour: using / as a bind mount source should throw an error
func TestRunErrorBindMountRootSource(t *testing.T) {
cli := NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {
@ -749,8 +751,8 @@ func TestRunErrorBindMountRootSource(t *testing.T) {
// Expected behaviour: error out when attempting to bind mount non-existing source paths
func TestRunErrorBindNonExistingSource(t *testing.T) {
cli := NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {
@ -768,11 +770,10 @@ func TestRunErrorBindNonExistingSource(t *testing.T) {
func TestImagesViz(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
srv := &Server{runtime: globalRuntime}
image := buildTestImages(t, srv)
image := buildTestImages(t, globalEngine)
c := make(chan struct{})
go func() {
@ -819,11 +820,10 @@ func TestImagesViz(t *testing.T) {
func TestImagesTree(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
srv := &Server{runtime: globalRuntime}
image := buildTestImages(t, srv)
image := buildTestImages(t, globalEngine)
c := make(chan struct{})
go func() {
@ -867,7 +867,7 @@ func TestImagesTree(t *testing.T) {
})
}
func buildTestImages(t *testing.T, srv *Server) *Image {
func buildTestImages(t *testing.T, eng *engine.Engine) *docker.Image {
var testBuilder = testContextTemplate{
`
@ -880,9 +880,9 @@ run [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]
nil,
nil,
}
image := buildImage(testBuilder, t, srv, true)
image := buildImage(testBuilder, t, eng, true)
err := srv.ContainerTag(image.ID, "test", "latest", false)
err := mkServerFromEngine(eng, t).ContainerTag(image.ID, "test", "latest", false)
if err != nil {
t.Fatal(err)
}
@ -902,8 +902,8 @@ func TestRunCidFile(t *testing.T) {
}
tmpCidFile := path.Join(tmpDir, "cid")
cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {

View File

@ -3,10 +3,10 @@ package docker
import (
"bufio"
"fmt"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
"math/rand"
"os"
"path"
"regexp"
@ -20,7 +20,7 @@ func TestIDFormat(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container1, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/sh", "-c", "echo hello world"},
},
@ -134,7 +134,8 @@ func TestMultipleAttachRestart(t *testing.T) {
}
func TestDiff(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
// Create a container and remove a file
container1, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t)
@ -169,11 +170,7 @@ func TestDiff(t *testing.T) {
}
// Commit the container
rwTar, err := container1.ExportRw()
if err != nil {
t.Error(err)
}
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image - diff", "", nil)
img, err := runtime.Commit(container1, "", "", "unit test commited image - diff", "", nil)
if err != nil {
t.Error(err)
}
@ -237,11 +234,7 @@ func TestCommitAutoRun(t *testing.T) {
t.Errorf("Container shouldn't be running")
}
rwTar, err := container1.ExportRw()
if err != nil {
t.Error(err)
}
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "", &Config{Cmd: []string{"cat", "/world"}})
img, err := runtime.Commit(container1, "", "", "unit test commited image", "", &docker.Config{Cmd: []string{"cat", "/world"}})
if err != nil {
t.Error(err)
}
@ -297,11 +290,7 @@ func TestCommitRun(t *testing.T) {
t.Errorf("Container shouldn't be running")
}
rwTar, err := container1.ExportRw()
if err != nil {
t.Error(err)
}
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "", nil)
img, err := runtime.Commit(container1, "", "", "unit test commited image", "", nil)
if err != nil {
t.Error(err)
}
@ -391,7 +380,7 @@ func TestOutput(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"echo", "-n", "foobar"},
},
@ -414,7 +403,7 @@ func TestContainerNetwork(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ping", "-c", "1", "127.0.0.1"},
},
@ -436,7 +425,7 @@ func TestKillDifferentUser(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"cat"},
OpenStdin: true,
@ -448,7 +437,9 @@ func TestKillDifferentUser(t *testing.T) {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer container.stdin.Close()
// FIXME @shykes: this seems redundant, but is very old, I'm leaving it in case
// there is a side effect I'm not seeing.
// defer container.stdin.Close()
if container.State.Running {
t.Errorf("Container shouldn't be running")
@ -490,22 +481,35 @@ func TestKillDifferentUser(t *testing.T) {
// Test that creating a container with a volume doesn't crash. Regression test for #995.
func TestCreateVolume(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
config, hc, _, err := ParseRun([]string{"-v", "/var/lib/data", GetTestImage(runtime).ID, "echo", "hello", "world"}, nil)
config, hc, _, err := docker.ParseRun([]string{"-v", "/var/lib/data", unitTestImageID, "echo", "hello", "world"}, nil)
if err != nil {
t.Fatal(err)
}
c, _, err := runtime.Create(config, "")
if err != nil {
jobCreate := eng.Job("create")
if err := jobCreate.ImportEnv(config); err != nil {
t.Fatal(err)
}
defer runtime.Destroy(c)
c.hostConfig = hc
if err := c.Start(); err != nil {
var id string
jobCreate.StdoutParseString(&id)
if err := jobCreate.Run(); err != nil {
t.Fatal(err)
}
jobStart := eng.Job("start", id)
if err := jobStart.ImportEnv(hc); err != nil {
t.Fatal(err)
}
if err := jobStart.Run(); err != nil {
t.Fatal(err)
}
// FIXME: this hack can be removed once Wait is a job
c := runtime.Get(id)
if c == nil {
t.Fatalf("Couldn't retrieve container %s from runtime", id)
}
c.WaitTimeout(500 * time.Millisecond)
c.Wait()
}
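Container creation and start-up in the converted tests go through engine jobs rather than runtime.Create and Container.Start. A condensed sketch of the flow used in TestCreateVolume above (eng, runtime and t are assumed to be in scope, as in that test):

config, hc, _, err := docker.ParseRun([]string{"-v", "/var/lib/data", unitTestImageID, "echo", "hello", "world"}, nil)
if err != nil {
	t.Fatal(err)
}

jobCreate := eng.Job("create")
if err := jobCreate.ImportEnv(config); err != nil { // the Config becomes job env
	t.Fatal(err)
}
var id string
jobCreate.StdoutParseString(&id) // the job prints the new container ID
if err := jobCreate.Run(); err != nil {
	t.Fatal(err)
}

jobStart := eng.Job("start", id)
if err := jobStart.ImportEnv(hc); err != nil { // the HostConfig travels the same way
	t.Fatal(err)
}
if err := jobStart.Run(); err != nil {
	t.Fatal(err)
}

// Wait is not a job yet (see the FIXME above), so the test still reaches
// back into the runtime to block on the container.
if c := runtime.Get(id); c != nil {
	c.Wait()
}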
@ -513,7 +517,7 @@ func TestCreateVolume(t *testing.T) {
func TestKill(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"sleep", "2"},
},
@ -557,7 +561,7 @@ func TestExitCode(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
trueContainer, _, err := runtime.Create(&Config{
trueContainer, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/true", ""},
}, "")
@ -572,7 +576,7 @@ func TestExitCode(t *testing.T) {
t.Errorf("Unexpected exit code %d (expected 0)", trueContainer.State.ExitCode)
}
falseContainer, _, err := runtime.Create(&Config{
falseContainer, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/false", ""},
}, "")
@ -591,7 +595,7 @@ func TestExitCode(t *testing.T) {
func TestRestart(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"echo", "-n", "foobar"},
},
@ -622,7 +626,7 @@ func TestRestart(t *testing.T) {
func TestRestartStdin(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"cat"},
@ -700,7 +704,7 @@ func TestUser(t *testing.T) {
defer nuke(runtime)
// Default user must be root
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"id"},
},
@ -719,7 +723,7 @@ func TestUser(t *testing.T) {
}
// Set a username
container, _, err = runtime.Create(&Config{
container, _, err = runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"id"},
@ -740,7 +744,7 @@ func TestUser(t *testing.T) {
}
// Set a UID
container, _, err = runtime.Create(&Config{
container, _, err = runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"id"},
@ -761,7 +765,7 @@ func TestUser(t *testing.T) {
}
// Set a different user by uid
container, _, err = runtime.Create(&Config{
container, _, err = runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"id"},
@ -784,7 +788,7 @@ func TestUser(t *testing.T) {
}
// Set a different user by username
container, _, err = runtime.Create(&Config{
container, _, err = runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"id"},
@ -805,7 +809,7 @@ func TestUser(t *testing.T) {
}
// Test a wrong username
container, _, err = runtime.Create(&Config{
container, _, err = runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"id"},
@ -827,7 +831,7 @@ func TestMultipleContainers(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container1, _, err := runtime.Create(&Config{
container1, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"sleep", "2"},
},
@ -838,7 +842,7 @@ func TestMultipleContainers(t *testing.T) {
}
defer runtime.Destroy(container1)
container2, _, err := runtime.Create(&Config{
container2, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"sleep", "2"},
},
@ -882,7 +886,7 @@ func TestMultipleContainers(t *testing.T) {
func TestStdin(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"cat"},
@ -927,7 +931,7 @@ func TestStdin(t *testing.T) {
func TestTty(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"cat"},
@ -974,7 +978,7 @@ func TestEnv(t *testing.T) {
os.Setenv("TRICKY", "tri\ncky\n")
runtime := mkRuntime(t)
defer nuke(runtime)
config, _, _, err := ParseRun([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(runtime).ID, "env"}, nil)
config, _, _, err := docker.ParseRun([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(runtime).ID, "env"}, nil)
if err != nil {
t.Fatal(err)
}
@ -1028,7 +1032,7 @@ func TestEntrypoint(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Entrypoint: []string{"/bin/echo"},
Cmd: []string{"-n", "foobar"},
@ -1052,7 +1056,7 @@ func TestEntrypointNoCmd(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Entrypoint: []string{"/bin/echo", "foobar"},
},
@ -1071,96 +1075,11 @@ func TestEntrypointNoCmd(t *testing.T) {
}
}
func grepFile(t *testing.T, path string, pattern string) {
f, err := os.Open(path)
if err != nil {
t.Fatal(err)
}
defer f.Close()
r := bufio.NewReader(f)
var (
line string
)
err = nil
for err == nil {
line, err = r.ReadString('\n')
if strings.Contains(line, pattern) == true {
return
}
}
t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path)
}
func TestLXCConfig(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
// Memory is allocated randomly for testing
rand.Seed(time.Now().UTC().UnixNano())
memMin := 33554432
memMax := 536870912
mem := memMin + rand.Intn(memMax-memMin)
// CPU shares as well
cpuMin := 100
cpuMax := 10000
cpu := cpuMin + rand.Intn(cpuMax-cpuMin)
container, _, err := runtime.Create(&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/true"},
Hostname: "foobar",
Memory: int64(mem),
CpuShares: int64(cpu),
},
"",
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
container.generateLXCConfig()
grepFile(t, container.lxcConfigPath(), "lxc.utsname = foobar")
grepFile(t, container.lxcConfigPath(),
fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem))
grepFile(t, container.lxcConfigPath(),
fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2))
}
func TestCustomLxcConfig(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/true"},
Hostname: "foobar",
},
"",
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
container.hostConfig = &HostConfig{LxcConf: []KeyValuePair{
{
Key: "lxc.utsname",
Value: "docker",
},
{
Key: "lxc.cgroup.cpuset.cpus",
Value: "0,1",
},
}}
container.generateLXCConfig()
grepFile(t, container.lxcConfigPath(), "lxc.utsname = docker")
grepFile(t, container.lxcConfigPath(), "lxc.cgroup.cpuset.cpus = 0,1")
}
func BenchmarkRunSequencial(b *testing.B) {
runtime := mkRuntime(b)
defer nuke(runtime)
for i := 0; i < b.N; i++ {
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"echo", "-n", "foo"},
},
@ -1193,7 +1112,7 @@ func BenchmarkRunParallel(b *testing.B) {
complete := make(chan error)
tasks = append(tasks, complete)
go func(i int, complete chan error) {
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"echo", "-n", "foo"},
},
@ -1261,11 +1180,7 @@ func TestCopyVolumeUidGid(t *testing.T) {
t.Errorf("Container shouldn't be running")
}
rwTar, err := container1.ExportRw()
if err != nil {
t.Error(err)
}
img, err := r.graph.Create(rwTar, container1, "unit test commited image", "", nil)
img, err := r.Commit(container1, "", "", "unit test commited image", "", nil)
if err != nil {
t.Error(err)
}
@ -1298,11 +1213,7 @@ func TestCopyVolumeContent(t *testing.T) {
t.Errorf("Container shouldn't be running")
}
rwTar, err := container1.ExportRw()
if err != nil {
t.Error(err)
}
img, err := r.graph.Create(rwTar, container1, "unit test commited image", "", nil)
img, err := r.Commit(container1, "", "", "unit test commited image", "", nil)
if err != nil {
t.Error(err)
}
@ -1344,7 +1255,7 @@ func TestFromVolumesInReadonlyMode(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"},
Volumes: map[string]struct{}{"/test": {}},
@ -1364,7 +1275,7 @@ func TestFromVolumesInReadonlyMode(t *testing.T) {
}
container2, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"},
VolumesFrom: container.ID + ":ro",
@ -1405,7 +1316,7 @@ func TestVolumesFromReadonlyMount(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"},
Volumes: map[string]struct{}{"/test": {}},
@ -1425,7 +1336,7 @@ func TestVolumesFromReadonlyMount(t *testing.T) {
}
container2, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"},
VolumesFrom: container.ID,
@ -1461,7 +1372,7 @@ func TestRestartWithVolumes(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"echo", "-n", "foobar"},
Volumes: map[string]struct{}{"/test": {}},
@ -1505,7 +1416,7 @@ func TestVolumesFromWithVolumes(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"},
Volumes: map[string]struct{}{"/test": {}},
@ -1534,7 +1445,7 @@ func TestVolumesFromWithVolumes(t *testing.T) {
}
container2, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"cat", "/test/foo"},
VolumesFrom: container.ID,
@ -1568,26 +1479,42 @@ func TestVolumesFromWithVolumes(t *testing.T) {
}
func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
config, hc, _, err := ParseRun([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show"}, nil)
config, hc, _, err := docker.ParseRun([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show"}, nil)
if err != nil {
t.Fatal(err)
}
c, _, err := runtime.Create(config, "")
if err != nil {
jobCreate := eng.Job("create")
if err := jobCreate.ImportEnv(config); err != nil {
t.Fatal(err)
}
var id string
jobCreate.StdoutParseString(&id)
if err := jobCreate.Run(); err != nil {
t.Fatal(err)
}
// FIXME: this hack can be removed once Wait is a job
c := runtime.Get(id)
if c == nil {
t.Fatalf("Couldn't retrieve container %s from runtime", id)
}
stdout, err := c.StdoutPipe()
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(c)
c.hostConfig = hc
if err := c.Start(); err != nil {
jobStart := eng.Job("start", id)
if err := jobStart.ImportEnv(hc); err != nil {
t.Fatal(err)
}
if err := jobStart.Run(); err != nil {
t.Fatal(err)
}
c.WaitTimeout(500 * time.Millisecond)
c.Wait()
output, err := ioutil.ReadAll(stdout)
@ -1602,7 +1529,6 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
if !strings.HasSuffix(interfaces[0], ": lo") {
t.Fatalf("Wrong interface in test container: expected [*: lo], got %s", interfaces)
}
}
func TestPrivilegedCanMknod(t *testing.T) {
@ -1641,7 +1567,7 @@ func TestMultipleVolumesFrom(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"},
Volumes: map[string]struct{}{"/test": {}},
@ -1670,7 +1596,7 @@ func TestMultipleVolumesFrom(t *testing.T) {
}
container2, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"sh", "-c", "echo -n bar > /other/foo"},
Volumes: map[string]struct{}{"/other": {}},
@ -1692,7 +1618,7 @@ func TestMultipleVolumesFrom(t *testing.T) {
}
container3, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"},
VolumesFrom: strings.Join([]string{container.ID, container2.ID}, ","),
@ -1720,7 +1646,7 @@ func TestRestartGhost(t *testing.T) {
defer nuke(runtime)
container, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"},
Volumes: map[string]struct{}{"/test": {}},

View File

@ -3,6 +3,7 @@ package docker
import (
"bytes"
"fmt"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/sysinit"
"github.com/dotcloud/docker/utils"
@ -15,7 +16,6 @@ import (
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"testing"
"time"
@ -32,39 +32,33 @@ const (
)
var (
globalRuntime *Runtime
// FIXME: globalRuntime is deprecated by globalEngine. All tests should be converted.
globalRuntime *docker.Runtime
globalEngine *engine.Engine
startFds int
startGoroutines int
)
func nuke(runtime *Runtime) error {
var wg sync.WaitGroup
for _, container := range runtime.List() {
wg.Add(1)
go func(c *Container) {
c.Kill()
wg.Done()
}(container)
}
wg.Wait()
runtime.Close()
os.Remove(filepath.Join(runtime.config.Root, "linkgraph.db"))
return os.RemoveAll(runtime.config.Root)
// FIXME: nuke() is deprecated by Runtime.Nuke()
func nuke(runtime *docker.Runtime) error {
return runtime.Nuke()
}
func cleanup(runtime *Runtime) error {
// FIXME: cleanup and nuke are redundant.
func cleanup(eng *engine.Engine, t *testing.T) error {
runtime := mkRuntimeFromEngine(eng, t)
for _, container := range runtime.List() {
container.Kill()
runtime.Destroy(container)
}
images, err := runtime.graph.Map()
srv := mkServerFromEngine(eng, t)
images, err := srv.Images(true, "")
if err != nil {
return err
}
for _, image := range images {
if image.ID != unitTestImageID {
runtime.graph.Delete(image.ID)
srv.ImageDelete(image.ID, false)
}
}
return nil
@ -133,10 +127,9 @@ func setupBaseImage() {
log.Fatalf("Unable to create a runtime for tests:", err)
}
srv := mkServerFromEngine(eng, log.New(os.Stderr, "", 0))
runtime := srv.runtime
// If the unit test is not found, try to download it.
if img, err := runtime.repositories.LookupImage(unitTestImageName); err != nil || img.ID != unitTestImageID {
if img, err := srv.ImageInspect(unitTestImageName); err != nil || img.ID != unitTestImageID {
// Retrieve the Image
if err := srv.ImagePull(unitTestImageName, "", os.Stdout, utils.NewStreamFormatter(false), nil, nil, true); err != nil {
log.Fatalf("Unable to pull the test image: %s", err)
@ -151,8 +144,8 @@ func spawnGlobalDaemon() {
}
t := log.New(os.Stderr, "", 0)
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
globalRuntime = srv.runtime
globalEngine = eng
globalRuntime = mkRuntimeFromEngine(eng, t)
// Spawn a Daemon
go func() {
@ -174,8 +167,8 @@ func spawnGlobalDaemon() {
// FIXME: test that ImagePull(json=true) send correct json output
func GetTestImage(runtime *Runtime) *Image {
imgs, err := runtime.graph.Map()
func GetTestImage(runtime *docker.Runtime) *docker.Image {
imgs, err := runtime.Graph().Map()
if err != nil {
log.Fatalf("Unable to get the test image:", err)
}
@ -184,7 +177,7 @@ func GetTestImage(runtime *Runtime) *Image {
return image
}
}
log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, runtime.graph.Root, imgs)
log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, runtime.Graph().Root, imgs)
return nil
}
@ -197,7 +190,7 @@ func TestRuntimeCreate(t *testing.T) {
t.Errorf("Expected 0 containers, %v found", len(runtime.List()))
}
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
},
@ -239,12 +232,12 @@ func TestRuntimeCreate(t *testing.T) {
}
// Make sure create with bad parameters returns an error
if _, _, err = runtime.Create(&Config{Image: GetTestImage(runtime).ID}, ""); err == nil {
if _, _, err = runtime.Create(&docker.Config{Image: GetTestImage(runtime).ID}, ""); err == nil {
t.Fatal("Builder.Create should throw an error when Cmd is missing")
}
if _, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{},
},
@ -253,7 +246,7 @@ func TestRuntimeCreate(t *testing.T) {
t.Fatal("Builder.Create should throw an error when Cmd is empty")
}
config := &Config{
config := &docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/ls"},
PortSpecs: []string{"80"},
@ -266,7 +259,7 @@ func TestRuntimeCreate(t *testing.T) {
}
// test expose 80:8000
container, warnings, err := runtime.Create(&Config{
container, warnings, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
PortSpecs: []string{"80:8000"},
@ -285,7 +278,7 @@ func TestDestroy(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
}, "")
@ -312,12 +305,6 @@ func TestDestroy(t *testing.T) {
t.Errorf("Unable to get newly created container")
}
// Make sure the container root directory does not exist anymore
_, err = os.Stat(container.root)
if err == nil || !os.IsNotExist(err) {
t.Errorf("Container root directory still exists after destroy")
}
// Test double destroy
if err := runtime.Destroy(container); err == nil {
// It should have failed
@ -352,15 +339,21 @@ func TestGet(t *testing.T) {
}
func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container, string) {
func startEchoServerContainer(t *testing.T, proto string) (*docker.Runtime, *docker.Container, string) {
var (
err error
container *Container
strPort string
runtime = mkRuntime(t)
port = 5554
p Port
err error
id string
strPort string
eng = NewTestEngine(t)
runtime = mkRuntimeFromEngine(eng, t)
port = 5554
p docker.Port
)
defer func() {
if err != nil {
runtime.Nuke()
}
}()
for {
port += 1
@ -373,37 +366,45 @@ func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container,
} else {
t.Fatal(fmt.Errorf("Unknown protocol %v", proto))
}
ep := make(map[Port]struct{}, 1)
p = Port(fmt.Sprintf("%s/%s", strPort, proto))
ep := make(map[docker.Port]struct{}, 1)
p = docker.Port(fmt.Sprintf("%s/%s", strPort, proto))
ep[p] = struct{}{}
container, _, err = runtime.Create(&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"sh", "-c", cmd},
PortSpecs: []string{fmt.Sprintf("%s/%s", strPort, proto)},
ExposedPorts: ep,
}, "")
if err != nil {
nuke(runtime)
jobCreate := eng.Job("create")
jobCreate.Setenv("Image", unitTestImageID)
jobCreate.SetenvList("Cmd", []string{"sh", "-c", cmd})
jobCreate.SetenvList("PortSpecs", []string{fmt.Sprintf("%s/%s", strPort, proto)})
jobCreate.SetenvJson("ExposedPorts", ep)
jobCreate.StdoutParseString(&id)
if err := jobCreate.Run(); err != nil {
t.Fatal(err)
}
if container != nil {
// FIXME: this relies on the undocumented behavior of runtime.Create
// which will return a nil error AND container if the exposed ports
// are invalid. That behavior should be fixed!
if id != "" {
break
}
t.Logf("Port %v already in use, trying another one", strPort)
}
container.hostConfig = &HostConfig{
PortBindings: make(map[Port][]PortBinding),
}
container.hostConfig.PortBindings[p] = []PortBinding{
jobStart := eng.Job("start", id)
portBindings := make(map[docker.Port][]docker.PortBinding)
portBindings[p] = []docker.PortBinding{
{},
}
if err := container.Start(); err != nil {
nuke(runtime)
if err := jobStart.SetenvJson("PortsBindings", portBindings); err != nil {
t.Fatal(err)
}
if err := jobStart.Run(); err != nil {
t.Fatal(err)
}
container := runtime.Get(id)
if container == nil {
t.Fatalf("Couldn't fetch test container %s", id)
}
setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
for !container.State.Running {
@ -504,8 +505,9 @@ func TestAllocateUDPPortLocalhost(t *testing.T) {
}
func TestRestore(t *testing.T) {
runtime1 := mkRuntime(t)
defer nuke(runtime1)
eng := NewTestEngine(t)
runtime1 := mkRuntimeFromEngine(eng, t)
defer runtime1.Nuke()
// Create a container with one instance of docker
container1, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
defer runtime1.Destroy(container1)
@ -545,12 +547,14 @@ func TestRestore(t *testing.T) {
// Here we are simulating a docker restart - that is, reloading all containers
// from scratch
runtime1.config.AutoRestart = false
runtime2, err := NewRuntimeFromDirectory(runtime1.config)
if err != nil {
job := eng.Job("initapi")
job.Setenv("Root", eng.Root())
job.SetenvBool("AutoRestart", false)
if err := job.Run(); err != nil {
t.Fatal(err)
}
defer nuke(runtime2)
runtime2 := mkRuntimeFromEngine(eng, t)
if len(runtime2.List()) != 2 {
t.Errorf("Expected 2 container, %v found", len(runtime2.List()))
}
@ -575,7 +579,24 @@ func TestRestore(t *testing.T) {
}
func TestReloadContainerLinks(t *testing.T) {
runtime1 := mkRuntime(t)
// FIXME: here we don't use NewTestEngine because it calls initapi with AutoRestart=false,
// and we want to set it to true.
root, err := newTestDirectory(unitTestStoreBase)
if err != nil {
t.Fatal(err)
}
eng, err := engine.New(root)
if err != nil {
t.Fatal(err)
}
job := eng.Job("initapi")
job.Setenv("Root", eng.Root())
job.SetenvBool("Autorestart", true)
if err := job.Run(); err != nil {
t.Fatal(err)
}
runtime1 := mkRuntimeFromEngine(eng, t)
defer nuke(runtime1)
// Create a container with one instance of docker
container1, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t)
@ -590,7 +611,9 @@ func TestReloadContainerLinks(t *testing.T) {
t.Fatal(err)
}
// Add a link to container 2
container1.hostConfig.Links = []string{"/" + container2.ID + ":first"}
// FIXME @shykes: setting hostConfig.Links seems redundant with calling RegisterLink().
// Why do we need it @crosbymichael?
// container1.hostConfig.Links = []string{"/" + container2.ID + ":first"}
if err := runtime1.RegisterLink(container1, container2, "first"); err != nil {
t.Fatal(err)
}
@ -612,12 +635,13 @@ func TestReloadContainerLinks(t *testing.T) {
// Here we are simulating a docker restart - that is, reloading all containers
// from scratch
runtime1.config.AutoRestart = true
runtime2, err := NewRuntimeFromDirectory(runtime1.config)
if err != nil {
job = eng.Job("initapi")
job.Setenv("Root", eng.Root())
job.SetenvBool("AutoRestart", false)
if err := job.Run(); err != nil {
t.Fatal(err)
}
defer nuke(runtime2)
runtime2 := mkRuntimeFromEngine(eng, t)
if len(runtime2.List()) != 2 {
t.Errorf("Expected 2 container, %v found", len(runtime2.List()))
}
@ -631,27 +655,32 @@ func TestReloadContainerLinks(t *testing.T) {
t.Fatalf("Expected 2 container alive, %d found", runningCount)
}
// FIXME: we no longer test if containers were registered in the right order,
// because there is no public API that exposes registration order.
// Make sure container 2 ( the child of container 1 ) was registered and started first
// with the runtime
first := runtime2.containers.Front()
if first.Value.(*Container).ID != container2.ID {
//
containers := runtime2.List()
if len(containers) == 0 {
t.Fatalf("Runtime has no containers")
}
first := containers[0]
if first.ID != container2.ID {
t.Fatalf("Container 2 %s should be registered first in the runtime", container2.ID)
}
// Verify that the link is still registered in the runtime
entity := runtime2.containerGraph.Get(container1.Name)
if entity == nil {
t.Fatal("Entity should not be nil")
if c := runtime2.Get(container1.Name); c == nil {
t.Fatal("Named container is no longer registered after restart")
}
}
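// A minimal sketch of a hypothetical helper factoring out the restart
// simulation used by TestRestore and TestReloadContainerLinks above: re-run
// the "initapi" job on the same engine, then rebuild the runtime handle
// through mkRuntimeFromEngine. "Root" and "AutoRestart" are the env keys this
// diff already uses.
func reloadRuntime(eng *engine.Engine, autoRestart bool, t *testing.T) *docker.Runtime {
	job := eng.Job("initapi")
	job.Setenv("Root", eng.Root())
	job.SetenvBool("AutoRestart", autoRestart)
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}
	return mkRuntimeFromEngine(eng, t)
}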
func TestDefaultContainerName(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
@ -663,29 +692,19 @@ func TestDefaultContainerName(t *testing.T) {
t.Fatalf("Expect /some_name got %s", container.Name)
}
paths := runtime.containerGraph.RefPaths(containerID)
if paths == nil || len(paths) == 0 {
t.Fatalf("Could not find edges for %s", containerID)
}
edge := paths[0]
if edge.ParentID != "0" {
t.Fatalf("Expected engine got %s", edge.ParentID)
}
if edge.EntityID != containerID {
t.Fatalf("Expected %s got %s", containerID, edge.EntityID)
}
if edge.Name != "some_name" {
t.Fatalf("Expected some_name got %s", edge.Name)
if c := runtime.Get("/some_name"); c == nil {
t.Fatalf("Couldn't retrieve test container as /some_name")
} else if c.ID != containerID {
t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID)
}
}
func TestRandomContainerName(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err := docker.ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
@ -697,29 +716,19 @@ func TestRandomContainerName(t *testing.T) {
t.Fatalf("Expected not empty container name")
}
paths := runtime.containerGraph.RefPaths(containerID)
if paths == nil || len(paths) == 0 {
t.Fatalf("Could not find edges for %s", containerID)
}
edge := paths[0]
if edge.ParentID != "0" {
t.Fatalf("Expected engine got %s", edge.ParentID)
}
if edge.EntityID != containerID {
t.Fatalf("Expected %s got %s", containerID, edge.EntityID)
}
if edge.Name == "" {
t.Fatalf("Expected not empty container name")
if c := runtime.Get(container.Name); c == nil {
log.Fatalf("Could not lookup container %s by its name", container.Name)
} else if c.ID != containerID {
log.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID)
}
}
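// A minimal sketch of the ParseRun + createTestContainer pattern repeated by
// the tests above. mkTestContainer is a hypothetical wrapper, shown only to
// illustrate how the public docker.ParseRun feeds the "create" job helper.
func mkTestContainer(eng *engine.Engine, t *testing.T, args ...string) (shortId string) {
	config, _, _, err := docker.ParseRun(append(args, unitTestImageID, "echo test"), nil)
	if err != nil {
		t.Fatal(err)
	}
	return createTestContainer(eng, config, t)
}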
func TestLinkChildContainer(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
@ -735,7 +744,7 @@ func TestLinkChildContainer(t *testing.T) {
t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
}
config, _, _, err = ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err = docker.ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
@ -758,11 +767,10 @@ func TestLinkChildContainer(t *testing.T) {
func TestGetAllChildren(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
@ -778,7 +786,7 @@ func TestGetAllChildren(t *testing.T) {
t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
}
config, _, _, err = ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err = docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
@ -810,19 +818,3 @@ func TestGetAllChildren(t *testing.T) {
}
}
}
func TestGetFullName(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
name, err := runtime.getFullName("testing")
if err != nil {
t.Fatal(err)
}
if name != "/testing" {
t.Fatalf("Expected /testing got %s", name)
}
if _, err := runtime.getFullName(""); err == nil {
t.Fatal("Error should not be nil")
}
}

View File

@ -1,32 +1,31 @@
package docker
import (
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/utils"
"io/ioutil"
"strings"
"testing"
"time"
)
func TestContainerTagImageDelete(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
srv := &Server{runtime: runtime}
srv := mkServerFromEngine(eng, t)
initialImages, err := srv.Images(false, "")
if err != nil {
t.Fatal(err)
}
if err := srv.runtime.repositories.Set("utest", "tag1", unitTestImageName, false); err != nil {
if err := srv.ContainerTag(unitTestImageName, "utest", "tag1", false); err != nil {
t.Fatal(err)
}
if err := srv.runtime.repositories.Set("utest/docker", "tag2", unitTestImageName, false); err != nil {
if err := srv.ContainerTag(unitTestImageName, "utest/docker", "tag2", false); err != nil {
t.Fatal(err)
}
if err := srv.runtime.repositories.Set("utest:5000/docker", "tag3", unitTestImageName, false); err != nil {
if err := srv.ContainerTag(unitTestImageName, "utest:5000/docker", "tag3", false); err != nil {
t.Fatal(err)
}
@ -82,46 +81,43 @@ func TestContainerTagImageDelete(t *testing.T) {
func TestCreateRm(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)
defer mkRuntimeFromEngine(eng, t).Nuke()
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
id := createTestContainer(eng, config, t)
if len(runtime.List()) != 1 {
t.Errorf("Expected 1 container, %v found", len(runtime.List()))
if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 {
t.Errorf("Expected 1 container, %v found", len(c))
}
if err = srv.ContainerDestroy(id, true, false); err != nil {
t.Fatal(err)
}
if len(runtime.List()) != 0 {
t.Errorf("Expected 0 container, %v found", len(runtime.List()))
if c := srv.Containers(true, false, -1, "", ""); len(c) != 0 {
t.Errorf("Expected 0 container, %v found", len(c))
}
}
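// A minimal sketch of a hypothetical assertion helper wrapping the
// srv.Containers(...) count checks repeated throughout these tests; the
// argument values simply mirror the calls above.
func assertContainerCount(srv *docker.Server, want int, t *testing.T) {
	if c := srv.Containers(true, false, -1, "", ""); len(c) != want {
		t.Errorf("Expected %d containers, %d found", want, len(c))
	}
}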
func TestCreateRmVolumes(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)
defer mkRuntimeFromEngine(eng, t).Nuke()
config, hostConfig, _, err := ParseRun([]string{"-v", "/srv", GetTestImage(runtime).ID, "echo test"}, nil)
config, hostConfig, _, err := docker.ParseRun([]string{"-v", "/srv", unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
id := createTestContainer(eng, config, t)
if len(runtime.List()) != 1 {
t.Errorf("Expected 1 container, %v found", len(runtime.List()))
if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 {
t.Errorf("Expected 1 container, %v found", len(c))
}
job := eng.Job("start", id)
@ -141,18 +137,17 @@ func TestCreateRmVolumes(t *testing.T) {
t.Fatal(err)
}
if len(runtime.List()) != 0 {
t.Errorf("Expected 0 container, %v found", len(runtime.List()))
if c := srv.Containers(true, false, -1, "", ""); len(c) != 0 {
t.Errorf("Expected 0 container, %v found", len(c))
}
}
func TestCommit(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)
defer mkRuntimeFromEngine(eng, t).Nuke()
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "/bin/cat"}, nil)
config, _, _, err := docker.ParseRun([]string{unitTestImageID, "/bin/cat"}, nil)
if err != nil {
t.Fatal(err)
}
@ -167,18 +162,17 @@ func TestCommit(t *testing.T) {
func TestCreateStartRestartStopStartKillRm(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)
defer mkRuntimeFromEngine(eng, t).Nuke()
config, hostConfig, _, err := ParseRun([]string{GetTestImage(runtime).ID, "/bin/cat"}, nil)
config, hostConfig, _, err := docker.ParseRun([]string{unitTestImageID, "/bin/cat"}, nil)
if err != nil {
t.Fatal(err)
}
id := createTestContainer(eng, config, t)
if len(runtime.List()) != 1 {
t.Errorf("Expected 1 container, %v found", len(runtime.List()))
if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 {
t.Errorf("Expected 1 container, %v found", len(c))
}
job := eng.Job("start", id)
@ -214,21 +208,18 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) {
t.Fatal(err)
}
if len(runtime.List()) != 0 {
t.Errorf("Expected 0 container, %v found", len(runtime.List()))
if c := srv.Containers(true, false, -1, "", ""); len(c) != 0 {
t.Errorf("Expected 0 container, %v found", len(c))
}
}
func TestRunWithTooLowMemoryLimit(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)
defer mkRuntimeFromEngine(eng, t).Nuke()
// Try to create a container with a memory limit of 1 byte less than the minimum allowed limit.
job := eng.Job("create")
job.Setenv("Image", GetTestImage(runtime).ID)
job.Setenv("Image", unitTestImageID)
job.Setenv("Memory", "524287")
job.Setenv("CpuShares", "1000")
job.SetenvList("Cmd", []string{"/bin/cat"})
@ -239,163 +230,17 @@ func TestRunWithTooLowMemoryLimit(t *testing.T) {
}
}
func TestContainerTop(t *testing.T) {
t.Skip("Fixme. Skipping test for now. Reported error: 'server_test.go:236: Expected 2 processes, found 1.'")
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
c, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t)
c, err := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(c)
if err := c.Start(); err != nil {
t.Fatal(err)
}
// Give some time to the process to start
c.WaitTimeout(500 * time.Millisecond)
if !c.State.Running {
t.Errorf("Container should be running")
}
procs, err := srv.ContainerTop(c.ID, "")
if err != nil {
t.Fatal(err)
}
if len(procs.Processes) != 2 {
t.Fatalf("Expected 2 processes, found %d.", len(procs.Processes))
}
pos := -1
for i := 0; i < len(procs.Titles); i++ {
if procs.Titles[i] == "CMD" {
pos = i
break
}
}
if pos == -1 {
t.Fatalf("Expected CMD, not found.")
}
if procs.Processes[0][pos] != "sh" && procs.Processes[0][pos] != "busybox" {
t.Fatalf("Expected `busybox` or `sh`, found %s.", procs.Processes[0][pos])
}
if procs.Processes[1][pos] != "sh" && procs.Processes[1][pos] != "busybox" {
t.Fatalf("Expected `busybox` or `sh`, found %s.", procs.Processes[1][pos])
}
}
func TestPools(t *testing.T) {
runtime := mkRuntime(t)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
defer nuke(runtime)
err := srv.poolAdd("pull", "test1")
if err != nil {
t.Fatal(err)
}
err = srv.poolAdd("pull", "test2")
if err != nil {
t.Fatal(err)
}
err = srv.poolAdd("push", "test1")
if err == nil || err.Error() != "pull test1 is already in progress" {
t.Fatalf("Expected `pull test1 is already in progress`")
}
err = srv.poolAdd("pull", "test1")
if err == nil || err.Error() != "pull test1 is already in progress" {
t.Fatalf("Expected `pull test1 is already in progress`")
}
err = srv.poolAdd("wait", "test3")
if err == nil || err.Error() != "Unknown pool type" {
t.Fatalf("Expected `Unknown pool type`")
}
err = srv.poolRemove("pull", "test2")
if err != nil {
t.Fatal(err)
}
err = srv.poolRemove("pull", "test2")
if err != nil {
t.Fatal(err)
}
err = srv.poolRemove("pull", "test1")
if err != nil {
t.Fatal(err)
}
err = srv.poolRemove("push", "test1")
if err != nil {
t.Fatal(err)
}
err = srv.poolRemove("wait", "test3")
if err == nil || err.Error() != "Unknown pool type" {
t.Fatalf("Expected `Unknown pool type`")
}
}
func TestLogEvent(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
events: make([]utils.JSONMessage, 0, 64),
listeners: make(map[string]chan utils.JSONMessage),
}
srv.LogEvent("fakeaction", "fakeid", "fakeimage")
listener := make(chan utils.JSONMessage)
srv.Lock()
srv.listeners["test"] = listener
srv.Unlock()
srv.LogEvent("fakeaction2", "fakeid", "fakeimage")
if len(srv.events) != 2 {
t.Fatalf("Expected 2 events, found %d", len(srv.events))
}
go func() {
time.Sleep(200 * time.Millisecond)
srv.LogEvent("fakeaction3", "fakeid", "fakeimage")
time.Sleep(200 * time.Millisecond)
srv.LogEvent("fakeaction4", "fakeid", "fakeimage")
}()
setTimeout(t, "Listening for events timed out", 2*time.Second, func() {
for i := 2; i < 4; i++ {
event := <-listener
if event != srv.events[i] {
t.Fatalf("Event received it different than expected")
}
}
})
}
func TestRmi(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)
defer mkRuntimeFromEngine(eng, t).Nuke()
initialImages, err := srv.Images(false, "")
if err != nil {
t.Fatal(err)
}
config, hostConfig, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, hostConfig, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
@ -471,19 +316,19 @@ func TestRmi(t *testing.T) {
}
func TestImagesFilter(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
srv := &Server{runtime: runtime}
srv := mkServerFromEngine(eng, t)
if err := srv.runtime.repositories.Set("utest", "tag1", unitTestImageName, false); err != nil {
if err := srv.ContainerTag(unitTestImageName, "utest", "tag1", false); err != nil {
t.Fatal(err)
}
if err := srv.runtime.repositories.Set("utest/docker", "tag2", unitTestImageName, false); err != nil {
if err := srv.ContainerTag(unitTestImageName, "utest/docker", "tag2", false); err != nil {
t.Fatal(err)
}
if err := srv.runtime.repositories.Set("utest:5000/docker", "tag3", unitTestImageName, false); err != nil {
if err := srv.ContainerTag(unitTestImageName, "utest:5000/docker", "tag3", false); err != nil {
t.Fatal(err)
}
@ -525,9 +370,9 @@ func TestImagesFilter(t *testing.T) {
}
func TestImageInsert(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
srv := mkServerFromEngine(eng, t)
sf := utils.NewStreamFormatter(true)
// bad image name fails
@ -536,12 +381,12 @@ func TestImageInsert(t *testing.T) {
}
// bad url fails
if err := srv.ImageInsert(GetTestImage(runtime).ID, "http://bad_host_name_that_will_totally_fail.com/", "/foo", ioutil.Discard, sf); err == nil {
if err := srv.ImageInsert(unitTestImageID, "http://bad_host_name_that_will_totally_fail.com/", "/foo", ioutil.Discard, sf); err == nil {
t.Fatal("expected an error and got none")
}
// success returns nil
if err := srv.ImageInsert(GetTestImage(runtime).ID, "https://www.docker.io/static/img/docker-top-logo.png", "/foo", ioutil.Discard, sf); err != nil {
if err := srv.ImageInsert(unitTestImageID, "https://www.docker.io/static/img/docker-top-logo.png", "/foo", ioutil.Discard, sf); err != nil {
t.Fatalf("expected no error, but got %v", err)
}
}

View File

@ -1,25 +1,21 @@
package docker
import (
"fmt"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/utils"
"io/ioutil"
"testing"
"time"
)
func TestServerListOrderedImagesByCreationDate(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
srv := mkServerFromEngine(eng, t)
archive, err := fakeTar()
if err != nil {
if err := generateImage("", srv); err != nil {
t.Fatal(err)
}
_, err = runtime.graph.Create(archive, nil, "Testing", "", nil)
if err != nil {
t.Fatal(err)
}
srv := &Server{runtime: runtime}
images, err := srv.Images(true, "")
if err != nil {
@ -32,22 +28,22 @@ func TestServerListOrderedImagesByCreationDate(t *testing.T) {
}
func TestServerListOrderedImagesByCreationDateAndTag(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
srv := mkServerFromEngine(eng, t)
err := generateImage("bar", runtime)
err := generateImage("bar", srv)
if err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
err = generateImage("zed", runtime)
err = generateImage("zed", srv)
if err != nil {
t.Fatal(err)
}
srv := &Server{runtime: runtime}
images, err := srv.Images(true, "")
if err != nil {
t.Fatal(err)
@ -58,54 +54,10 @@ func TestServerListOrderedImagesByCreationDateAndTag(t *testing.T) {
}
}
func generateImage(name string, runtime *Runtime) error {
func generateImage(name string, srv *docker.Server) error {
archive, err := fakeTar()
if err != nil {
return err
}
image, err := runtime.graph.Create(archive, nil, "Testing", "", nil)
if err != nil {
return err
}
srv := &Server{runtime: runtime}
srv.ContainerTag(image.ID, "repo", name, false)
return nil
}
func TestSortUniquePorts(t *testing.T) {
ports := []Port{
Port("6379/tcp"),
Port("22/tcp"),
}
sortPorts(ports, func(ip, jp Port) bool {
return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp")
})
first := ports[0]
if fmt.Sprint(first) != "22/tcp" {
t.Log(fmt.Sprint(first))
t.Fail()
}
}
func TestSortSamePortWithDifferentProto(t *testing.T) {
ports := []Port{
Port("8888/tcp"),
Port("8888/udp"),
Port("6379/tcp"),
Port("6379/udp"),
}
sortPorts(ports, func(ip, jp Port) bool {
return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp")
})
first := ports[0]
if fmt.Sprint(first) != "6379/tcp" {
t.Fail()
}
return srv.ImageImport("-", "repo", name, archive, ioutil.Discard, utils.NewStreamFormatter(true))
}

View File

@ -1,14 +1,15 @@
package docker
import (
"fmt"
"archive/tar"
"bytes"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
"os"
"path"
"runtime"
"strings"
"testing"
)
@ -17,20 +18,18 @@ import (
// It has to be named XXX_test.go, apparently, in order to access private functions
// from other XXX_test.go functions.
var globalTestID string
// Create a temporary runtime suitable for unit testing.
// Call t.Fatal() at the first error.
func mkRuntime(f utils.Fataler) *Runtime {
func mkRuntime(f utils.Fataler) *docker.Runtime {
root, err := newTestDirectory(unitTestStoreBase)
if err != nil {
f.Fatal(err)
}
config := &DaemonConfig{
config := &docker.DaemonConfig{
Root: root,
AutoRestart: false,
}
r, err := NewRuntimeFromDirectory(config)
r, err := docker.NewRuntimeFromDirectory(config)
if err != nil {
f.Fatal(err)
}
@ -38,7 +37,7 @@ func mkRuntime(f utils.Fataler) *Runtime {
return r
}
func createNamedTestContainer(eng *engine.Engine, config *Config, f utils.Fataler, name string) (shortId string) {
func createNamedTestContainer(eng *engine.Engine, config *docker.Config, f utils.Fataler, name string) (shortId string) {
job := eng.Job("create", name)
if err := job.ImportEnv(config); err != nil {
f.Fatal(err)
@ -50,22 +49,34 @@ func createNamedTestContainer(eng *engine.Engine, config *Config, f utils.Fatale
return
}
func createTestContainer(eng *engine.Engine, config *Config, f utils.Fataler) (shortId string) {
func createTestContainer(eng *engine.Engine, config *docker.Config, f utils.Fataler) (shortId string) {
return createNamedTestContainer(eng, config, f, "")
}
func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *Server {
func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Server {
iSrv := eng.Hack_GetGlobalVar("httpapi.server")
if iSrv == nil {
panic("Legacy server field not set in engine")
}
srv, ok := iSrv.(*Server)
srv, ok := iSrv.(*docker.Server)
if !ok {
panic("Legacy server field in engine does not cast to *Server")
panic("Legacy server field in engine does not cast to *docker.Server")
}
return srv
}
func mkRuntimeFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Runtime {
iRuntime := eng.Hack_GetGlobalVar("httpapi.runtime")
if iRuntime == nil {
panic("Legacy runtime field not set in engine")
}
runtime, ok := iRuntime.(*docker.Runtime)
if !ok {
panic("Legacy runtime field in engine does not cast to *docker.Runtime")
}
return runtime
}
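// A minimal sketch of driving container creation and startup purely through
// engine jobs, mirroring the pattern the converted tests above now follow.
// createAndStart is a hypothetical helper; the job names and env keys
// ("create", "start", "Image", "Cmd") are the ones already used in this diff.
func createAndStart(eng *engine.Engine, image string, cmd []string, f utils.Fataler) (id string) {
	jobCreate := eng.Job("create")
	jobCreate.Setenv("Image", image)
	jobCreate.SetenvList("Cmd", cmd)
	jobCreate.StdoutParseString(&id)
	if err := jobCreate.Run(); err != nil {
		f.Fatal(err)
	}
	if err := eng.Job("start", id).Run(); err != nil {
		f.Fatal(err)
	}
	return id
}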
func NewTestEngine(t utils.Fataler) *engine.Engine {
root, err := newTestDirectory(unitTestStoreBase)
if err != nil {
@ -87,31 +98,11 @@ func NewTestEngine(t utils.Fataler) *engine.Engine {
}
func newTestDirectory(templateDir string) (dir string, err error) {
if globalTestID == "" {
globalTestID = GenerateID()[:4]
}
prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, getCallerName(2))
if prefix == "" {
prefix = "docker-test-"
}
dir, err = ioutil.TempDir("", prefix)
if err = os.Remove(dir); err != nil {
return
}
if err = utils.CopyDirectory(templateDir, dir); err != nil {
return
}
return
return utils.TestDirectory(templateDir)
}
func getCallerName(depth int) string {
// Use the caller function name as a prefix.
// This helps trace temp directories back to their test.
pc, _, _, _ := runtime.Caller(depth + 1)
callerLongName := runtime.FuncForPC(pc).Name()
parts := strings.Split(callerLongName, ".")
callerShortName := parts[len(parts)-1]
return callerShortName
return utils.GetCallerName(depth)
}
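// A minimal sketch of the setup/teardown pattern the converted tests follow:
// build a test engine, derive the server and runtime through the public
// helpers above, and nuke the runtime when the test ends. testSketch is a
// hypothetical example, not one of the tests in this file.
func testSketch(t *testing.T) {
	eng := NewTestEngine(t)
	runtime := mkRuntimeFromEngine(eng, t)
	defer runtime.Nuke()
	srv := mkServerFromEngine(eng, t)
	if _, err := srv.Images(false, ""); err != nil {
		t.Fatal(err)
	}
}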
// Write `content` to the file at path `dst`, creating it if necessary,
@ -152,8 +143,8 @@ func readFile(src string, t *testing.T) (content string) {
// dynamically replaced by the current test image.
// The caller is responsible for destroying the container.
// Call t.Fatal() at the first error.
func mkContainer(r *Runtime, args []string, t *testing.T) (*Container, error) {
config, hostConfig, _, err := ParseRun(args, nil)
func mkContainer(r *docker.Runtime, args []string, t *testing.T) (*docker.Container, error) {
config, _, _, err := docker.ParseRun(args, nil)
defer func() {
if err != nil && t != nil {
t.Fatal(err)
@ -169,7 +160,13 @@ func mkContainer(r *Runtime, args []string, t *testing.T) (*Container, error) {
if err != nil {
return nil, err
}
c.hostConfig = hostConfig
// NOTE: hostConfig is ignored.
// If `args` specify privileged mode, custom lxc conf, external mount binds,
// port redirects etc. they will be ignored.
// This is because the correct way to set these things is to pass environment
// to the `start` job.
// FIXME: this helper function should be deprecated in favor of calling
// `create` and `start` jobs directly.
return c, nil
}
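// A minimal sketch of what the NOTE above refers to: start-time settings such
// as port bindings are passed through the "start" job's environment instead
// of a hostConfig field on the container. startWithPortBindings is a
// hypothetical helper; "PortsBindings" is the env key already used by
// startEchoServerContainer in this diff.
func startWithPortBindings(eng *engine.Engine, id string, bindings map[docker.Port][]docker.PortBinding, f utils.Fataler) {
	job := eng.Job("start", id)
	if err := job.SetenvJson("PortsBindings", bindings); err != nil {
		f.Fatal(err)
	}
	if err := job.Run(); err != nil {
		f.Fatal(err)
	}
}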
@ -177,7 +174,7 @@ func mkContainer(r *Runtime, args []string, t *testing.T) (*Container, error) {
// and return its standard output as a string.
// The image name (eg. the XXX in []string{"-i", "-t", "XXX", "bash"}, is dynamically replaced by the current test image.
// If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally.
func runContainer(r *Runtime, args []string, t *testing.T) (output string, err error) {
func runContainer(r *docker.Runtime, args []string, t *testing.T) (output string, err error) {
defer func() {
if err != nil && t != nil {
t.Fatal(err)
@ -205,289 +202,20 @@ func runContainer(r *Runtime, args []string, t *testing.T) (output string, err e
return
}
func TestCompareConfig(t *testing.T) {
volumes1 := make(map[string]struct{})
volumes1["/test1"] = struct{}{}
config1 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"1111:1111", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes1,
}
config2 := Config{
Dns: []string{"0.0.0.0", "2.2.2.2"},
PortSpecs: []string{"1111:1111", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes1,
}
config3 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"0000:0000", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes1,
}
config4 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"0000:0000", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "22222222",
Volumes: volumes1,
}
volumes2 := make(map[string]struct{})
volumes2["/test2"] = struct{}{}
config5 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"0000:0000", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes2,
}
if CompareConfig(&config1, &config2) {
t.Fatalf("CompareConfig should return false, Dns are different")
}
if CompareConfig(&config1, &config3) {
t.Fatalf("CompareConfig should return false, PortSpecs are different")
}
if CompareConfig(&config1, &config4) {
t.Fatalf("CompareConfig should return false, VolumesFrom are different")
}
if CompareConfig(&config1, &config5) {
t.Fatalf("CompareConfig should return false, Volumes are different")
}
if !CompareConfig(&config1, &config1) {
t.Fatalf("CompareConfig should return true")
}
}
func TestMergeConfig(t *testing.T) {
volumesImage := make(map[string]struct{})
volumesImage["/test1"] = struct{}{}
volumesImage["/test2"] = struct{}{}
configImage := &Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"1111:1111", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "1111",
Volumes: volumesImage,
}
volumesUser := make(map[string]struct{})
volumesUser["/test3"] = struct{}{}
configUser := &Config{
Dns: []string{"3.3.3.3"},
PortSpecs: []string{"3333:2222", "3333:3333"},
Env: []string{"VAR2=3", "VAR3=3"},
Volumes: volumesUser,
}
if err := MergeConfig(configUser, configImage); err != nil {
t.Error(err)
}
if len(configUser.Dns) != 3 {
t.Fatalf("Expected 3 dns, 1.1.1.1, 2.2.2.2 and 3.3.3.3, found %d", len(configUser.Dns))
}
for _, dns := range configUser.Dns {
if dns != "1.1.1.1" && dns != "2.2.2.2" && dns != "3.3.3.3" {
t.Fatalf("Expected 1.1.1.1 or 2.2.2.2 or 3.3.3.3, found %s", dns)
}
}
if len(configUser.ExposedPorts) != 3 {
t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
}
for portSpecs := range configUser.ExposedPorts {
if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs)
}
}
if len(configUser.Env) != 3 {
t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env))
}
for _, env := range configUser.Env {
if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" {
t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env)
}
}
if len(configUser.Volumes) != 3 {
t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes))
}
for v := range configUser.Volumes {
if v != "/test1" && v != "/test2" && v != "/test3" {
t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v)
}
}
if configUser.VolumesFrom != "1111" {
t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom)
}
ports, _, err := parsePortSpecs([]string{"0000"})
if err != nil {
t.Error(err)
}
configImage2 := &Config{
ExposedPorts: ports,
}
if err := MergeConfig(configUser, configImage2); err != nil {
t.Error(err)
}
if len(configUser.ExposedPorts) != 4 {
t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
}
for portSpecs := range configUser.ExposedPorts {
if portSpecs.Port() != "0000" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
t.Fatalf("Expected 0000 or 1111 or 2222 or 3333, found %s", portSpecs)
}
}
}
func TestParseLxcConfOpt(t *testing.T) {
opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "}
for _, o := range opts {
k, v, err := parseLxcOpt(o)
if err != nil {
t.FailNow()
}
if k != "lxc.utsname" {
t.Fail()
}
if v != "docker" {
t.Fail()
}
}
}
func TestParseNetworkOptsPrivateOnly(t *testing.T) {
ports, bindings, err := parsePortSpecs([]string{"192.168.1.100::80"})
if err != nil {
t.Fatal(err)
}
if len(ports) != 1 {
t.Logf("Expected 1 got %d", len(ports))
t.FailNow()
}
if len(bindings) != 1 {
t.Logf("Expected 1 got %d", len(bindings))
t.FailNow()
}
for k := range ports {
if k.Proto() != "tcp" {
t.Logf("Expected tcp got %s", k.Proto())
t.Fail()
}
if k.Port() != "80" {
t.Logf("Expected 80 got %s", k.Port())
t.Fail()
}
b, exists := bindings[k]
if !exists {
t.Log("Binding does not exist")
t.FailNow()
}
if len(b) != 1 {
t.Logf("Expected 1 got %d", len(b))
t.FailNow()
}
s := b[0]
if s.HostPort != "" {
t.Logf("Expected \"\" got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
t.Fail()
}
}
}
func TestParseNetworkOptsPublic(t *testing.T) {
ports, bindings, err := parsePortSpecs([]string{"192.168.1.100:8080:80"})
if err != nil {
t.Fatal(err)
}
if len(ports) != 1 {
t.Logf("Expected 1 got %d", len(ports))
t.FailNow()
}
if len(bindings) != 1 {
t.Logf("Expected 1 got %d", len(bindings))
t.FailNow()
}
for k := range ports {
if k.Proto() != "tcp" {
t.Logf("Expected tcp got %s", k.Proto())
t.Fail()
}
if k.Port() != "80" {
t.Logf("Expected 80 got %s", k.Port())
t.Fail()
}
b, exists := bindings[k]
if !exists {
t.Log("Binding does not exist")
t.FailNow()
}
if len(b) != 1 {
t.Logf("Expected 1 got %d", len(b))
t.FailNow()
}
s := b[0]
if s.HostPort != "8080" {
t.Logf("Expected 8080 got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
t.Fail()
}
}
}
func TestParseNetworkOptsUdp(t *testing.T) {
ports, bindings, err := parsePortSpecs([]string{"192.168.1.100::6000/udp"})
if err != nil {
t.Fatal(err)
}
if len(ports) != 1 {
t.Logf("Expected 1 got %d", len(ports))
t.FailNow()
}
if len(bindings) != 1 {
t.Logf("Expected 1 got %d", len(bindings))
t.FailNow()
}
for k := range ports {
if k.Proto() != "udp" {
t.Logf("Expected udp got %s", k.Proto())
t.Fail()
}
if k.Port() != "6000" {
t.Logf("Expected 6000 got %s", k.Port())
t.Fail()
}
b, exists := bindings[k]
if !exists {
t.Log("Binding does not exist")
t.FailNow()
}
if len(b) != 1 {
t.Logf("Expected 1 got %d", len(b))
t.FailNow()
}
s := b[0]
if s.HostPort != "" {
t.Logf("Expected \"\" got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
t.Fail()
}
// FIXME: this is duplicated from graph_test.go in the docker package.
func fakeTar() (io.Reader, error) {
content := []byte("Hello world!\n")
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} {
hdr := new(tar.Header)
hdr.Size = int64(len(content))
hdr.Name = name
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
tw.Write([]byte(content))
}
tw.Close()
return buf, nil
}
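// A minimal usage sketch: the tarball produced by fakeTar can be fed straight
// into the server's public ImageImport, which is exactly what generateImage
// does above. importFakeImage is a hypothetical helper for illustration only.
func importFakeImage(srv *docker.Server, tag string) error {
	archive, err := fakeTar()
	if err != nil {
		return err
	}
	return srv.ImageImport("-", "repo", tag, archive, ioutil.Discard, utils.NewStreamFormatter(true))
}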