package docker
import (
"bytes"
"fmt"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/sysinit"
"github.com/dotcloud/docker/utils"
"io"
"log"
"net"
"net/url"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"testing"
"time"
)
const (
unitTestImageName = "docker-test-image"
unitTestImageID = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0
unitTestImageIDShort = "83599e29c455"
unitTestNetworkBridge = "testdockbr0"
unitTestStoreBase = "/var/lib/docker/unit-tests"
testDaemonAddr = "127.0.0.1:4270"
testDaemonProto = "tcp"
)
var (
// FIXME: globalRuntime is deprecated by globalEngine. All tests should be converted.
globalRuntime *docker.Runtime
globalEngine *engine.Engine
startFds int
startGoroutines int
)
// FIXME: nuke() is deprecated by Runtime.Nuke()
func nuke(runtime *docker.Runtime) error {
return runtime.Nuke()
}
// FIXME: cleanup and nuke are redundant.
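// cleanup kills and destroys every container in the runtime, then deletes
// every image except the shared unit-test image, so each test starts clean.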
func cleanup(eng *engine.Engine, t *testing.T) error {
runtime := mkRuntimeFromEngine(eng, t)
for _, container := range runtime.List() {
container.Kill()
runtime.Destroy(container)
}
job := eng.Job("images")
images, err := job.Stdout.AddTable()
if err != nil {
t.Fatal(err)
}
if err := job.Run(); err != nil {
t.Fatal(err)
}
for _, image := range images.Data {
if image.Get("ID") != unitTestImageID {
mkServerFromEngine(eng, t).ImageDelete(image.Get("ID"), false)
}
}
return nil
}
func layerArchive(tarfile string) (io.Reader, error) {
// FIXME: need to close f somewhere
f, err := os.Open(tarfile)
if err != nil {
return nil, err
}
return f, nil
}
func init() {
// Always use the same driver (vfs) for all integration tests.
// To test other drivers, we need a dedicated driver validation suite.
os.Setenv("DOCKER_DRIVER", "vfs")
os.Setenv("TEST", "1")
// Hack to run sys init during unit testing
if selfPath := utils.SelfPath(); selfPath == "/sbin/init" || selfPath == "/.dockerinit" {
sysinit.SysInit()
return
}
if uid := syscall.Geteuid(); uid != 0 {
log.Fatal("docker tests need to be run as root")
}
// Copy dockerinit into our current testing directory, if provided (so we can test a separate dockerinit binary)
if dockerinit := os.Getenv("TEST_DOCKERINIT_PATH"); dockerinit != "" {
src, err := os.Open(dockerinit)
if err != nil {
log.Fatalf("Unable to open TEST_DOCKERINIT_PATH: %s\n", err)
}
defer src.Close()
dst, err := os.OpenFile(filepath.Join(filepath.Dir(utils.SelfPath()), "dockerinit"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0555)
if err != nil {
log.Fatalf("Unable to create dockerinit in test directory: %s\n", err)
}
defer dst.Close()
if _, err := io.Copy(dst, src); err != nil {
log.Fatalf("Unable to copy dockerinit to TEST_DOCKERINIT_PATH: %s\n", err)
}
dst.Close()
src.Close()
}
// Setup the base runtime, which will be duplicated for each test.
// (no tests are run directly in the base)
setupBaseImage()
// Create the "global runtime" with a long-running daemon for integration tests
spawnGlobalDaemon()
startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine()
}
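// setupBaseImage initializes the base engine under unitTestStoreBase and
// pulls the unit-test image if it is not already present.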
func setupBaseImage() {
eng, err := engine.New(unitTestStoreBase)
if err != nil {
log.Fatalf("Can't initialize engine at %s: %s", unitTestStoreBase, err)
}
job := eng.Job("initapi")
job.Setenv("Root", unitTestStoreBase)
job.SetenvBool("Autorestart", false)
job.Setenv("BridgeIface", unitTestNetworkBridge)
if err := job.Run(); err != nil {
log.Fatalf("Unable to create a runtime for tests: %s", err)
}
srv := mkServerFromEngine(eng, log.New(os.Stderr, "", 0))
// If the unit test image is not found, try to download it.
if img, err := srv.ImageInspect(unitTestImageName); err != nil || img.ID != unitTestImageID {
// Retrieve the Image
if err := srv.ImagePull(unitTestImageName, "", os.Stdout, utils.NewStreamFormatter(false), nil, nil, true); err != nil {
log.Fatalf("Unable to pull the test image: %s", err)
}
}
}
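// spawnGlobalDaemon starts the shared API daemon used by the integration
// tests, serving on testDaemonProto/testDaemonAddr, and waits briefly for it
// to come up.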
func spawnGlobalDaemon() {
if globalRuntime != nil {
utils.Debugf("Global runtime already exists. Skipping.")
return
}
t := log.New(os.Stderr, "", 0)
eng := NewTestEngine(t)
globalEngine = eng
globalRuntime = mkRuntimeFromEngine(eng, t)
// Spawn a Daemon
go func() {
utils.Debugf("Spawning global daemon for integration tests")
listenURL := &url.URL{
Scheme: testDaemonProto,
Host: testDaemonAddr,
}
job := eng.Job("serveapi", listenURL.String())
job.SetenvBool("Logging", true)
if err := job.Run(); err != nil {
log.Fatalf("Unable to spawn the test daemon: %s", err)
}
}()
// Give some time for ListenAndServe to actually start
// FIXME: use inmem transports instead of tcp
time.Sleep(time.Second)
}
// FIXME: test that ImagePull(json=true) sends correct JSON output
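// GetTestImage returns the unit-test image from the runtime's graph, aborting
// the test run if it cannot be found.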
func GetTestImage(runtime *docker.Runtime) *docker.Image {
imgs, err := runtime.Graph().Map()
if err != nil {
log.Fatalf("Unable to get the test image: %s", err)
}
for _, image := range imgs {
if image.ID == unitTestImageID {
return image
}
}
log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, runtime.Graph().Root, imgs)
return nil
}
func TestRuntimeCreate(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
// Make sure we start with 0 containers
if len(runtime.List()) != 0 {
t.Errorf("Expected 0 containers, %v found", len(runtime.List()))
}
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
},
"",
)
if err != nil {
t.Fatal(err)
}
defer func() {
if err := runtime.Destroy(container); err != nil {
t.Error(err)
}
}()
// Make sure we can find the newly created container with List()
if len(runtime.List()) != 1 {
t.Errorf("Expected 1 container, %v found", len(runtime.List()))
}
// Make sure the container returned by List() is the right one
if runtime.List()[0].ID != container.ID {
t.Errorf("Unexpected container %v returned by List", runtime.List()[0])
}
// Make sure we can get the container with Get()
if runtime.Get(container.ID) == nil {
t.Errorf("Unable to get newly created container")
}
// Make sure it is the right container
if runtime.Get(container.ID) != container {
t.Errorf("Get() returned the wrong container")
}
// Make sure Exists returns it as existing
if !runtime.Exists(container.ID) {
t.Errorf("Exists() returned false for a newly created container")
}
// Test that conflict error displays correct details
testContainer, _, _ := runtime.Create(
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
},
"conflictname",
)
if _, _, err := runtime.Create(&docker.Config{Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}}, testContainer.Name); err == nil || !strings.Contains(err.Error(), utils.TruncateID(testContainer.ID)) {
t.Fatalf("Name conflict error doesn't include the correct short id. Message was: %s", err.Error())
}
// Make sure create with bad parameters returns an error
if _, _, err = runtime.Create(&docker.Config{Image: GetTestImage(runtime).ID}, ""); err == nil {
t.Fatal("Builder.Create should throw an error when Cmd is missing")
}
if _, _, err := runtime.Create(
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{},
},
"",
); err == nil {
t.Fatal("Builder.Create should throw an error when Cmd is empty")
}
config := &docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/ls"},
PortSpecs: []string{"80"},
}
container, _, err = runtime.Create(config, "")
if err != nil {
t.Fatal(err)
}
_, err = runtime.Commit(container, "testrepo", "testtag", "", "", config)
if err != nil {
t.Error(err)
}
// test expose 80:8000
container, warnings, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
PortSpecs: []string{"80:8000"},
},
"",
)
if err != nil {
t.Fatal(err)
}
if warnings == nil || len(warnings) != 1 {
t.Error("Expected a warning, got none")
}
}
func TestDestroy(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
}, "")
if err != nil {
t.Fatal(err)
}
// Destroy
if err := runtime.Destroy(container); err != nil {
t.Error(err)
}
// Make sure runtime.Exists() behaves correctly
if runtime.Exists("test_destroy") {
t.Errorf("Exists() returned true")
}
// Make sure runtime.List() doesn't list the destroyed container
if len(runtime.List()) != 0 {
t.Errorf("Expected 0 container, %v found", len(runtime.List()))
}
// Make sure runtime.Get() refuses to return the destroyed container
if runtime.Get(container.ID) != nil {
t.Errorf("Unable to get newly created container")
}
// Test double destroy
if err := runtime.Destroy(container); err == nil {
// It should have failed
t.Errorf("Double destroy did not fail")
}
}
func TestGet(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container1, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container1)
container2, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container2)
container3, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container3)
if runtime.Get(container1.ID) != container1 {
t.Errorf("Get(test1) returned %v while expecting %v", runtime.Get(container1.ID), container1)
}
if runtime.Get(container2.ID) != container2 {
t.Errorf("Get(test2) returned %v while expecting %v", runtime.Get(container2.ID), container2)
}
if runtime.Get(container3.ID) != container3 {
t.Errorf("Get(test3) returned %v while expecting %v", runtime.Get(container3.ID), container3)
}
}
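// startEchoServerContainer creates and starts a container running a socat
// echo server on a free TCP or UDP port, waits for it to come up, and returns
// the runtime, the container, and the host port it was mapped to.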
func startEchoServerContainer(t *testing.T, proto string) (*docker.Runtime, *docker.Container, string) {
var (
err error
id string
strPort string
eng = NewTestEngine(t)
runtime = mkRuntimeFromEngine(eng, t)
port = 5554
p docker.Port
)
defer func() {
if err != nil {
runtime.Nuke()
}
}()
for {
port += 1
strPort = strconv.Itoa(port)
var cmd string
if proto == "tcp" {
cmd = "socat TCP-LISTEN:" + strPort + ",reuseaddr,fork EXEC:/bin/cat"
} else if proto == "udp" {
cmd = "socat UDP-RECVFROM:" + strPort + ",fork EXEC:/bin/cat"
} else {
t.Fatal(fmt.Errorf("Unknown protocol %v", proto))
}
ep := make(map[docker.Port]struct{}, 1)
p = docker.Port(fmt.Sprintf("%s/%s", strPort, proto))
ep[p] = struct{}{}
jobCreate := eng.Job("create")
jobCreate.Setenv("Image", unitTestImageID)
jobCreate.SetenvList("Cmd", []string{"sh", "-c", cmd})
jobCreate.SetenvList("PortSpecs", []string{fmt.Sprintf("%s/%s", strPort, proto)})
jobCreate.SetenvJson("ExposedPorts", ep)
jobCreate.Stdout.AddString(&id)
if err := jobCreate.Run(); err != nil {
t.Fatal(err)
}
// FIXME: this relies on the undocumented behavior of runtime.Create
// which will return a nil error AND container if the exposed ports
// are invalid. That behavior should be fixed!
if id != "" {
break
}
t.Logf("Port %v already in use, trying another one", strPort)
}
jobStart := eng.Job("start", id)
portBindings := make(map[docker.Port][]docker.PortBinding)
portBindings[p] = []docker.PortBinding{
{},
}
if err := jobStart.SetenvJson("PortsBindings", portBindings); err != nil {
t.Fatal(err)
}
if err := jobStart.Run(); err != nil {
t.Fatal(err)
}
container := runtime.Get(id)
if container == nil {
t.Fatalf("Couldn't fetch test container %s", id)
}
setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
for !container.State.IsRunning() {
time.Sleep(10 * time.Millisecond)
}
})
// Even if the state is running, let's give lxc some time to actually spawn the process
container.WaitTimeout(500 * time.Millisecond)
strPort = container.NetworkSettings.Ports[p][0].HostPort
return runtime, container, strPort
}
// Run a container with a TCP port allocated, and test that it can receive connections on localhost
func TestAllocateTCPPortLocalhost(t *testing.T) {
runtime, container, port := startEchoServerContainer(t, "tcp")
defer nuke(runtime)
defer container.Kill()
for i := 0; i != 10; i++ {
conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%v", port))
if err != nil {
t.Fatal(err)
}
defer conn.Close()
input := bytes.NewBufferString("well hello there\n")
_, err = conn.Write(input.Bytes())
if err != nil {
t.Fatal(err)
}
buf := make([]byte, 16)
read := 0
conn.SetReadDeadline(time.Now().Add(3 * time.Second))
read, err = conn.Read(buf)
if err != nil {
if err, ok := err.(*net.OpError); ok {
if err.Err == syscall.ECONNRESET {
t.Logf("Connection reset by the proxy, socat is probably not listening yet, trying again in a sec")
conn.Close()
time.Sleep(time.Second)
continue
}
if err.Timeout() {
t.Log("Timeout, trying again")
conn.Close()
continue
}
}
t.Fatal(err)
}
output := string(buf[:read])
if !strings.Contains(output, "well hello there") {
t.Fatal(fmt.Errorf("[%v] doesn't contain [well hello there]", output))
} else {
return
}
}
t.Fatal("No reply from the container")
}
// Run a container with a UDP port allocated, and test that it can receive connections on localhost
func TestAllocateUDPPortLocalhost(t *testing.T) {
runtime, container, port := startEchoServerContainer(t, "udp")
defer nuke(runtime)
defer container.Kill()
conn, err := net.Dial("udp", fmt.Sprintf("localhost:%v", port))
if err != nil {
t.Fatal(err)
}
defer conn.Close()
input := bytes.NewBufferString("well hello there\n")
buf := make([]byte, 16)
// Try for a minute, for some reason the select in socat may take ages
// to return even though everything on the path seems fine (i.e: the
// UDPProxy forwards the traffic correctly and you can see the packets
// on the interface from within the container).
for i := 0; i != 120; i++ {
_, err := conn.Write(input.Bytes())
if err != nil {
t.Fatal(err)
}
conn.SetReadDeadline(time.Now().Add(500 * time.Millisecond))
read, err := conn.Read(buf)
if err == nil {
output := string(buf[:read])
if strings.Contains(output, "well hello there") {
return
}
}
}
t.Fatal("No reply from the container")
}
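// TestRestore simulates a daemon restart: a container recorded as running on
// disk must come back registered but stopped, and remain usable afterwards.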
func TestRestore(t *testing.T) {
eng := NewTestEngine(t)
runtime1 := mkRuntimeFromEngine(eng, t)
defer runtime1.Nuke()
// Create a container with one instance of docker
container1, _, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
defer runtime1.Destroy(container1)
// Create a second container meant to be killed
container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
defer runtime1.Destroy(container2)
// Start the container non blocking
if err := container2.Start(); err != nil {
t.Fatal(err)
}
if !container2.State.IsRunning() {
t.Fatalf("Container %v should appear as running but isn't", container2.ID)
}
// Simulate a crash/manual quit of dockerd: the process dies, but the state stays 'Running'
cStdin, _ := container2.StdinPipe()
cStdin.Close()
if err := container2.WaitTimeout(2 * time.Second); err != nil {
t.Fatal(err)
}
container2.State.SetRunning(42)
container2.ToDisk()
if len(runtime1.List()) != 2 {
t.Errorf("Expected 2 container, %v found", len(runtime1.List()))
}
if err := container1.Run(); err != nil {
t.Fatal(err)
}
if !container2.State.IsRunning() {
t.Fatalf("Container %v should appear as running but isn't", container2.ID)
}
// Here we are simulating a docker restart - that is, reloading all containers
// from scratch
root := eng.Root()
eng, err := engine.New(root)
if err != nil {
t.Fatal(err)
}
job := eng.Job("initapi")
job.Setenv("Root", eng.Root())
job.SetenvBool("Autorestart", false)
if err := job.Run(); err != nil {
t.Fatal(err)
}
runtime2 := mkRuntimeFromEngine(eng, t)
if len(runtime2.List()) != 2 {
t.Errorf("Expected 2 container, %v found", len(runtime2.List()))
}
runningCount := 0
for _, c := range runtime2.List() {
if c.State.IsRunning() {
t.Errorf("Running container found: %v (%v)", c.ID, c.Path)
runningCount++
}
}
if runningCount != 0 {
t.Fatalf("Expected 0 container alive, %d found", runningCount)
}
container3 := runtime2.Get(container1.ID)
if container3 == nil {
t.Fatal("Unable to Get container")
}
if err := container3.Run(); err != nil {
t.Fatal(err)
}
container2.State.SetStopped(0)
}
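// TestReloadContainerLinks checks that linked containers and their names are
// still registered after the engine is re-initialized from the same root.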
func TestReloadContainerLinks(t *testing.T) {
// FIXME: here we don't use NewTestEngine because it calls initapi with Autorestart=false,
// and we want to set it to true.
root, err := newTestDirectory(unitTestStoreBase)
if err != nil {
t.Fatal(err)
}
eng, err := engine.New(root)
if err != nil {
t.Fatal(err)
}
job := eng.Job("initapi")
job.Setenv("Root", eng.Root())
job.SetenvBool("Autorestart", true)
if err := job.Run(); err != nil {
t.Fatal(err)
}
runtime1 := mkRuntimeFromEngine(eng, t)
defer nuke(runtime1)
// Create a container with one instance of docker
container1, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t)
defer runtime1.Destroy(container1)
// Create a second container meant to be killed
container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
defer runtime1.Destroy(container2)
// Start the container non blocking
if err := container2.Start(); err != nil {
t.Fatal(err)
}
// Add a link to container 2
// FIXME @shykes: setting hostConfig.Links seems redundant with calling RegisterLink().
// Why do we need it @crosbymichael?
// container1.hostConfig.Links = []string{"/" + container2.ID + ":first"}
if err := runtime1.RegisterLink(container1, container2, "first"); err != nil {
t.Fatal(err)
}
if err := container1.Start(); err != nil {
t.Fatal(err)
}
if !container2.State.IsRunning() {
t.Fatalf("Container %v should appear as running but isn't", container2.ID)
}
if !container1.State.IsRunning() {
t.Fatalf("Container %s should appear as running but isn't", container1.ID)
}
if len(runtime1.List()) != 2 {
t.Errorf("Expected 2 container, %v found", len(runtime1.List()))
}
// Here we are simulating a docker restart - that is, reloading all containers
// from scratch
eng, err = engine.New(root)
if err != nil {
t.Fatal(err)
}
job = eng.Job("initapi")
job.Setenv("Root", eng.Root())
job.SetenvBool("Autorestart", false)
if err := job.Run(); err != nil {
t.Fatal(err)
}
runtime2 := mkRuntimeFromEngine(eng, t)
if len(runtime2.List()) != 2 {
t.Errorf("Expected 2 container, %v found", len(runtime2.List()))
}
runningCount := 0
for _, c := range runtime2.List() {
if c.State.IsRunning() {
runningCount++
}
}
if runningCount != 2 {
t.Fatalf("Expected 2 container alive, %d found", runningCount)
}
// FIXME: we no longer test if containers were registered in the right order,
// because there is no public
// Make sure container 2 ( the child of container 1 ) was registered and started first
// with the runtime
//
containers := runtime2.List()
if len(containers) == 0 {
t.Fatalf("Runtime has no containers")
}
first := containers[0]
if first.ID != container2.ID {
t.Fatalf("Container 2 %s should be registered first in the runtime", container2.ID)
}
// Verify that the link is still registered in the runtime
if c := runtime2.Get(container1.Name); c == nil {
t.Fatal("Named container is no longer registered after restart")
}
}
func TestDefaultContainerName(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
container := runtime.Get(createNamedTestContainer(eng, config, t, "some_name"))
containerID := container.ID
if container.Name != "/some_name" {
t.Fatalf("Expect /some_name got %s", container.Name)
}
if c := runtime.Get("/some_name"); c == nil {
t.Fatalf("Couldn't retrieve test container as /some_name")
} else if c.ID != containerID {
t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID)
}
}
func TestRandomContainerName(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
config, _, _, err := docker.ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
container := runtime.Get(createTestContainer(eng, config, t))
containerID := container.ID
if container.Name == "" {
t.Fatalf("Expected not empty container name")
}
if c := runtime.Get(container.Name); c == nil {
log.Fatalf("Could not lookup container %s by its name", container.Name)
} else if c.ID != containerID {
log.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID)
}
}
func TestContainerNameValidation(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
for _, test := range []struct {
Name string
Valid bool
}{
{"abc-123_AAA.1", true},
{"\000asdf", false},
} {
config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
if !test.Valid {
continue
}
t.Fatal(err)
}
var shortID string
job := eng.Job("create", test.Name)
if err := job.ImportEnv(config); err != nil {
t.Fatal(err)
}
job.Stdout.AddString(&shortID)
if err := job.Run(); err != nil {
if !test.Valid {
continue
}
t.Fatal(err)
}
container := runtime.Get(shortID)
if container.Name != "/"+test.Name {
t.Fatalf("Expect /%s got %s", test.Name, container.Name)
}
if c := runtime.Get("/" + test.Name); c == nil {
t.Fatalf("Couldn't retrieve test container as /%s", test.Name)
} else if c.ID != container.ID {
t.Fatalf("Container /%s has ID %s instead of %s", test.Name, c.ID, container.ID)
}
}
}
func TestLinkChildContainer(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
container := runtime.Get(createNamedTestContainer(eng, config, t, "/webapp"))
webapp, err := runtime.GetByName("/webapp")
if err != nil {
t.Fatal(err)
}
if webapp.ID != container.ID {
t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
}
config, _, _, err = docker.ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
childContainer := runtime.Get(createTestContainer(eng, config, t))
if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil {
t.Fatal(err)
}
// Get the child by its new name
db, err := runtime.GetByName("/webapp/db")
if err != nil {
t.Fatal(err)
}
if db.ID != childContainer.ID {
t.Fatalf("Expect db id to match container id: %s != %s", db.ID, childContainer.ID)
}
}
func TestGetAllChildren(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
container := runtime.Get(createNamedTestContainer(eng, config, t, "/webapp"))
webapp, err := runtime.GetByName("/webapp")
if err != nil {
t.Fatal(err)
}
if webapp.ID != container.ID {
t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
}
config, _, _, err = docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
childContainer := runtime.Get(createTestContainer(eng, config, t))
if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil {
t.Fatal(err)
}
children, err := runtime.Children("/webapp")
if err != nil {
t.Fatal(err)
}
if children == nil {
t.Fatal("Children should not be nil")
}
if len(children) == 0 {
t.Fatal("Children should not be empty")
}
for key, value := range children {
if key != "/webapp/db" {
t.Fatalf("Expected /webapp/db got %s", key)
}
if value.ID != childContainer.ID {
t.Fatalf("Expected id %s got %s", childContainer.ID, value.ID)
}
}
}
func TestDestroyWithInitLayer(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
}, "")
if err != nil {
t.Fatal(err)
}
// Destroy
if err := runtime.Destroy(container); err != nil {
t.Fatal(err)
}
// Make sure runtime.Exists() behaves correctly
if runtime.Exists("test_destroy") {
t.Fatalf("Exists() returned true")
}
// Make sure runtime.List() doesn't list the destroyed container
if len(runtime.List()) != 0 {
t.Fatalf("Expected 0 container, %v found", len(runtime.List()))
}
driver := runtime.Graph().Driver()
// Make sure that the container does not exist in the driver
if _, err := driver.Get(container.ID); err == nil {
t.Fatal("Conttainer should not exist in the driver")
}
// Make sure that the init layer is removed from the driver
if _, err := driver.Get(fmt.Sprintf("%s-init", container.ID)); err == nil {
t.Fatal("Container's init layer should not exist in the driver")
}
}