diff --git a/integration-cli/check_test.go b/integration-cli/check_test.go index 613ffdb79f..871b4cacd6 100644 --- a/integration-cli/check_test.go +++ b/integration-cli/check_test.go @@ -11,6 +11,7 @@ import ( "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/cliconfig" + "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/pkg/reexec" "github.com/go-check/check" ) @@ -39,7 +40,7 @@ type DockerSuite struct { func (s *DockerSuite) OnTimeout(c *check.C) { if daemonPid > 0 && isLocalDaemon { - signalDaemonDump(daemonPid) + daemon.SignalDaemonDump(daemonPid) } } @@ -63,7 +64,7 @@ func init() { type DockerRegistrySuite struct { ds *DockerSuite reg *testRegistryV2 - d *Daemon + d *daemon.Daemon } func (s *DockerRegistrySuite) OnTimeout(c *check.C) { @@ -73,7 +74,9 @@ func (s *DockerRegistrySuite) OnTimeout(c *check.C) { func (s *DockerRegistrySuite) SetUpTest(c *check.C) { testRequires(c, DaemonIsLinux, RegistryHosting) s.reg = setupRegistry(c, false, "", "") - s.d = NewDaemon(c) + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: experimentalDaemon, + }) } func (s *DockerRegistrySuite) TearDownTest(c *check.C) { @@ -95,7 +98,7 @@ func init() { type DockerSchema1RegistrySuite struct { ds *DockerSuite reg *testRegistryV2 - d *Daemon + d *daemon.Daemon } func (s *DockerSchema1RegistrySuite) OnTimeout(c *check.C) { @@ -105,7 +108,9 @@ func (s *DockerSchema1RegistrySuite) OnTimeout(c *check.C) { func (s *DockerSchema1RegistrySuite) SetUpTest(c *check.C) { testRequires(c, DaemonIsLinux, RegistryHosting, NotArm64) s.reg = setupRegistry(c, true, "", "") - s.d = NewDaemon(c) + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: experimentalDaemon, + }) } func (s *DockerSchema1RegistrySuite) TearDownTest(c *check.C) { @@ -127,7 +132,7 @@ func init() { type DockerRegistryAuthHtpasswdSuite struct { ds *DockerSuite reg *testRegistryV2 - d *Daemon + d *daemon.Daemon } func (s 
*DockerRegistryAuthHtpasswdSuite) OnTimeout(c *check.C) { @@ -137,7 +142,9 @@ func (s *DockerRegistryAuthHtpasswdSuite) OnTimeout(c *check.C) { func (s *DockerRegistryAuthHtpasswdSuite) SetUpTest(c *check.C) { testRequires(c, DaemonIsLinux, RegistryHosting) s.reg = setupRegistry(c, false, "htpasswd", "") - s.d = NewDaemon(c) + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: experimentalDaemon, + }) } func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *check.C) { @@ -161,7 +168,7 @@ func init() { type DockerRegistryAuthTokenSuite struct { ds *DockerSuite reg *testRegistryV2 - d *Daemon + d *daemon.Daemon } func (s *DockerRegistryAuthTokenSuite) OnTimeout(c *check.C) { @@ -170,7 +177,9 @@ func (s *DockerRegistryAuthTokenSuite) OnTimeout(c *check.C) { func (s *DockerRegistryAuthTokenSuite) SetUpTest(c *check.C) { testRequires(c, DaemonIsLinux, RegistryHosting) - s.d = NewDaemon(c) + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: experimentalDaemon, + }) } func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *check.C) { @@ -200,7 +209,7 @@ func init() { type DockerDaemonSuite struct { ds *DockerSuite - d *Daemon + d *daemon.Daemon } func (s *DockerDaemonSuite) OnTimeout(c *check.C) { @@ -209,7 +218,9 @@ func (s *DockerDaemonSuite) OnTimeout(c *check.C) { func (s *DockerDaemonSuite) SetUpTest(c *check.C) { testRequires(c, DaemonIsLinux) - s.d = NewDaemon(c) + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: experimentalDaemon, + }) } func (s *DockerDaemonSuite) TearDownTest(c *check.C) { @@ -221,7 +232,7 @@ func (s *DockerDaemonSuite) TearDownTest(c *check.C) { } func (s *DockerDaemonSuite) TearDownSuite(c *check.C) { - filepath.Walk(daemonSockRoot, func(path string, fi os.FileInfo, err error) error { + filepath.Walk(daemon.SockRoot, func(path string, fi os.FileInfo, err error) error { if err != nil { // ignore errors here // not cleaning up sockets is not really an 
error @@ -232,7 +243,7 @@ func (s *DockerDaemonSuite) TearDownSuite(c *check.C) { } return nil }) - os.RemoveAll(daemonSockRoot) + os.RemoveAll(daemon.SockRoot) } const defaultSwarmPort = 2477 @@ -246,7 +257,7 @@ func init() { type DockerSwarmSuite struct { server *httptest.Server ds *DockerSuite - daemons []*SwarmDaemon + daemons []*daemon.Swarm daemonsLock sync.Mutex // protect access to daemons portIndex int } @@ -263,28 +274,27 @@ func (s *DockerSwarmSuite) SetUpTest(c *check.C) { testRequires(c, DaemonIsLinux) } -func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *SwarmDaemon { - d := &SwarmDaemon{ - Daemon: NewDaemon(c), - port: defaultSwarmPort + s.portIndex, +func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *daemon.Swarm { + d := &daemon.Swarm{ + Daemon: daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: experimentalDaemon, + }), + Port: defaultSwarmPort + s.portIndex, } - d.listenAddr = fmt.Sprintf("0.0.0.0:%d", d.port) + d.ListenAddr = fmt.Sprintf("0.0.0.0:%d", d.Port) args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"} // avoid networking conflicts - if experimentalDaemon { - args = append(args, "--experimental") - } err := d.StartWithBusybox(args...) c.Assert(err, check.IsNil) if joinSwarm == true { if len(s.daemons) > 0 { - tokens := s.daemons[0].joinTokens(c) + tokens := s.daemons[0].JoinTokens(c) token := tokens.Worker if manager { token = tokens.Manager } c.Assert(d.Join(swarm.JoinRequest{ - RemoteAddrs: []string{s.daemons[0].listenAddr}, + RemoteAddrs: []string{s.daemons[0].ListenAddr}, JoinToken: token, }), check.IsNil) } else { @@ -306,13 +316,14 @@ func (s *DockerSwarmSuite) TearDownTest(c *check.C) { for _, d := range s.daemons { if d != nil { d.Stop() + // FIXME(vdemeester) should be handled by SwarmDaemon ? 
// raft state file is quite big (64MB) so remove it after every test - walDir := filepath.Join(d.root, "swarm/raft/wal") + walDir := filepath.Join(d.Root, "swarm/raft/wal") if err := os.RemoveAll(walDir); err != nil { c.Logf("error removing %v: %v", walDir, err) } - cleanupExecRoot(c, d.execRoot) + d.CleanupExecRoot(c) } } s.daemons = nil diff --git a/integration-cli/daemon.go b/integration-cli/daemon/daemon.go similarity index 57% rename from integration-cli/daemon.go rename to integration-cli/daemon/daemon.go index 9fd3f1e82d..a7da26abea 100644 --- a/integration-cli/daemon.go +++ b/integration-cli/daemon/daemon.go @@ -1,12 +1,17 @@ -package main +package daemon import ( "bytes" + "crypto/tls" "encoding/json" "errors" "fmt" "io" + "io/ioutil" + "net" "net/http" + "net/http/httputil" + "net/url" "os" "os/exec" "path/filepath" @@ -16,7 +21,9 @@ import ( "github.com/docker/docker/api/types/events" "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/integration" "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/stringid" "github.com/docker/go-connections/sockets" @@ -24,26 +31,37 @@ import ( "github.com/go-check/check" ) -var daemonSockRoot = filepath.Join(os.TempDir(), "docker-integration") +// SockRoot holds the path of the default docker integration daemon socket +var SockRoot = filepath.Join(os.TempDir(), "docker-integration") // Daemon represents a Docker daemon for the testing framework. 
type Daemon struct { - GlobalFlags []string + GlobalFlags []string + Root string + Folder string + Wait chan error + UseDefaultHost bool + UseDefaultTLSHost bool - id string - c *check.C - logFile *os.File - folder string - root string - stdin io.WriteCloser - stdout, stderr io.ReadCloser - cmd *exec.Cmd - storageDriver string - wait chan error - userlandProxy bool - useDefaultHost bool - useDefaultTLSHost bool - execRoot string + // FIXME(vdemeester) either should be used everywhere (do not return error) or nowhere, + // so I think we should remove it or use it for everything + c *check.C + id string + logFile *os.File + stdin io.WriteCloser + stdout, stderr io.ReadCloser + cmd *exec.Cmd + storageDriver string + userlandProxy bool + execRoot string + experimental bool + dockerBinary string + dockerdBinary string +} + +// Config holds docker daemon integration configuration +type Config struct { + Experimental bool } type clientConfig struct { @@ -52,14 +70,14 @@ type clientConfig struct { addr string } -// NewDaemon returns a Daemon instance to be used for testing. +// New returns a Daemon instance to be used for testing. // This will create a directory such as d123456789 in the folder specified by $DEST. // The daemon will not automatically start. 
-func NewDaemon(c *check.C) *Daemon { +func New(c *check.C, dockerBinary string, dockerdBinary string, config Config) *Daemon { dest := os.Getenv("DEST") c.Assert(dest, check.Not(check.Equals), "", check.Commentf("Please set the DEST environment variable")) - err := os.MkdirAll(daemonSockRoot, 0700) + err := os.MkdirAll(SockRoot, 0700) c.Assert(err, checker.IsNil, check.Commentf("could not create daemon socket root")) id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID())) @@ -80,17 +98,35 @@ func NewDaemon(c *check.C) *Daemon { return &Daemon{ id: id, c: c, - folder: daemonFolder, - root: daemonRoot, + Folder: daemonFolder, + Root: daemonRoot, storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"), userlandProxy: userlandProxy, execRoot: filepath.Join(os.TempDir(), "docker-execroot", id), + dockerBinary: dockerBinary, + dockerdBinary: dockerdBinary, + experimental: config.Experimental, } } // RootDir returns the root directory of the daemon. func (d *Daemon) RootDir() string { - return d.root + return d.Root +} + +// ID returns the generated id of the daemon +func (d *Daemon) ID() string { + return d.id +} + +// StorageDriver returns the configured storage driver of the daemon +func (d *Daemon) StorageDriver() string { + return d.storageDriver +} + +// CleanupExecRoot cleans the daemon exec root (network namespaces, ...) 
+func (d *Daemon) CleanupExecRoot(c *check.C) { + cleanupExecRoot(c, d.execRoot) } func (d *Daemon) getClientConfig() (*clientConfig, error) { @@ -100,7 +136,7 @@ func (d *Daemon) getClientConfig() (*clientConfig, error) { addr string proto string ) - if d.useDefaultTLSHost { + if d.UseDefaultTLSHost { option := &tlsconfig.Options{ CAFile: "fixtures/https/ca.pem", CertFile: "fixtures/https/client-cert.pem", @@ -116,7 +152,7 @@ func (d *Daemon) getClientConfig() (*clientConfig, error) { addr = fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort) scheme = "https" proto = "tcp" - } else if d.useDefaultHost { + } else if d.UseDefaultHost { addr = opts.DefaultUnixSocket proto = "unix" scheme = "http" @@ -140,29 +176,29 @@ func (d *Daemon) getClientConfig() (*clientConfig, error) { // Start will start the daemon and return once it is ready to receive requests. // You can specify additional daemon flags. func (d *Daemon) Start(args ...string) error { - logFile, err := os.OpenFile(filepath.Join(d.folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) - d.c.Assert(err, check.IsNil, check.Commentf("[%s] Could not create %s/docker.log", d.id, d.folder)) + logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) + d.c.Assert(err, check.IsNil, check.Commentf("[%s] Could not create %s/docker.log", d.id, d.Folder)) return d.StartWithLogFile(logFile, args...) } // StartWithLogFile will start the daemon and attach its streams to a given file. 
func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { - dockerdBinary, err := exec.LookPath(dockerdBinary) + dockerdBinary, err := exec.LookPath(d.dockerdBinary) d.c.Assert(err, check.IsNil, check.Commentf("[%s] could not find docker binary in $PATH", d.id)) args := append(d.GlobalFlags, "--containerd", "/var/run/docker/libcontainerd/docker-containerd.sock", - "--graph", d.root, + "--graph", d.Root, "--exec-root", d.execRoot, - "--pidfile", fmt.Sprintf("%s/docker.pid", d.folder), + "--pidfile", fmt.Sprintf("%s/docker.pid", d.Folder), fmt.Sprintf("--userland-proxy=%t", d.userlandProxy), ) - if experimentalDaemon { + if d.experimental { args = append(args, "--experimental", "--init") } - if !(d.useDefaultHost || d.useDefaultTLSHost) { - args = append(args, []string{"--host", d.sock()}...) + if !(d.UseDefaultHost || d.UseDefaultTLSHost) { + args = append(args, []string{"--host", d.Sock()}...) } if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { args = append(args, []string{"--userns-remap", root}...) 
@@ -206,7 +242,7 @@ func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { close(wait) }() - d.wait = wait + d.Wait = wait tick := time.Tick(500 * time.Millisecond) // make sure daemon is ready to receive requests @@ -242,12 +278,12 @@ func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { d.c.Logf("[%s] received status != 200 OK: %s", d.id, resp.Status) } d.c.Logf("[%s] daemon started", d.id) - d.root, err = d.queryRootDir() + d.Root, err = d.queryRootDir() if err != nil { return fmt.Errorf("[%s] error querying daemon for root directory: %v", d.id, err) } return nil - case <-d.wait: + case <-d.Wait: return fmt.Errorf("[%s] Daemon exited during startup", d.id) } } @@ -264,7 +300,7 @@ func (d *Daemon) StartWithBusybox(arg ...string) error { // Kill will send a SIGKILL to the daemon func (d *Daemon) Kill() error { - if d.cmd == nil || d.wait == nil { + if d.cmd == nil || d.Wait == nil { return errors.New("daemon not started") } @@ -278,13 +314,31 @@ func (d *Daemon) Kill() error { return err } - if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.folder)); err != nil { + if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder)); err != nil { return err } return nil } +// Pid returns the pid of the daemon +func (d *Daemon) Pid() int { + return d.cmd.Process.Pid +} + +// Interrupt stops the daemon by sending it an Interrupt signal +func (d *Daemon) Interrupt() error { + return d.Signal(os.Interrupt) +} + +// Signal sends the specified signal to the daemon if running +func (d *Daemon) Signal(signal os.Signal) error { + if d.cmd == nil || d.Wait == nil { + return errors.New("daemon not started") + } + return d.cmd.Process.Signal(signal) +} + // DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its // stack to its log file and exit // This is used primarily for gathering debug information on test timeout @@ -292,7 +346,7 @@ func (d *Daemon) DumpStackAndQuit() { if d.cmd == nil || d.cmd.Process == nil 
{ return } - signalDaemonDump(d.cmd.Process.Pid) + SignalDaemonDump(d.cmd.Process.Pid) } // Stop will send a SIGINT every second and wait for the daemon to stop. @@ -300,7 +354,7 @@ func (d *Daemon) DumpStackAndQuit() { // Stop will not delete the daemon directory. If a purged daemon is needed, // instantiate a new one with NewDaemon. func (d *Daemon) Stop() error { - if d.cmd == nil || d.wait == nil { + if d.cmd == nil || d.Wait == nil { return errors.New("daemon not started") } @@ -318,7 +372,7 @@ func (d *Daemon) Stop() error { out1: for { select { - case err := <-d.wait: + case err := <-d.Wait: return err case <-time.After(20 * time.Second): // time for stopping jobs and run onShutdown hooks @@ -330,7 +384,7 @@ out1: out2: for { select { - case err := <-d.wait: + case err := <-d.Wait: return err case <-tick: i++ @@ -350,7 +404,7 @@ out2: return err } - if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.folder)); err != nil { + if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder)); err != nil { return err } @@ -361,26 +415,26 @@ out2: func (d *Daemon) Restart(arg ...string) error { d.Stop() // in the case of tests running a user namespace-enabled daemon, we have resolved - // d.root to be the actual final path of the graph dir after the "uid.gid" of + // d.Root to be the actual final path of the graph dir after the "uid.gid" of // remapped root is added--we need to subtract it from the path before calling // start or else we will continue making subdirectories rather than truly restarting // with the same location/root: if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { - d.root = filepath.Dir(d.root) + d.Root = filepath.Dir(d.Root) } return d.Start(arg...) 
} // LoadBusybox will load the stored busybox into a newly started daemon func (d *Daemon) LoadBusybox() error { - bb := filepath.Join(d.folder, "busybox.tar") + bb := filepath.Join(d.Folder, "busybox.tar") if _, err := os.Stat(bb); err != nil { if !os.IsNotExist(err) { return fmt.Errorf("unexpected error on busybox.tar stat: %v", err) } // saving busybox image from main daemon - if out, err := exec.Command(dockerBinary, "save", "--output", bb, "busybox:latest").CombinedOutput(); err != nil { - imagesOut, _ := exec.Command(dockerBinary, "images", "--format", "{{ .Repository }}:{{ .Tag }}").CombinedOutput() + if out, err := exec.Command(d.dockerBinary, "save", "--output", bb, "busybox:latest").CombinedOutput(); err != nil { + imagesOut, _ := exec.Command(d.dockerBinary, "images", "--format", "{{ .Repository }}:{{ .Tag }}").CombinedOutput() return fmt.Errorf("could not save busybox image: %s\n%s", string(out), strings.TrimSpace(string(imagesOut))) } } @@ -427,7 +481,7 @@ func (d *Daemon) queryRootDir() (string, error) { } var b []byte var i Info - b, err = readBody(body) + b, err = integration.ReadBody(body) if err == nil && resp.StatusCode == http.StatusOK { // read the docker root dir if err = json.Unmarshal(b, &i); err == nil { @@ -437,22 +491,25 @@ func (d *Daemon) queryRootDir() (string, error) { return "", err } -func (d *Daemon) sock() string { +// Sock returns the socket path of the daemon +func (d *Daemon) Sock() string { return fmt.Sprintf("unix://" + d.sockPath()) } func (d *Daemon) sockPath() string { - return filepath.Join(daemonSockRoot, d.id+".sock") + return filepath.Join(SockRoot, d.id+".sock") } -func (d *Daemon) waitRun(contID string) error { - args := []string{"--host", d.sock()} - return waitInspectWithArgs(contID, "{{.State.Running}}", "true", 10*time.Second, args...) 
+// WaitRun waits for a container to be running for 10s +func (d *Daemon) WaitRun(contID string) error { + args := []string{"--host", d.Sock()} + return WaitInspectWithArgs(d.dockerBinary, contID, "{{.State.Running}}", "true", 10*time.Second, args...) } -func (d *Daemon) getBaseDeviceSize(c *check.C) int64 { - infoCmdOutput, _, err := runCommandPipelineWithOutput( - exec.Command(dockerBinary, "-H", d.sock(), "info"), +// GetBaseDeviceSize returns the base device size of the daemon +func (d *Daemon) GetBaseDeviceSize(c *check.C) int64 { + infoCmdOutput, _, err := integration.RunCommandPipelineWithOutput( + exec.Command(d.dockerBinary, "-H", d.Sock(), "info"), exec.Command("grep", "Base Device Size"), ) c.Assert(err, checker.IsNil) @@ -468,21 +525,23 @@ func (d *Daemon) getBaseDeviceSize(c *check.C) int64 { // Cmd will execute a docker CLI command against this Daemon. // Example: d.Cmd("version") will run docker -H unix://path/to/unix.sock version func (d *Daemon) Cmd(args ...string) (string, error) { - b, err := d.command(args...).CombinedOutput() + b, err := d.Command(args...).CombinedOutput() return string(b), err } -func (d *Daemon) command(args ...string) *exec.Cmd { - return exec.Command(dockerBinary, d.prependHostArg(args)...) +// Command will create a docker CLI command against this Daemon. +func (d *Daemon) Command(args ...string) *exec.Cmd { + return exec.Command(d.dockerBinary, d.PrependHostArg(args)...) } -func (d *Daemon) prependHostArg(args []string) []string { +// PrependHostArg prepends the specified arguments with the daemon host flags +func (d *Daemon) PrependHostArg(args []string) []string { for _, arg := range args { if arg == "--host" || arg == "-H" { return args } } - return append([]string{"--host", d.sock()}, args...) + return append([]string{"--host", d.Sock()}, args...) } // SockRequest executes a socket request on a daemon and returns statuscode and output. 
@@ -496,14 +555,14 @@ func (d *Daemon) SockRequest(method, endpoint string, data interface{}) (int, [] if err != nil { return -1, nil, err } - b, err := readBody(body) + b, err := integration.ReadBody(body) return res.StatusCode, b, err } // SockRequestRaw executes a socket request on a daemon and returns an http // response and a reader for the output data. func (d *Daemon) SockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) { - return sockRequestRawToDaemon(method, endpoint, data, ct, d.sock()) + return SockRequestRawToDaemon(method, endpoint, data, ct, d.Sock()) } // LogFileName returns the path of the daemon's log file @@ -511,11 +570,14 @@ func (d *Daemon) LogFileName() string { return d.logFile.Name() } -func (d *Daemon) getIDByName(name string) (string, error) { +// GetIDByName returns the ID of an object (container, volume, …) given its name +func (d *Daemon) GetIDByName(name string) (string, error) { return d.inspectFieldWithError(name, "Id") } -func (d *Daemon) activeContainers() (ids []string) { +// ActiveContainers returns the list of ids of the currently running containers +func (d *Daemon) ActiveContainers() (ids []string) { + // FIXME(vdemeester) shouldn't ignore the error out, _ := d.Cmd("ps", "-q") for _, id := range strings.Split(out, "\n") { if id = strings.TrimSpace(id); id != "" { @@ -525,6 +587,11 @@ func (d *Daemon) activeContainers() (ids []string) { return } +// ReadLogFile returns the content of the daemon log file +func (d *Daemon) ReadLogFile() ([]byte, error) { + return ioutil.ReadFile(d.logFile.Name()) +} + func (d *Daemon) inspectFilter(name, filter string) (string, error) { format := fmt.Sprintf("{{%s}}", filter) out, err := d.Cmd("inspect", "-f", format, name) @@ -538,7 +605,9 @@ func (d *Daemon) inspectFieldWithError(name, field string) (string, error) { return d.inspectFilter(name, fmt.Sprintf(".%s", field)) } -func (d *Daemon) findContainerIP(id string) string { +// 
FindContainerIP returns the ip of the specified container +// FIXME(vdemeester) should probably error out +func (d *Daemon) FindContainerIP(id string) string { out, err := d.Cmd("inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.bridge.IPAddress }}'"), id) if err != nil { d.c.Log(err) @@ -546,12 +615,22 @@ func (d *Daemon) findContainerIP(id string) string { return strings.Trim(out, " \r\n'") } -func (d *Daemon) buildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, int, error) { - buildCmd := buildImageCmdWithHost(name, dockerfile, d.sock(), useCache, buildFlags...) - return runCommandWithOutput(buildCmd) +// BuildImageWithOut builds an image with the specified dockerfile and options and returns the output +func (d *Daemon) BuildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, int, error) { + buildCmd := BuildImageCmdWithHost(d.dockerBinary, name, dockerfile, d.Sock(), useCache, buildFlags...) + result := icmd.RunCmd(icmd.Cmd{ + Command: buildCmd.Args, + Env: buildCmd.Env, + Dir: buildCmd.Dir, + Stdin: buildCmd.Stdin, + Stdout: buildCmd.Stdout, + }) + return result.Combined(), result.ExitCode, result.Error } -func (d *Daemon) checkActiveContainerCount(c *check.C) (interface{}, check.CommentInterface) { +// CheckActiveContainerCount returns the number of active containers +// FIXME(vdemeester) should re-use ActiveContainers in some way +func (d *Daemon) CheckActiveContainerCount(c *check.C) (interface{}, check.CommentInterface) { out, err := d.Cmd("ps", "-q") c.Assert(err, checker.IsNil) if len(strings.TrimSpace(out)) == 0 { @@ -560,7 +639,8 @@ func (d *Daemon) checkActiveContainerCount(c *check.C) (interface{}, check.Comme return len(strings.Split(strings.TrimSpace(out), "\n")), check.Commentf("output: %q", string(out)) } -func (d *Daemon) reloadConfig() error { +// ReloadConfig asks the daemon to reload its configuration +func (d *Daemon) ReloadConfig() error { if d.cmd == nil 
|| d.cmd.Process == nil { return fmt.Errorf("daemon is not running") } @@ -568,7 +648,7 @@ func (d *Daemon) reloadConfig() error { errCh := make(chan error) started := make(chan struct{}) go func() { - _, body, err := sockRequestRawToDaemon("GET", "/events", nil, "", d.sock()) + _, body, err := SockRequestRawToDaemon("GET", "/events", nil, "", d.Sock()) close(started) if err != nil { errCh <- err @@ -606,3 +686,149 @@ func (d *Daemon) reloadConfig() error { } return nil } + +// WaitInspectWithArgs waits for the specified expression to be equals to the specified expected string in the given time. +// FIXME(vdemeester) Attach this to the Daemon struct +func WaitInspectWithArgs(dockerBinary, name, expr, expected string, timeout time.Duration, arg ...string) error { + after := time.After(timeout) + + args := append(arg, "inspect", "-f", expr, name) + for { + result := icmd.RunCommand(dockerBinary, args...) + if result.Error != nil { + if !strings.Contains(result.Stderr(), "No such") { + return fmt.Errorf("error executing docker inspect: %v\n%s", + result.Stderr(), result.Stdout()) + } + select { + case <-after: + return result.Error + default: + time.Sleep(10 * time.Millisecond) + continue + } + } + + out := strings.TrimSpace(result.Stdout()) + if out == expected { + break + } + + select { + case <-after: + return fmt.Errorf("condition \"%q == %q\" not true in time", out, expected) + default: + } + + time.Sleep(100 * time.Millisecond) + } + return nil +} + +// SockRequestRawToDaemon creates an http request against the specified daemon socket +// FIXME(vdemeester) attach this to daemon ? 
+func SockRequestRawToDaemon(method, endpoint string, data io.Reader, ct, daemon string) (*http.Response, io.ReadCloser, error) { + req, client, err := newRequestClient(method, endpoint, data, ct, daemon) + if err != nil { + return nil, nil, err + } + + resp, err := client.Do(req) + if err != nil { + client.Close() + return nil, nil, err + } + body := ioutils.NewReadCloserWrapper(resp.Body, func() error { + defer resp.Body.Close() + return client.Close() + }) + + return resp, body, nil +} + +func getTLSConfig() (*tls.Config, error) { + dockerCertPath := os.Getenv("DOCKER_CERT_PATH") + + if dockerCertPath == "" { + return nil, fmt.Errorf("DOCKER_TLS_VERIFY specified, but no DOCKER_CERT_PATH environment variable") + } + + option := &tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + } + tlsConfig, err := tlsconfig.Client(*option) + if err != nil { + return nil, err + } + + return tlsConfig, nil +} + +// SockConn opens a connection on the specified socket +func SockConn(timeout time.Duration, daemon string) (net.Conn, error) { + daemonURL, err := url.Parse(daemon) + if err != nil { + return nil, fmt.Errorf("could not parse url %q: %v", daemon, err) + } + + var c net.Conn + switch daemonURL.Scheme { + case "npipe": + return npipeDial(daemonURL.Path, timeout) + case "unix": + return net.DialTimeout(daemonURL.Scheme, daemonURL.Path, timeout) + case "tcp": + if os.Getenv("DOCKER_TLS_VERIFY") != "" { + // Setup the socket TLS configuration. 
+ tlsConfig, err := getTLSConfig() + if err != nil { + return nil, err + } + dialer := &net.Dialer{Timeout: timeout} + return tls.DialWithDialer(dialer, daemonURL.Scheme, daemonURL.Host, tlsConfig) + } + return net.DialTimeout(daemonURL.Scheme, daemonURL.Host, timeout) + default: + return c, fmt.Errorf("unknown scheme %v (%s)", daemonURL.Scheme, daemon) + } +} + +func newRequestClient(method, endpoint string, data io.Reader, ct, daemon string) (*http.Request, *httputil.ClientConn, error) { + c, err := SockConn(time.Duration(10*time.Second), daemon) + if err != nil { + return nil, nil, fmt.Errorf("could not dial docker daemon: %v", err) + } + + client := httputil.NewClientConn(c, nil) + + req, err := http.NewRequest(method, endpoint, data) + if err != nil { + client.Close() + return nil, nil, fmt.Errorf("could not create new request: %v", err) + } + + if ct != "" { + req.Header.Set("Content-Type", ct) + } + return req, client, nil +} + +// BuildImageCmdWithHost create a build command with the specified arguments. +// FIXME(vdemeester) move this away +func BuildImageCmdWithHost(dockerBinary, name, dockerfile, host string, useCache bool, buildFlags ...string) *exec.Cmd { + args := []string{} + if host != "" { + args = append(args, "--host", host) + } + args = append(args, "build", "-t", name) + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, buildFlags...) + args = append(args, "-") + buildCmd := exec.Command(dockerBinary, args...) 
+ buildCmd.Stdin = strings.NewReader(dockerfile) + return buildCmd +} diff --git a/integration-cli/daemon_swarm.go b/integration-cli/daemon/daemon_swarm.go similarity index 68% rename from integration-cli/daemon_swarm.go rename to integration-cli/daemon/daemon_swarm.go index f871d5fda3..2b7f71908b 100644 --- a/integration-cli/daemon_swarm.go +++ b/integration-cli/daemon/daemon_swarm.go @@ -1,4 +1,4 @@ -package main +package daemon import ( "encoding/json" @@ -14,18 +14,18 @@ import ( "github.com/go-check/check" ) -// SwarmDaemon is a test daemon with helpers for participating in a swarm. -type SwarmDaemon struct { +// Swarm is a test daemon with helpers for participating in a swarm. +type Swarm struct { *Daemon swarm.Info - port int - listenAddr string + Port int + ListenAddr string } // Init initializes a new swarm cluster. -func (d *SwarmDaemon) Init(req swarm.InitRequest) error { +func (d *Swarm) Init(req swarm.InitRequest) error { if req.ListenAddr == "" { - req.ListenAddr = d.listenAddr + req.ListenAddr = d.ListenAddr } status, out, err := d.SockRequest("POST", "/swarm/init", req) if status != http.StatusOK { @@ -34,7 +34,7 @@ func (d *SwarmDaemon) Init(req swarm.InitRequest) error { if err != nil { return fmt.Errorf("initializing swarm: %v", err) } - info, err := d.info() + info, err := d.SwarmInfo() if err != nil { return err } @@ -43,9 +43,9 @@ func (d *SwarmDaemon) Init(req swarm.InitRequest) error { } // Join joins a daemon to an existing cluster. 
-func (d *SwarmDaemon) Join(req swarm.JoinRequest) error { +func (d *Swarm) Join(req swarm.JoinRequest) error { if req.ListenAddr == "" { - req.ListenAddr = d.listenAddr + req.ListenAddr = d.ListenAddr } status, out, err := d.SockRequest("POST", "/swarm/join", req) if status != http.StatusOK { @@ -54,7 +54,7 @@ func (d *SwarmDaemon) Join(req swarm.JoinRequest) error { if err != nil { return fmt.Errorf("joining swarm: %v", err) } - info, err := d.info() + info, err := d.SwarmInfo() if err != nil { return err } @@ -63,7 +63,7 @@ func (d *SwarmDaemon) Join(req swarm.JoinRequest) error { } // Leave forces daemon to leave current cluster. -func (d *SwarmDaemon) Leave(force bool) error { +func (d *Swarm) Leave(force bool) error { url := "/swarm/leave" if force { url += "?force=1" @@ -78,7 +78,8 @@ func (d *SwarmDaemon) Leave(force bool) error { return err } -func (d *SwarmDaemon) info() (swarm.Info, error) { +// SwarmInfo returns the swarm information of the daemon +func (d *Swarm) SwarmInfo() (swarm.Info, error) { var info struct { Swarm swarm.Info } @@ -95,11 +96,17 @@ func (d *SwarmDaemon) info() (swarm.Info, error) { return info.Swarm, nil } -type serviceConstructor func(*swarm.Service) -type nodeConstructor func(*swarm.Node) -type specConstructor func(*swarm.Spec) +// ServiceConstructor defines a swarm service constructor function +type ServiceConstructor func(*swarm.Service) -func (d *SwarmDaemon) createService(c *check.C, f ...serviceConstructor) string { +// NodeConstructor defines a swarm node constructor +type NodeConstructor func(*swarm.Node) + +// SpecConstructor defines a swarm spec constructor +type SpecConstructor func(*swarm.Spec) + +// CreateService creates a swarm service given the specified service constructor +func (d *Swarm) CreateService(c *check.C, f ...ServiceConstructor) string { var service swarm.Service for _, fn := range f { fn(&service) @@ -114,7 +121,8 @@ func (d *SwarmDaemon) createService(c *check.C, f ...serviceConstructor) string return 
scr.ID } -func (d *SwarmDaemon) getService(c *check.C, id string) *swarm.Service { +// GetService returns the swarm service corresponding to the specified id +func (d *Swarm) GetService(c *check.C, id string) *swarm.Service { var service swarm.Service status, out, err := d.SockRequest("GET", "/services/"+id, nil) c.Assert(err, checker.IsNil, check.Commentf(string(out))) @@ -123,7 +131,8 @@ func (d *SwarmDaemon) getService(c *check.C, id string) *swarm.Service { return &service } -func (d *SwarmDaemon) getServiceTasks(c *check.C, service string) []swarm.Task { +// GetServiceTasks returns the swarm tasks for the specified service +func (d *Swarm) GetServiceTasks(c *check.C, service string) []swarm.Task { var tasks []swarm.Task filterArgs := filters.NewArgs() @@ -139,9 +148,10 @@ func (d *SwarmDaemon) getServiceTasks(c *check.C, service string) []swarm.Task { return tasks } -func (d *SwarmDaemon) checkServiceRunningTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { +// CheckServiceRunningTasks returns the number of running tasks for the specified service +func (d *Swarm) CheckServiceRunningTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { return func(c *check.C) (interface{}, check.CommentInterface) { - tasks := d.getServiceTasks(c, service) + tasks := d.GetServiceTasks(c, service) var runningCount int for _, task := range tasks { if task.Status.State == swarm.TaskStateRunning { @@ -152,9 +162,10 @@ func (d *SwarmDaemon) checkServiceRunningTasks(service string) func(*check.C) (i } } -func (d *SwarmDaemon) checkServiceUpdateState(service string) func(*check.C) (interface{}, check.CommentInterface) { +// CheckServiceUpdateState returns the current update state for the specified service +func (d *Swarm) CheckServiceUpdateState(service string) func(*check.C) (interface{}, check.CommentInterface) { return func(c *check.C) (interface{}, check.CommentInterface) { - service := d.getService(c, service) + service := 
d.GetService(c, service) if service.UpdateStatus == nil { return "", nil } @@ -162,14 +173,16 @@ func (d *SwarmDaemon) checkServiceUpdateState(service string) func(*check.C) (in } } -func (d *SwarmDaemon) checkServiceTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { +// CheckServiceTasks returns the number of tasks for the specified service +func (d *Swarm) CheckServiceTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { return func(c *check.C) (interface{}, check.CommentInterface) { - tasks := d.getServiceTasks(c, service) + tasks := d.GetServiceTasks(c, service) return len(tasks), nil } } -func (d *SwarmDaemon) checkRunningTaskImages(c *check.C) (interface{}, check.CommentInterface) { +// CheckRunningTaskImages returns the number of different images attached to a running task +func (d *Swarm) CheckRunningTaskImages(c *check.C) (interface{}, check.CommentInterface) { var tasks []swarm.Task filterArgs := filters.NewArgs() @@ -191,8 +204,9 @@ func (d *SwarmDaemon) checkRunningTaskImages(c *check.C) (interface{}, check.Com return result, nil } -func (d *SwarmDaemon) checkNodeReadyCount(c *check.C) (interface{}, check.CommentInterface) { - nodes := d.listNodes(c) +// CheckNodeReadyCount returns the number of ready node on the swarm +func (d *Swarm) CheckNodeReadyCount(c *check.C) (interface{}, check.CommentInterface) { + nodes := d.ListNodes(c) var readyCount int for _, node := range nodes { if node.Status.State == swarm.NodeStateReady { @@ -202,7 +216,8 @@ func (d *SwarmDaemon) checkNodeReadyCount(c *check.C) (interface{}, check.Commen return readyCount, nil } -func (d *SwarmDaemon) getTask(c *check.C, id string) swarm.Task { +// GetTask returns the swarm task identified by the specified id +func (d *Swarm) GetTask(c *check.C, id string) swarm.Task { var task swarm.Task status, out, err := d.SockRequest("GET", "/tasks/"+id, nil) @@ -212,7 +227,8 @@ func (d *SwarmDaemon) getTask(c *check.C, id string) swarm.Task { 
return task } -func (d *SwarmDaemon) updateService(c *check.C, service *swarm.Service, f ...serviceConstructor) { +// UpdateService updates a swarm service with the specified service constructor +func (d *Swarm) UpdateService(c *check.C, service *swarm.Service, f ...ServiceConstructor) { for _, fn := range f { fn(service) } @@ -222,13 +238,15 @@ func (d *SwarmDaemon) updateService(c *check.C, service *swarm.Service, f ...ser c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) } -func (d *SwarmDaemon) removeService(c *check.C, id string) { +// RemoveService removes the specified service +func (d *Swarm) RemoveService(c *check.C, id string) { status, out, err := d.SockRequest("DELETE", "/services/"+id, nil) c.Assert(err, checker.IsNil, check.Commentf(string(out))) c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) } -func (d *SwarmDaemon) getNode(c *check.C, id string) *swarm.Node { +// GetNode returns a swarm node identified by the specified id +func (d *Swarm) GetNode(c *check.C, id string) *swarm.Node { var node swarm.Node status, out, err := d.SockRequest("GET", "/nodes/"+id, nil) c.Assert(err, checker.IsNil, check.Commentf(string(out))) @@ -238,7 +256,8 @@ func (d *SwarmDaemon) getNode(c *check.C, id string) *swarm.Node { return &node } -func (d *SwarmDaemon) removeNode(c *check.C, id string, force bool) { +// RemoveNode removes the specified node +func (d *Swarm) RemoveNode(c *check.C, id string, force bool) { url := "/nodes/" + id if force { url += "?force=1" @@ -249,9 +268,10 @@ func (d *SwarmDaemon) removeNode(c *check.C, id string, force bool) { c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) } -func (d *SwarmDaemon) updateNode(c *check.C, id string, f ...nodeConstructor) { +// UpdateNode updates a swarm node with the specified node constructor +func (d *Swarm) UpdateNode(c *check.C, id string, f ...NodeConstructor) { for i := 0; ; i++ { 
- node := d.getNode(c, id) + node := d.GetNode(c, id) for _, fn := range f { fn(node) } @@ -267,7 +287,8 @@ func (d *SwarmDaemon) updateNode(c *check.C, id string, f ...nodeConstructor) { } } -func (d *SwarmDaemon) listNodes(c *check.C) []swarm.Node { +// ListNodes returns the list of the current swarm nodes +func (d *Swarm) ListNodes(c *check.C) []swarm.Node { status, out, err := d.SockRequest("GET", "/nodes", nil) c.Assert(err, checker.IsNil, check.Commentf(string(out))) c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) @@ -277,7 +298,8 @@ func (d *SwarmDaemon) listNodes(c *check.C) []swarm.Node { return nodes } -func (d *SwarmDaemon) listServices(c *check.C) []swarm.Service { +// ListServices return the list of the current swarm services +func (d *Swarm) ListServices(c *check.C) []swarm.Service { status, out, err := d.SockRequest("GET", "/services", nil) c.Assert(err, checker.IsNil, check.Commentf(string(out))) c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) @@ -287,7 +309,8 @@ func (d *SwarmDaemon) listServices(c *check.C) []swarm.Service { return services } -func (d *SwarmDaemon) createSecret(c *check.C, secretSpec swarm.SecretSpec) string { +// CreateSecret creates a secret given the specified spec +func (d *Swarm) CreateSecret(c *check.C, secretSpec swarm.SecretSpec) string { status, out, err := d.SockRequest("POST", "/secrets/create", secretSpec) c.Assert(err, checker.IsNil, check.Commentf(string(out))) @@ -298,7 +321,8 @@ func (d *SwarmDaemon) createSecret(c *check.C, secretSpec swarm.SecretSpec) stri return scr.ID } -func (d *SwarmDaemon) listSecrets(c *check.C) []swarm.Secret { +// ListSecrets returns the list of the current swarm secrets +func (d *Swarm) ListSecrets(c *check.C) []swarm.Secret { status, out, err := d.SockRequest("GET", "/secrets", nil) c.Assert(err, checker.IsNil, check.Commentf(string(out))) c.Assert(status, checker.Equals, http.StatusOK, 
check.Commentf("output: %q", string(out))) @@ -308,7 +332,8 @@ func (d *SwarmDaemon) listSecrets(c *check.C) []swarm.Secret { return secrets } -func (d *SwarmDaemon) getSecret(c *check.C, id string) *swarm.Secret { +// GetSecret returns a swarm secret identified by the specified id +func (d *Swarm) GetSecret(c *check.C, id string) *swarm.Secret { var secret swarm.Secret status, out, err := d.SockRequest("GET", "/secrets/"+id, nil) c.Assert(err, checker.IsNil, check.Commentf(string(out))) @@ -317,13 +342,15 @@ func (d *SwarmDaemon) getSecret(c *check.C, id string) *swarm.Secret { return &secret } -func (d *SwarmDaemon) deleteSecret(c *check.C, id string) { +// DeleteSecret removes the swarm secret identified by the specified id +func (d *Swarm) DeleteSecret(c *check.C, id string) { status, out, err := d.SockRequest("DELETE", "/secrets/"+id, nil) c.Assert(err, checker.IsNil, check.Commentf(string(out))) c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf("output: %q", string(out))) } -func (d *SwarmDaemon) getSwarm(c *check.C) swarm.Swarm { +// GetSwarm return the current swarm object +func (d *Swarm) GetSwarm(c *check.C) swarm.Swarm { var sw swarm.Swarm status, out, err := d.SockRequest("GET", "/swarm", nil) c.Assert(err, checker.IsNil, check.Commentf(string(out))) @@ -332,8 +359,9 @@ func (d *SwarmDaemon) getSwarm(c *check.C) swarm.Swarm { return sw } -func (d *SwarmDaemon) updateSwarm(c *check.C, f ...specConstructor) { - sw := d.getSwarm(c) +// UpdateSwarm updates the current swarm object with the specified spec constructors +func (d *Swarm) UpdateSwarm(c *check.C, f ...SpecConstructor) { + sw := d.GetSwarm(c) for _, fn := range f { fn(&sw.Spec) } @@ -343,7 +371,8 @@ func (d *SwarmDaemon) updateSwarm(c *check.C, f ...specConstructor) { c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) } -func (d *SwarmDaemon) rotateTokens(c *check.C) { +// RotateTokens update the swarm to rotate tokens +func (d *Swarm) 
RotateTokens(c *check.C) { var sw swarm.Swarm status, out, err := d.SockRequest("GET", "/swarm", nil) c.Assert(err, checker.IsNil, check.Commentf(string(out))) @@ -356,7 +385,8 @@ func (d *SwarmDaemon) rotateTokens(c *check.C) { c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) } -func (d *SwarmDaemon) joinTokens(c *check.C) swarm.JoinTokens { +// JoinTokens returns the current swarm join tokens +func (d *Swarm) JoinTokens(c *check.C) swarm.JoinTokens { var sw swarm.Swarm status, out, err := d.SockRequest("GET", "/swarm", nil) c.Assert(err, checker.IsNil, check.Commentf(string(out))) @@ -365,20 +395,23 @@ func (d *SwarmDaemon) joinTokens(c *check.C) swarm.JoinTokens { return sw.JoinTokens } -func (d *SwarmDaemon) checkLocalNodeState(c *check.C) (interface{}, check.CommentInterface) { - info, err := d.info() +// CheckLocalNodeState returns the current swarm node state +func (d *Swarm) CheckLocalNodeState(c *check.C) (interface{}, check.CommentInterface) { + info, err := d.SwarmInfo() c.Assert(err, checker.IsNil) return info.LocalNodeState, nil } -func (d *SwarmDaemon) checkControlAvailable(c *check.C) (interface{}, check.CommentInterface) { - info, err := d.info() +// CheckControlAvailable returns the current swarm control available +func (d *Swarm) CheckControlAvailable(c *check.C) (interface{}, check.CommentInterface) { + info, err := d.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) return info.ControlAvailable, nil } -func (d *SwarmDaemon) checkLeader(c *check.C) (interface{}, check.CommentInterface) { +// CheckLeader returns whether there is a leader on the swarm or not +func (d *Swarm) CheckLeader(c *check.C) (interface{}, check.CommentInterface) { errList := check.Commentf("could not get node list") status, out, err := d.SockRequest("GET", "/nodes", nil) if err != nil { @@ -401,7 +434,8 @@ func (d *SwarmDaemon) checkLeader(c *check.C) (interface{}, 
check.CommentInterfa return fmt.Errorf("no leader"), check.Commentf("could not find leader") } -func (d *SwarmDaemon) cmdRetryOutOfSequence(args ...string) (string, error) { +// CmdRetryOutOfSequence tries the specified command against the current daemon for 10 times +func (d *Swarm) CmdRetryOutOfSequence(args ...string) (string, error) { for i := 0; ; i++ { out, err := d.Cmd(args...) if err != nil { diff --git a/integration-cli/daemon_unix.go b/integration-cli/daemon/daemon_unix.go similarity index 87% rename from integration-cli/daemon_unix.go rename to integration-cli/daemon/daemon_unix.go index 6ca7daf21c..cacff728f0 100644 --- a/integration-cli/daemon_unix.go +++ b/integration-cli/daemon/daemon_unix.go @@ -1,6 +1,6 @@ // +build !windows -package main +package daemon import ( "os" @@ -26,7 +26,8 @@ func cleanupExecRoot(c *check.C, execRoot string) { }) } -func signalDaemonDump(pid int) { +// SignalDaemonDump sends a signal to the daemon to write a dump file +func SignalDaemonDump(pid int) { syscall.Kill(pid, syscall.SIGQUIT) } diff --git a/integration-cli/daemon_windows.go b/integration-cli/daemon/daemon_windows.go similarity index 90% rename from integration-cli/daemon_windows.go rename to integration-cli/daemon/daemon_windows.go index 885b703b33..81dae7a485 100644 --- a/integration-cli/daemon_windows.go +++ b/integration-cli/daemon/daemon_windows.go @@ -1,4 +1,4 @@ -package main +package daemon import ( "fmt" @@ -32,7 +32,8 @@ func pulseEvent(handle syscall.Handle, proc *windows.LazyProc) (err error) { return } -func signalDaemonDump(pid int) { +// SignalDaemonDump sends a signal to the daemon to write a dump file +func SignalDaemonDump(pid int) { modkernel32 := windows.NewLazySystemDLL("kernel32.dll") procOpenEvent := modkernel32.NewProc("OpenEventW") procPulseEvent := modkernel32.NewProc("PulseEvent") diff --git a/integration-cli/npipe.go b/integration-cli/daemon/npipe.go similarity index 91% rename from integration-cli/npipe.go rename to 
integration-cli/daemon/npipe.go index fa531a1b4d..4e164aae4b 100644 --- a/integration-cli/npipe.go +++ b/integration-cli/daemon/npipe.go @@ -1,6 +1,6 @@ // +build !windows -package main +package daemon import ( "net" diff --git a/integration-cli/npipe_windows.go b/integration-cli/daemon/npipe_windows.go similarity index 92% rename from integration-cli/npipe_windows.go rename to integration-cli/daemon/npipe_windows.go index 4fd735f2db..d3c13be5d9 100644 --- a/integration-cli/npipe_windows.go +++ b/integration-cli/daemon/npipe_windows.go @@ -1,4 +1,4 @@ -package main +package daemon import ( "net" diff --git a/integration-cli/daemon_swarm_hack.go b/integration-cli/daemon_swarm_hack.go index 0cea901420..e1fb333f82 100644 --- a/integration-cli/daemon_swarm_hack.go +++ b/integration-cli/daemon_swarm_hack.go @@ -1,8 +1,11 @@ package main -import "github.com/go-check/check" +import ( + "github.com/docker/docker/integration-cli/daemon" + "github.com/go-check/check" +) -func (s *DockerSwarmSuite) getDaemon(c *check.C, nodeID string) *SwarmDaemon { +func (s *DockerSwarmSuite) getDaemon(c *check.C, nodeID string) *daemon.Swarm { s.daemonsLock.Lock() defer s.daemonsLock.Unlock() for _, d := range s.daemons { diff --git a/integration-cli/docker_api_attach_test.go b/integration-cli/docker_api_attach_test.go index d43bf3ab0e..6490069a52 100644 --- a/integration-cli/docker_api_attach_test.go +++ b/integration-cli/docker_api_attach_test.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/client" + "github.com/docker/docker/pkg/integration" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/stdcopy" "github.com/go-check/check" @@ -79,7 +80,7 @@ func (s *DockerSuite) TestPostContainersAttachContainerNotFound(c *check.C) { // connection will shutdown, err should be "persistent connection closed" c.Assert(err, checker.NotNil) // Server shutdown connection - body, err := readBody(resp.Body) + body, err := 
integration.ReadBody(resp.Body) c.Assert(err, checker.IsNil) c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound) expected := "No such container: doesnotexist\r\n" diff --git a/integration-cli/docker_api_build_test.go b/integration-cli/docker_api_build_test.go index 9b069a43a6..0cc97a95d7 100644 --- a/integration-cli/docker_api_build_test.go +++ b/integration-cli/docker_api_build_test.go @@ -7,6 +7,7 @@ import ( "regexp" "strings" + "github.com/docker/docker/pkg/integration" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) @@ -34,7 +35,7 @@ RUN find /tmp/` c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusOK) - buf, err := readBody(body) + buf, err := integration.ReadBody(body) c.Assert(err, checker.IsNil) // Make sure Dockerfile exists. @@ -125,7 +126,7 @@ RUN echo 'right' c.Assert(res.StatusCode, checker.Equals, http.StatusOK) defer body.Close() - content, err := readBody(body) + content, err := integration.ReadBody(body) c.Assert(err, checker.IsNil) // Build used the wrong dockerfile. 
@@ -144,7 +145,7 @@ RUN echo from dockerfile`, c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusOK) - buf, err := readBody(body) + buf, err := integration.ReadBody(body) c.Assert(err, checker.IsNil) out := string(buf) @@ -166,7 +167,7 @@ RUN echo from Dockerfile`, c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusOK) - buf, err := readBody(body) + buf, err := integration.ReadBody(body) c.Assert(err, checker.IsNil) out := string(buf) @@ -189,7 +190,7 @@ RUN echo from dockerfile`, c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusOK) - buf, err := readBody(body) + buf, err := integration.ReadBody(body) c.Assert(err, checker.IsNil) out := string(buf) @@ -236,7 +237,7 @@ func (s *DockerSuite) TestBuildAPIUnnormalizedTarPaths(c *check.C) { c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusOK) - out, err := readBody(body) + out, err := integration.ReadBody(body) c.Assert(err, checker.IsNil) lines := strings.Split(string(out), "\n") c.Assert(len(lines), checker.GreaterThan, 1) diff --git a/integration-cli/docker_api_containers_test.go b/integration-cli/docker_api_containers_test.go index eccba922b9..88d85232f1 100644 --- a/integration-cli/docker_api_containers_test.go +++ b/integration-cli/docker_api_containers_test.go @@ -723,7 +723,7 @@ func (s *DockerSuite) TestContainerAPIInvalidPortSyntax(c *check.C) { c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) - b, err := readBody(body) + b, err := integration.ReadBody(body) c.Assert(err, checker.IsNil) c.Assert(string(b[:]), checker.Contains, "invalid port") } @@ -743,7 +743,7 @@ func (s *DockerSuite) TestContainerAPIRestartPolicyInvalidPolicyName(c *check.C) c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) - b, err := readBody(body) + b, err := integration.ReadBody(body) c.Assert(err, 
checker.IsNil) c.Assert(string(b[:]), checker.Contains, "invalid restart policy") } @@ -763,7 +763,7 @@ func (s *DockerSuite) TestContainerAPIRestartPolicyRetryMismatch(c *check.C) { c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) - b, err := readBody(body) + b, err := integration.ReadBody(body) c.Assert(err, checker.IsNil) c.Assert(string(b[:]), checker.Contains, "maximum retry count cannot be used with restart policy") } @@ -783,7 +783,7 @@ func (s *DockerSuite) TestContainerAPIRestartPolicyNegativeRetryCount(c *check.C c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) - b, err := readBody(body) + b, err := integration.ReadBody(body) c.Assert(err, checker.IsNil) c.Assert(string(b[:]), checker.Contains, "maximum retry count cannot be negative") } @@ -834,7 +834,7 @@ func (s *DockerSuite) TestContainerAPIPostCreateNull(c *check.C) { c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) - b, err := readBody(body) + b, err := integration.ReadBody(body) c.Assert(err, checker.IsNil) type createResp struct { ID string @@ -863,7 +863,7 @@ func (s *DockerSuite) TestCreateWithTooLowMemoryLimit(c *check.C) { res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") c.Assert(err, checker.IsNil) - b, err2 := readBody(body) + b, err2 := integration.ReadBody(body) c.Assert(err2, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) diff --git a/integration-cli/docker_api_exec_test.go b/integration-cli/docker_api_exec_test.go index 716e9ac68f..def43b7484 100644 --- a/integration-cli/docker_api_exec_test.go +++ b/integration-cli/docker_api_exec_test.go @@ -10,6 +10,7 @@ import ( "strings" "time" + "github.com/docker/docker/pkg/integration" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) @@ -40,7 +41,7 @@ func (s 
*DockerSuite) TestExecAPICreateNoValidContentType(c *check.C) { c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) - b, err := readBody(body) + b, err := integration.ReadBody(body) c.Assert(err, checker.IsNil) comment := check.Commentf("Expected message when creating exec command with invalid Content-Type specified") @@ -107,7 +108,7 @@ func (s *DockerSuite) TestExecAPIStartBackwardsCompatible(c *check.C) { resp, body, err := sockRequestRaw("POST", fmt.Sprintf("/v1.20/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "text/plain") c.Assert(err, checker.IsNil) - b, err := readBody(body) + b, err := integration.ReadBody(body) comment := check.Commentf("response body: %s", b) c.Assert(err, checker.IsNil, comment) c.Assert(resp.StatusCode, checker.Equals, http.StatusOK, comment) @@ -156,7 +157,7 @@ func (s *DockerSuite) TestExecAPIStartWithDetach(c *check.C) { _, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", createResp.ID), strings.NewReader(`{"Detach": true}`), "application/json") c.Assert(err, checker.IsNil) - b, err = readBody(body) + b, err = integration.ReadBody(body) comment := check.Commentf("response body: %s", b) c.Assert(err, checker.IsNil, comment) @@ -182,7 +183,7 @@ func startExec(c *check.C, id string, code int) { resp, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "application/json") c.Assert(err, checker.IsNil) - b, err := readBody(body) + b, err := integration.ReadBody(body) comment := check.Commentf("response body: %s", b) c.Assert(err, checker.IsNil, comment) c.Assert(resp.StatusCode, checker.Equals, code, comment) diff --git a/integration-cli/docker_api_service_update_test.go b/integration-cli/docker_api_service_update_test.go index 15a21e579f..c61d122c18 100644 --- a/integration-cli/docker_api_service_update_test.go +++ b/integration-cli/docker_api_service_update_test.go @@ -4,11 +4,12 @@ package main 
import ( "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) -func setPortConfig(portConfig []swarm.PortConfig) serviceConstructor { +func setPortConfig(portConfig []swarm.PortConfig) daemon.ServiceConstructor { return func(s *swarm.Service) { if s.Spec.EndpointSpec == nil { s.Spec.EndpointSpec = &swarm.EndpointSpec{} @@ -22,16 +23,16 @@ func (s *DockerSwarmSuite) TestAPIServiceUpdatePort(c *check.C) { // Create a service with a port mapping of 8080:8081. portConfig := []swarm.PortConfig{{TargetPort: 8081, PublishedPort: 8080}} - serviceID := d.createService(c, simpleTestService, setInstances(1), setPortConfig(portConfig)) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + serviceID := d.CreateService(c, simpleTestService, setInstances(1), setPortConfig(portConfig)) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) // Update the service: changed the port mapping from 8080:8081 to 8082:8083. updatedPortConfig := []swarm.PortConfig{{TargetPort: 8083, PublishedPort: 8082}} - remoteService := d.getService(c, serviceID) - d.updateService(c, remoteService, setPortConfig(updatedPortConfig)) + remoteService := d.GetService(c, serviceID) + d.UpdateService(c, remoteService, setPortConfig(updatedPortConfig)) // Inspect the service and verify port mapping. 
- updatedService := d.getService(c, serviceID) + updatedService := d.GetService(c, serviceID) c.Assert(updatedService.Spec.EndpointSpec, check.NotNil) c.Assert(len(updatedService.Spec.EndpointSpec.Ports), check.Equals, 1) c.Assert(updatedService.Spec.EndpointSpec.Ports[0].TargetPort, check.Equals, uint32(8083)) diff --git a/integration-cli/docker_api_swarm_test.go b/integration-cli/docker_api_swarm_test.go index b95327d21d..1d352ccc6c 100644 --- a/integration-cli/docker_api_swarm_test.go +++ b/integration-cli/docker_api_swarm_test.go @@ -14,6 +14,7 @@ import ( "time" "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) @@ -23,13 +24,13 @@ var defaultReconciliationTimeout = 30 * time.Second func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) { // todo: should find a better way to verify that components are running than /info d1 := s.AddDaemon(c, true, true) - info, err := d1.info() + info, err := d1.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.ControlAvailable, checker.True) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) d2 := s.AddDaemon(c, true, false) - info, err = d2.info() + info, err = d2.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.ControlAvailable, checker.False) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) @@ -37,14 +38,14 @@ func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) { // Leaving cluster c.Assert(d2.Leave(false), checker.IsNil) - info, err = d2.info() + info, err = d2.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.ControlAvailable, checker.False) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) - c.Assert(d2.Join(swarm.JoinRequest{JoinToken: d1.joinTokens(c).Worker, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: d1.JoinTokens(c).Worker, RemoteAddrs: 
[]string{d1.ListenAddr}}), checker.IsNil) - info, err = d2.info() + info, err = d2.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.ControlAvailable, checker.False) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) @@ -60,12 +61,12 @@ func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) { err = d2.Start() c.Assert(err, checker.IsNil) - info, err = d1.info() + info, err = d1.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.ControlAvailable, checker.True) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - info, err = d2.info() + info, err = d2.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.ControlAvailable, checker.False) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) @@ -78,68 +79,68 @@ func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) { // todo: error message differs depending if some components of token are valid d2 := s.AddDaemon(c, false, false) - err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}}) + err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.ListenAddr}}) c.Assert(err, checker.NotNil) c.Assert(err.Error(), checker.Contains, "join token is necessary") - info, err := d2.info() + info, err := d2.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) - err = d2.Join(swarm.JoinRequest{JoinToken: "foobaz", RemoteAddrs: []string{d1.listenAddr}}) + err = d2.Join(swarm.JoinRequest{JoinToken: "foobaz", RemoteAddrs: []string{d1.ListenAddr}}) c.Assert(err, checker.NotNil) c.Assert(err.Error(), checker.Contains, "invalid join token") - info, err = d2.info() + info, err = d2.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) - workerToken := d1.joinTokens(c).Worker + workerToken := d1.JoinTokens(c).Worker - c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), 
checker.IsNil) - info, err = d2.info() + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil) + info, err = d2.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) c.Assert(d2.Leave(false), checker.IsNil) - info, err = d2.info() + info, err = d2.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) // change tokens - d1.rotateTokens(c) + d1.RotateTokens(c) - err = d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}) + err = d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}) c.Assert(err, checker.NotNil) c.Assert(err.Error(), checker.Contains, "join token is necessary") - info, err = d2.info() + info, err = d2.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) - workerToken = d1.joinTokens(c).Worker + workerToken = d1.JoinTokens(c).Worker - c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) - info, err = d2.info() + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil) + info, err = d2.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) c.Assert(d2.Leave(false), checker.IsNil) - info, err = d2.info() + info, err = d2.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) // change spec, don't change tokens - d1.updateSwarm(c, func(s *swarm.Spec) {}) + d1.UpdateSwarm(c, func(s *swarm.Spec) {}) - err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}}) + err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.ListenAddr}}) c.Assert(err, checker.NotNil) c.Assert(err.Error(), 
checker.Contains, "join token is necessary") - info, err = d2.info() + info, err = d2.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) - c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) - info, err = d2.info() + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil) + info, err = d2.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) c.Assert(d2.Leave(false), checker.IsNil) - info, err = d2.info() + info, err = d2.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) } @@ -147,10 +148,10 @@ func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) { func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) { d1 := s.AddDaemon(c, true, true) d2 := s.AddDaemon(c, false, false) - splitToken := strings.Split(d1.joinTokens(c).Worker, "-") + splitToken := strings.Split(d1.JoinTokens(c).Worker, "-") splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e" replacementToken := strings.Join(splitToken, "-") - err := d2.Join(swarm.JoinRequest{JoinToken: replacementToken, RemoteAddrs: []string{d1.listenAddr}}) + err := d2.Join(swarm.JoinRequest{JoinToken: replacementToken, RemoteAddrs: []string{d1.ListenAddr}}) c.Assert(err, checker.NotNil) c.Assert(err.Error(), checker.Contains, "remote CA does not match fingerprint") } @@ -160,48 +161,48 @@ func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) { c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil) d2 := s.AddDaemon(c, true, false) - info, err := d2.info() + info, err := d2.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.ControlAvailable, checker.False) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + 
d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { n.Spec.Role = swarm.NodeRoleManager }) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.True) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True) - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { n.Spec.Role = swarm.NodeRoleWorker }) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.False) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.False) // Demoting last node should fail - node := d1.getNode(c, d1.NodeID) + node := d1.GetNode(c, d1.NodeID) node.Spec.Role = swarm.NodeRoleWorker url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index) status, out, err := d1.SockRequest("POST", url, node.Spec) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("output: %q", string(out))) c.Assert(string(out), checker.Contains, "last manager of the swarm") - info, err = d1.info() + info, err = d1.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) c.Assert(info.ControlAvailable, checker.True) // Promote already demoted node - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { n.Spec.Role = swarm.NodeRoleManager }) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.True) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True) } func (s *DockerSwarmSuite) TestAPISwarmServicesEmptyList(c *check.C) { d := s.AddDaemon(c, true, true) - services := d.listServices(c) + services := d.ListServices(c) c.Assert(services, checker.NotNil) c.Assert(len(services), checker.Equals, 0, check.Commentf("services: %#v", services)) } @@ -210,16 +211,16 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesCreate(c 
*check.C) { d := s.AddDaemon(c, true, true) instances := 2 - id := d.createService(c, simpleTestService, setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + id := d.CreateService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) - service := d.getService(c, id) + service := d.GetService(c, id) instances = 5 - d.updateService(c, service, setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + d.UpdateService(c, service, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) - d.removeService(c, service.ID) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) + d.RemoveService(c, service.ID) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0) } func (s *DockerSwarmSuite) TestAPISwarmServicesMultipleAgents(c *check.C) { @@ -230,23 +231,23 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesMultipleAgents(c *check.C) { time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks instances := 9 - id := d1.createService(c, simpleTestService, setInstances(instances)) + id := d1.CreateService(c, simpleTestService, setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) - waitAndAssert(c, defaultReconciliationTimeout, d3.checkActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.GreaterThan, 0) 
+ waitAndAssert(c, defaultReconciliationTimeout, d3.CheckActiveContainerCount, checker.GreaterThan, 0) - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) // reconciliation on d2 node down c.Assert(d2.Stop(), checker.IsNil) - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) // test downscaling instances = 5 - d1.updateService(c, d1.getService(c, id), setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) } @@ -255,27 +256,27 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesCreateGlobal(c *check.C) { d2 := s.AddDaemon(c, true, false) d3 := s.AddDaemon(c, true, false) - d1.createService(c, simpleTestService, setGlobalMode) + d1.CreateService(c, simpleTestService, setGlobalMode) - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, 1) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 1) - waitAndAssert(c, defaultReconciliationTimeout, d3.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, 
defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckActiveContainerCount, checker.Equals, 1) d4 := s.AddDaemon(c, true, false) d5 := s.AddDaemon(c, true, false) - waitAndAssert(c, defaultReconciliationTimeout, d4.checkActiveContainerCount, checker.Equals, 1) - waitAndAssert(c, defaultReconciliationTimeout, d5.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d4.CheckActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d5.CheckActiveContainerCount, checker.Equals, 1) } func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *check.C) { const nodeCount = 3 - var daemons [nodeCount]*SwarmDaemon + var daemons [nodeCount]*daemon.Swarm for i := 0; i < nodeCount; i++ { daemons[i] = s.AddDaemon(c, true, i == 0) } // wait for nodes ready - waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) + waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) // service image at start image1 := "busybox:latest" @@ -291,26 +292,26 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *check.C) { // create service instances := 5 parallelism := 2 - id := daemons[0].createService(c, serviceForUpdate, setInstances(instances)) + id := daemons[0].CreateService(c, serviceForUpdate, setInstances(instances)) // wait for tasks ready - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, map[string]int{image1: instances}) // issue service update - service := daemons[0].getService(c, id) - daemons[0].updateService(c, service, setImage(image2)) + service := daemons[0].GetService(c, id) + 
daemons[0].UpdateService(c, service, setImage(image2)) // first batch - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, map[string]int{image1: instances - parallelism, image2: parallelism}) // 2nd batch - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism}) // 3nd batch - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, map[string]int{image2: instances}) // Roll back to the previous version. This uses the CLI because @@ -319,26 +320,26 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *check.C) { c.Assert(err, checker.IsNil, check.Commentf(out)) // first batch - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, map[string]int{image2: instances - parallelism, image1: parallelism}) // 2nd batch - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, map[string]int{image2: instances - 2*parallelism, image1: 2 * parallelism}) // 3nd batch - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, map[string]int{image1: instances}) } func (s *DockerSwarmSuite) 
TestAPISwarmServicesFailedUpdate(c *check.C) { const nodeCount = 3 - var daemons [nodeCount]*SwarmDaemon + var daemons [nodeCount]*daemon.Swarm for i := 0; i < nodeCount; i++ { daemons[i] = s.AddDaemon(c, true, i == 0) } // wait for nodes ready - waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) + waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) // service image at start image1 := "busybox:latest" @@ -347,19 +348,19 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *check.C) { // create service instances := 5 - id := daemons[0].createService(c, serviceForUpdate, setInstances(instances)) + id := daemons[0].CreateService(c, serviceForUpdate, setInstances(instances)) // wait for tasks ready - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, map[string]int{image1: instances}) // issue service update - service := daemons[0].getService(c, id) - daemons[0].updateService(c, service, setImage(image2), setFailureAction(swarm.UpdateFailureActionPause), setMaxFailureRatio(0.25), setParallelism(1)) + service := daemons[0].GetService(c, id) + daemons[0].UpdateService(c, service, setImage(image2), setFailureAction(swarm.UpdateFailureActionPause), setMaxFailureRatio(0.25), setParallelism(1)) // should update 2 tasks and then pause - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceUpdateState(id), checker.Equals, swarm.UpdateStatePaused) - v, _ := daemons[0].checkServiceRunningTasks(id)(c) + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceUpdateState(id), checker.Equals, swarm.UpdateStatePaused) + v, _ := daemons[0].CheckServiceRunningTasks(id)(c) c.Assert(v, checker.Equals, instances-2) // Roll back to the previous version. 
This uses the CLI because @@ -367,57 +368,57 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *check.C) { out, err := daemons[0].Cmd("service", "update", "--rollback", id) c.Assert(err, checker.IsNil, check.Commentf(out)) - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, map[string]int{image1: instances}) } func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *check.C) { const nodeCount = 3 - var daemons [nodeCount]*SwarmDaemon + var daemons [nodeCount]*daemon.Swarm for i := 0; i < nodeCount; i++ { daemons[i] = s.AddDaemon(c, true, i == 0) } // wait for nodes ready - waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) + waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) // create service constraints := []string{"node.role==worker"} instances := 3 - id := daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + id := daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) // wait for tasks ready - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) // validate tasks are running on worker nodes - tasks := daemons[0].getServiceTasks(c, id) + tasks := daemons[0].GetServiceTasks(c, id) for _, task := range tasks { - node := daemons[0].getNode(c, task.NodeID) + node := daemons[0].GetNode(c, task.NodeID) c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleWorker) } //remove service - daemons[0].removeService(c, id) + daemons[0].RemoveService(c, id) // create service constraints = []string{"node.role!=worker"} - id = daemons[0].createService(c, 
simpleTestService, setConstraints(constraints), setInstances(instances)) + id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) // wait for tasks ready - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) - tasks = daemons[0].getServiceTasks(c, id) + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].GetServiceTasks(c, id) // validate tasks are running on manager nodes for _, task := range tasks { - node := daemons[0].getNode(c, task.NodeID) + node := daemons[0].GetNode(c, task.NodeID) c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleManager) } //remove service - daemons[0].removeService(c, id) + daemons[0].RemoveService(c, id) // create service constraints = []string{"node.role==nosuchrole"} - id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) // wait for tasks created - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceTasks(id), checker.Equals, instances) // let scheduler try time.Sleep(250 * time.Millisecond) // validate tasks are not assigned to any node - tasks = daemons[0].getServiceTasks(c, id) + tasks = daemons[0].GetServiceTasks(c, id) for _, task := range tasks { c.Assert(task.NodeID, checker.Equals, "") } @@ -425,23 +426,23 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *check.C) { func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *check.C) { const nodeCount = 3 - var daemons [nodeCount]*SwarmDaemon + var daemons [nodeCount]*daemon.Swarm for i := 0; i < nodeCount; i++ { daemons[i] = s.AddDaemon(c, true, i == 0) } // wait for 
nodes ready - waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) - nodes := daemons[0].listNodes(c) + waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) + nodes := daemons[0].ListNodes(c) c.Assert(len(nodes), checker.Equals, nodeCount) // add labels to nodes - daemons[0].updateNode(c, nodes[0].ID, func(n *swarm.Node) { + daemons[0].UpdateNode(c, nodes[0].ID, func(n *swarm.Node) { n.Spec.Annotations.Labels = map[string]string{ "security": "high", } }) for i := 1; i < nodeCount; i++ { - daemons[0].updateNode(c, nodes[i].ID, func(n *swarm.Node) { + daemons[0].UpdateNode(c, nodes[i].ID, func(n *swarm.Node) { n.Spec.Annotations.Labels = map[string]string{ "security": "low", } @@ -451,68 +452,68 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *check.C) { // create service instances := 3 constraints := []string{"node.labels.security==high"} - id := daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + id := daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) // wait for tasks ready - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) - tasks := daemons[0].getServiceTasks(c, id) + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + tasks := daemons[0].GetServiceTasks(c, id) // validate all tasks are running on nodes[0] for _, task := range tasks { c.Assert(task.NodeID, checker.Equals, nodes[0].ID) } //remove service - daemons[0].removeService(c, id) + daemons[0].RemoveService(c, id) // create service constraints = []string{"node.labels.security!=high"} - id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), 
setInstances(instances)) // wait for tasks ready - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) - tasks = daemons[0].getServiceTasks(c, id) + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].GetServiceTasks(c, id) // validate all tasks are NOT running on nodes[0] for _, task := range tasks { c.Assert(task.NodeID, checker.Not(checker.Equals), nodes[0].ID) } //remove service - daemons[0].removeService(c, id) + daemons[0].RemoveService(c, id) constraints = []string{"node.labels.security==medium"} - id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) // wait for tasks created - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceTasks(id), checker.Equals, instances) // let scheduler try time.Sleep(250 * time.Millisecond) - tasks = daemons[0].getServiceTasks(c, id) + tasks = daemons[0].GetServiceTasks(c, id) // validate tasks are not assigned for _, task := range tasks { c.Assert(task.NodeID, checker.Equals, "") } //remove service - daemons[0].removeService(c, id) + daemons[0].RemoveService(c, id) // multiple constraints constraints = []string{ "node.labels.security==high", fmt.Sprintf("node.id==%s", nodes[1].ID), } - id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) // wait for tasks created - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, 
daemons[0].CheckServiceTasks(id), checker.Equals, instances) // let scheduler try time.Sleep(250 * time.Millisecond) - tasks = daemons[0].getServiceTasks(c, id) + tasks = daemons[0].GetServiceTasks(c, id) // validate tasks are not assigned for _, task := range tasks { c.Assert(task.NodeID, checker.Equals, "") } // make nodes[1] fulfills the constraints - daemons[0].updateNode(c, nodes[1].ID, func(n *swarm.Node) { + daemons[0].UpdateNode(c, nodes[1].ID, func(n *swarm.Node) { n.Spec.Annotations.Labels = map[string]string{ "security": "high", } }) // wait for tasks ready - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) - tasks = daemons[0].getServiceTasks(c, id) + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].GetServiceTasks(c, id) for _, task := range tasks { c.Assert(task.NodeID, checker.Equals, nodes[1].ID) } @@ -529,14 +530,14 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) { time.Sleep(1 * time.Second) // make sure all daemons are ready to accept instances := 9 - d1.createService(c, simpleTestService, setInstances(instances)) + d1.CreateService(c, simpleTestService, setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) - getContainers := func() map[string]*SwarmDaemon { - m := make(map[string]*SwarmDaemon) - for _, d := range []*SwarmDaemon{d1, d2, d3} { - for _, id := range d.activeContainers() { + getContainers := func() map[string]*daemon.Swarm { + m := make(map[string]*daemon.Swarm) + for _, d := range []*daemon.Swarm{d1, 
d2, d3} { + for _, id := range d.ActiveContainers() { m[id] = d } } @@ -553,7 +554,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) { _, err := containers[toRemove].Cmd("stop", toRemove) c.Assert(err, checker.IsNil) - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) containers2 := getContainers() c.Assert(containers2, checker.HasLen, instances) @@ -579,7 +580,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) { time.Sleep(time.Second) // give some time to handle the signal - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) containers2 = getContainers() c.Assert(containers2, checker.HasLen, instances) @@ -599,20 +600,20 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *check.C) { d3 := s.AddDaemon(c, true, true) // start a service by hitting each of the 3 managers - d1.createService(c, simpleTestService, func(s *swarm.Service) { + d1.CreateService(c, simpleTestService, func(s *swarm.Service) { s.Spec.Name = "test1" }) - d2.createService(c, simpleTestService, func(s *swarm.Service) { + d2.CreateService(c, simpleTestService, func(s *swarm.Service) { s.Spec.Name = "test2" }) - d3.createService(c, simpleTestService, func(s *swarm.Service) { + d3.CreateService(c, simpleTestService, func(s *swarm.Service) { s.Spec.Name = "test3" }) // 3 
services should be started now, because the requests were proxied to leader // query each node and make sure it returns 3 services - for _, d := range []*SwarmDaemon{d1, d2, d3} { - services := d.listServices(c) + for _, d := range []*daemon.Swarm{d1, d2, d3} { + services := d.ListServices(c) c.Assert(services, checker.HasLen, 3) } } @@ -624,23 +625,23 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) { d3 := s.AddDaemon(c, true, true) // assert that the first node we made is the leader, and the other two are followers - c.Assert(d1.getNode(c, d1.NodeID).ManagerStatus.Leader, checker.True) - c.Assert(d1.getNode(c, d2.NodeID).ManagerStatus.Leader, checker.False) - c.Assert(d1.getNode(c, d3.NodeID).ManagerStatus.Leader, checker.False) + c.Assert(d1.GetNode(c, d1.NodeID).ManagerStatus.Leader, checker.True) + c.Assert(d1.GetNode(c, d2.NodeID).ManagerStatus.Leader, checker.False) + c.Assert(d1.GetNode(c, d3.NodeID).ManagerStatus.Leader, checker.False) d1.Stop() // stop the leader var ( - leader *SwarmDaemon // keep track of leader - followers []*SwarmDaemon // keep track of followers + leader *daemon.Swarm // keep track of leader + followers []*daemon.Swarm // keep track of followers ) - checkLeader := func(nodes ...*SwarmDaemon) checkF { + checkLeader := func(nodes ...*daemon.Swarm) checkF { return func(c *check.C) (interface{}, check.CommentInterface) { // clear these out before each run leader = nil followers = nil for _, d := range nodes { - if d.getNode(c, d.NodeID).ManagerStatus.Leader { + if d.GetNode(c, d.NodeID).ManagerStatus.Leader { leader = d } else { followers = append(followers, d) @@ -651,7 +652,7 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) { return false, check.Commentf("no leader elected") } - return true, check.Commentf("elected %v", leader.id) + return true, check.Commentf("elected %v", leader.ID()) } } @@ -685,21 +686,21 @@ func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) { d2 := s.AddDaemon(c, 
true, true) d3 := s.AddDaemon(c, true, true) - d1.createService(c, simpleTestService) + d1.CreateService(c, simpleTestService) c.Assert(d2.Stop(), checker.IsNil) // make sure there is a leader - waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil) - d1.createService(c, simpleTestService, func(s *swarm.Service) { + d1.CreateService(c, simpleTestService, func(s *swarm.Service) { s.Spec.Name = "top1" }) c.Assert(d3.Stop(), checker.IsNil) // make sure there is a leader - waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil) var service swarm.Service simpleTestService(&service) @@ -711,9 +712,9 @@ func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) { c.Assert(d2.Start(), checker.IsNil) // make sure there is a leader - waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil) - d1.createService(c, simpleTestService, func(s *swarm.Service) { + d1.CreateService(c, simpleTestService, func(s *swarm.Service) { s.Spec.Name = "top3" }) } @@ -723,12 +724,12 @@ func (s *DockerSwarmSuite) TestAPISwarmListNodes(c *check.C) { d2 := s.AddDaemon(c, true, false) d3 := s.AddDaemon(c, true, false) - nodes := d1.listNodes(c) + nodes := d1.ListNodes(c) c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes)) loop0: for _, n := range nodes { - for _, d := range []*SwarmDaemon{d1, d2, d3} { + for _, d := range []*daemon.Swarm{d1, d2, d3} { if n.ID == d.NodeID { continue loop0 } @@ -740,13 +741,13 @@ loop0: func (s *DockerSwarmSuite) TestAPISwarmNodeUpdate(c *check.C) { d := s.AddDaemon(c, true, true) - nodes := d.listNodes(c) + nodes := d.ListNodes(c) - d.updateNode(c, nodes[0].ID, func(n *swarm.Node) { + d.UpdateNode(c, nodes[0].ID, func(n *swarm.Node) { 
n.Spec.Availability = swarm.NodeAvailabilityPause }) - n := d.getNode(c, nodes[0].ID) + n := d.GetNode(c, nodes[0].ID) c.Assert(n.Spec.Availability, checker.Equals, swarm.NodeAvailabilityPause) } @@ -756,17 +757,17 @@ func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *check.C) { d2 := s.AddDaemon(c, true, false) _ = s.AddDaemon(c, true, false) - nodes := d1.listNodes(c) + nodes := d1.ListNodes(c) c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes)) // Getting the info so we can take the NodeID - d2Info, err := d2.info() + d2Info, err := d2.SwarmInfo() c.Assert(err, checker.IsNil) // forceful removal of d2 should work - d1.removeNode(c, d2Info.NodeID, true) + d1.RemoveNode(c, d2Info.NodeID, true) - nodes = d1.listNodes(c) + nodes = d1.ListNodes(c) c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes)) // Restart the node that was removed @@ -777,7 +778,7 @@ func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *check.C) { time.Sleep(1 * time.Second) // Make sure the node didn't rejoin - nodes = d1.listNodes(c) + nodes = d1.ListNodes(c) c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes)) } @@ -789,49 +790,49 @@ func (s *DockerSwarmSuite) TestAPISwarmNodeDrainPause(c *check.C) { // start a service, expect balanced distribution instances := 8 - id := d1.createService(c, simpleTestService, setInstances(instances)) + id := d1.CreateService(c, simpleTestService, setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, 
d2.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) // drain d2, all containers should move to d1 - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { n.Spec.Availability = swarm.NodeAvailabilityDrain }) - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 0) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0) // set d2 back to active - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { n.Spec.Availability = swarm.NodeAvailabilityActive }) instances = 1 - d1.updateService(c, d1.getService(c, id), setInstances(instances)) + d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) instances = 8 - d1.updateService(c, d1.getService(c, id), setInstances(instances)) + d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) // drained node first so we don't get any old containers - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) - waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, 
d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) - d2ContainerCount := len(d2.activeContainers()) + d2ContainerCount := len(d2.ActiveContainers()) // set d2 to paused, scale service up, only d1 gets new tasks - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { n.Spec.Availability = swarm.NodeAvailabilityPause }) instances = 14 - d1.updateService(c, d1.getService(c, id), setInstances(instances)) + d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances-d2ContainerCount) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, d2ContainerCount) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances-d2ContainerCount) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, d2ContainerCount) } @@ -839,18 +840,18 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *check.C) { d := s.AddDaemon(c, true, true) instances := 2 - d.createService(c, simpleTestService, setInstances(instances)) + d.CreateService(c, simpleTestService, setInstances(instances)) id, err := d.Cmd("run", "-d", "busybox", "top") c.Assert(err, checker.IsNil) id = strings.TrimSpace(id) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances+1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances+1) 
c.Assert(d.Leave(false), checker.NotNil) c.Assert(d.Leave(true), checker.IsNil) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) id2, err := d.Cmd("ps", "-q") c.Assert(err, checker.IsNil) @@ -873,13 +874,13 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) { c.Assert(err, check.NotNil) c.Assert(err.Error(), checker.Contains, "Timeout was reached") - info, err := d2.info() + info, err := d2.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending) c.Assert(d2.Leave(true), checker.IsNil) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1) id2, err := d2.Cmd("ps", "-q") c.Assert(err, checker.IsNil) @@ -896,12 +897,12 @@ func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) { c.Assert(err, check.NotNil) c.Assert(err.Error(), checker.Contains, "Timeout was reached") - waitAndAssert(c, defaultReconciliationTimeout, d.checkLocalNodeState, checker.Equals, swarm.LocalNodeStatePending) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStatePending) c.Assert(d.Stop(), checker.IsNil) c.Assert(d.Start(), checker.IsNil) - info, err := d.info() + info, err := d.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) } @@ -910,43 +911,43 @@ func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) { d1 := s.AddDaemon(c, true, true) instances := 2 - id := d1.createService(c, simpleTestService, setInstances(instances)) + id := d1.CreateService(c, simpleTestService, setInstances(instances)) - d1.getService(c, id) + d1.GetService(c, id) d1.Stop() d1.Start() - d1.getService(c, id) + 
d1.GetService(c, id) d2 := s.AddDaemon(c, true, true) - d2.getService(c, id) + d2.GetService(c, id) d2.Stop() d2.Start() - d2.getService(c, id) + d2.GetService(c, id) d3 := s.AddDaemon(c, true, true) - d3.getService(c, id) + d3.GetService(c, id) d3.Stop() d3.Start() - d3.getService(c, id) + d3.GetService(c, id) d3.Kill() time.Sleep(1 * time.Second) // time to handle signal d3.Start() - d3.getService(c, id) + d3.GetService(c, id) } func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *check.C) { d := s.AddDaemon(c, true, true) instances := 2 - id := d.createService(c, simpleTestService, setInstances(instances)) + id := d.CreateService(c, simpleTestService, setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) - containers := d.activeContainers() + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) + containers := d.ActiveContainers() instances = 4 - d.updateService(c, d.getService(c, id), setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) - containers2 := d.activeContainers() + d.UpdateService(c, d.GetService(c, id), setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) + containers2 := d.ActiveContainers() loop0: for _, c1 := range containers { @@ -982,15 +983,15 @@ func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) { d2 := s.AddDaemon(c, true, true) instances := 2 - id := d1.createService(c, simpleTestService, setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) + id := d1.CreateService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, 
d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) // drain d2, all containers should move to d1 - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) { n.Spec.Availability = swarm.NodeAvailabilityDrain }) - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 0) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0) c.Assert(d2.Stop(), checker.IsNil) @@ -999,18 +1000,18 @@ func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) { Spec: swarm.Spec{}, }), checker.IsNil) - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances) d3 := s.AddDaemon(c, true, true) - info, err := d3.info() + info, err := d3.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.ControlAvailable, checker.True) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) instances = 4 - d3.updateService(c, d3.getService(c, id), setInstances(instances)) + d3.UpdateService(c, d3.GetService(c, id), setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) } func simpleTestService(s *swarm.Service) { @@ -1064,7 +1065,7 @@ func serviceForUpdate(s *swarm.Service) { s.Spec.Name = "updatetest" } -func setInstances(replicas int) serviceConstructor { +func 
setInstances(replicas int) daemon.ServiceConstructor { ureplicas := uint64(replicas) return func(s *swarm.Service) { s.Spec.Mode = swarm.ServiceMode{ @@ -1075,31 +1076,31 @@ func setInstances(replicas int) serviceConstructor { } } -func setImage(image string) serviceConstructor { +func setImage(image string) daemon.ServiceConstructor { return func(s *swarm.Service) { s.Spec.TaskTemplate.ContainerSpec.Image = image } } -func setFailureAction(failureAction string) serviceConstructor { +func setFailureAction(failureAction string) daemon.ServiceConstructor { return func(s *swarm.Service) { s.Spec.UpdateConfig.FailureAction = failureAction } } -func setMaxFailureRatio(maxFailureRatio float32) serviceConstructor { +func setMaxFailureRatio(maxFailureRatio float32) daemon.ServiceConstructor { return func(s *swarm.Service) { s.Spec.UpdateConfig.MaxFailureRatio = maxFailureRatio } } -func setParallelism(parallelism uint64) serviceConstructor { +func setParallelism(parallelism uint64) daemon.ServiceConstructor { return func(s *swarm.Service) { s.Spec.UpdateConfig.Parallelism = parallelism } } -func setConstraints(constraints []string) serviceConstructor { +func setConstraints(constraints []string) daemon.ServiceConstructor { return func(s *swarm.Service) { if s.Spec.TaskTemplate.Placement == nil { s.Spec.TaskTemplate.Placement = &swarm.Placement{} @@ -1114,7 +1115,7 @@ func setGlobalMode(s *swarm.Service) { } } -func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount int) { +func checkClusterHealth(c *check.C, cl []*daemon.Swarm, managerCount, workerCount int) { var totalMCount, totalWCount int for _, d := range cl { @@ -1125,7 +1126,7 @@ func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount // check info in a waitAndAssert, because if the cluster doesn't have a leader, `info` will return an error checkInfo := func(c *check.C) (interface{}, check.CommentInterface) { - info, err = d.info() + info, err = d.SwarmInfo() return 
err, check.Commentf("cluster not ready in time") } waitAndAssert(c, defaultReconciliationTimeout, checkInfo, checker.IsNil) @@ -1138,12 +1139,12 @@ func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount totalMCount++ var mCount, wCount int - for _, n := range d.listNodes(c) { + for _, n := range d.ListNodes(c) { waitReady := func(c *check.C) (interface{}, check.CommentInterface) { if n.Status.State == swarm.NodeStateReady { return true, nil } - nn := d.getNode(c, n.ID) + nn := d.GetNode(c, n.ID) n = *nn return n.Status.State == swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.Info.NodeID) } @@ -1153,7 +1154,7 @@ func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount if n.Spec.Availability == swarm.NodeAvailabilityActive { return true, nil } - nn := d.getNode(c, n.ID) + nn := d.GetNode(c, n.ID) n = *nn return n.Spec.Availability == swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.Info.NodeID) } @@ -1181,10 +1182,10 @@ func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) { mCount, wCount := 5, 1 - var nodes []*SwarmDaemon + var nodes []*daemon.Swarm for i := 0; i < mCount; i++ { manager := s.AddDaemon(c, true, true) - info, err := manager.info() + info, err := manager.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.ControlAvailable, checker.True) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) @@ -1193,7 +1194,7 @@ func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) { for i := 0; i < wCount; i++ { worker := s.AddDaemon(c, true, false) - info, err := worker.info() + info, err := worker.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.ControlAvailable, checker.False) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) @@ -1207,13 +1208,14 @@ func (s *DockerSwarmSuite) 
TestAPISwarmRestartCluster(c *check.C) { errs := make(chan error, len(nodes)) for _, d := range nodes { - go func(daemon *SwarmDaemon) { + go func(daemon *daemon.Swarm) { defer wg.Done() if err := daemon.Stop(); err != nil { errs <- err } + // FIXME(vdemeester) This is duplicated… if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { - daemon.root = filepath.Dir(daemon.root) + daemon.Root = filepath.Dir(daemon.Root) } }(d) } @@ -1231,7 +1233,7 @@ func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) { errs := make(chan error, len(nodes)) for _, d := range nodes { - go func(daemon *SwarmDaemon) { + go func(daemon *daemon.Swarm) { defer wg.Done() if err := daemon.Start("--iptables=false"); err != nil { errs <- err @@ -1252,10 +1254,10 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) { d := s.AddDaemon(c, true, true) instances := 2 - id := d.createService(c, simpleTestService, setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + id := d.CreateService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) - service := d.getService(c, id) + service := d.GetService(c, id) instances = 5 setInstances(instances)(service) @@ -1263,13 +1265,13 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) { status, out, err := d.SockRequest("POST", url, service.Spec) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) } func (s *DockerSwarmSuite) TestAPISwarmSecretsEmptyList(c *check.C) { d := s.AddDaemon(c, true, true) - secrets := d.listSecrets(c) + secrets := 
d.ListSecrets(c) c.Assert(secrets, checker.NotNil) c.Assert(len(secrets), checker.Equals, 0, check.Commentf("secrets: %#v", secrets)) } @@ -1278,7 +1280,7 @@ func (s *DockerSwarmSuite) TestAPISwarmSecretsCreate(c *check.C) { d := s.AddDaemon(c, true, true) testName := "test_secret" - id := d.createSecret(c, swarm.SecretSpec{ + id := d.CreateSecret(c, swarm.SecretSpec{ swarm.Annotations{ Name: testName, }, @@ -1286,7 +1288,7 @@ func (s *DockerSwarmSuite) TestAPISwarmSecretsCreate(c *check.C) { }) c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) - secrets := d.listSecrets(c) + secrets := d.ListSecrets(c) c.Assert(len(secrets), checker.Equals, 1, check.Commentf("secrets: %#v", secrets)) name := secrets[0].Spec.Annotations.Name c.Assert(name, checker.Equals, testName, check.Commentf("secret: %s", name)) @@ -1296,7 +1298,7 @@ func (s *DockerSwarmSuite) TestAPISwarmSecretsDelete(c *check.C) { d := s.AddDaemon(c, true, true) testName := "test_secret" - id := d.createSecret(c, swarm.SecretSpec{ + id := d.CreateSecret(c, swarm.SecretSpec{ swarm.Annotations{ Name: testName, }, @@ -1304,10 +1306,10 @@ func (s *DockerSwarmSuite) TestAPISwarmSecretsDelete(c *check.C) { }) c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) - secret := d.getSecret(c, id) + secret := d.GetSecret(c, id) c.Assert(secret.ID, checker.Equals, id, check.Commentf("secret: %v", secret)) - d.deleteSecret(c, secret.ID) + d.DeleteSecret(c, secret.ID) status, out, err := d.SockRequest("GET", "/secrets/"+id, nil) c.Assert(err, checker.IsNil) c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf("secret delete: %s", string(out))) diff --git a/integration-cli/docker_api_test.go b/integration-cli/docker_api_test.go index 3b38ba96f2..714344d8d4 100644 --- a/integration-cli/docker_api_test.go +++ b/integration-cli/docker_api_test.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/docker/docker/api" + 
"github.com/docker/docker/pkg/integration" "github.com/docker/docker/pkg/integration/checker" icmd "github.com/docker/docker/pkg/integration/cmd" "github.com/go-check/check" @@ -78,7 +79,7 @@ func (s *DockerSuite) TestAPIErrorJSON(c *check.C) { c.Assert(err, checker.IsNil) c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError) c.Assert(httpResp.Header.Get("Content-Type"), checker.Equals, "application/json") - b, err := readBody(body) + b, err := integration.ReadBody(body) c.Assert(err, checker.IsNil) c.Assert(getErrorMessage(c, b), checker.Equals, "Config cannot be empty in order to create a container") } @@ -91,7 +92,7 @@ func (s *DockerSuite) TestAPIErrorPlainText(c *check.C) { c.Assert(err, checker.IsNil) c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError) c.Assert(httpResp.Header.Get("Content-Type"), checker.Contains, "text/plain") - b, err := readBody(body) + b, err := integration.ReadBody(body) c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(string(b)), checker.Equals, "Config cannot be empty in order to create a container") } @@ -102,7 +103,7 @@ func (s *DockerSuite) TestAPIErrorNotFoundJSON(c *check.C) { c.Assert(err, checker.IsNil) c.Assert(httpResp.StatusCode, checker.Equals, http.StatusNotFound) c.Assert(httpResp.Header.Get("Content-Type"), checker.Equals, "application/json") - b, err := readBody(body) + b, err := integration.ReadBody(body) c.Assert(err, checker.IsNil) c.Assert(getErrorMessage(c, b), checker.Equals, "page not found") } @@ -112,7 +113,7 @@ func (s *DockerSuite) TestAPIErrorNotFoundPlainText(c *check.C) { c.Assert(err, checker.IsNil) c.Assert(httpResp.StatusCode, checker.Equals, http.StatusNotFound) c.Assert(httpResp.Header.Get("Content-Type"), checker.Contains, "text/plain") - b, err := readBody(body) + b, err := integration.ReadBody(body) c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(string(b)), checker.Equals, "page not found") } diff --git 
a/integration-cli/docker_cli_authz_plugin_v2_test.go b/integration-cli/docker_cli_authz_plugin_v2_test.go index 2b66aa3b3b..0a5235fd0b 100644 --- a/integration-cli/docker_cli_authz_plugin_v2_test.go +++ b/integration-cli/docker_cli_authz_plugin_v2_test.go @@ -6,6 +6,7 @@ import ( "fmt" "strings" + "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) @@ -26,12 +27,14 @@ func init() { type DockerAuthzV2Suite struct { ds *DockerSuite - d *Daemon + d *daemon.Daemon } func (s *DockerAuthzV2Suite) SetUpTest(c *check.C) { testRequires(c, DaemonIsLinux, Network) - s.d = NewDaemon(c) + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: experimentalDaemon, + }) c.Assert(s.d.Start(), check.IsNil) } diff --git a/integration-cli/docker_cli_authz_unix_test.go b/integration-cli/docker_cli_authz_unix_test.go index 55d03587b9..a3d2c94e2a 100644 --- a/integration-cli/docker_cli_authz_unix_test.go +++ b/integration-cli/docker_cli_authz_unix_test.go @@ -22,6 +22,7 @@ import ( "net/http/httputil" "net/url" + "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/plugins" @@ -48,7 +49,7 @@ func init() { type DockerAuthzSuite struct { server *httptest.Server ds *DockerSuite - d *Daemon + d *daemon.Daemon ctrl *authorizationController } @@ -63,7 +64,9 @@ type authorizationController struct { } func (s *DockerAuthzSuite) SetUpTest(c *check.C) { - s.d = NewDaemon(c) + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: experimentalDaemon, + }) s.ctrl = &authorizationController{} } @@ -285,7 +288,7 @@ func (s *DockerAuthzSuite) TestAuthZPluginAPIDenyResponse(c *check.C) { s.ctrl.reqRes.Allow = false s.ctrl.resRes.Msg = unauthorizedMessage - daemonURL, err := url.Parse(s.d.sock()) + daemonURL, err := url.Parse(s.d.Sock()) conn, err := 
net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10) c.Assert(err, check.IsNil) @@ -328,7 +331,7 @@ func (s *DockerAuthzSuite) TestAuthZPluginAllowEventStream(c *check.C) { startTime := strconv.FormatInt(daemonTime(c).Unix(), 10) // Add another command to to enable event pipelining - eventsCmd := exec.Command(dockerBinary, "--host", s.d.sock(), "events", "--since", startTime) + eventsCmd := exec.Command(dockerBinary, "--host", s.d.Sock(), "events", "--since", startTime) stdout, err := eventsCmd.StdoutPipe() if err != nil { c.Assert(err, check.IsNil) @@ -349,7 +352,7 @@ func (s *DockerAuthzSuite) TestAuthZPluginAllowEventStream(c *check.C) { out, err := s.d.Cmd("run", "-d", "busybox", "top") c.Assert(err, check.IsNil, check.Commentf(out)) containerID := strings.TrimSpace(out) - c.Assert(s.d.waitRun(containerID), checker.IsNil) + c.Assert(s.d.WaitRun(containerID), checker.IsNil) events := map[string]chan bool{ "create": make(chan bool, 1), @@ -451,7 +454,7 @@ func (s *DockerAuthzSuite) TestAuthZPluginHeader(c *check.C) { s.ctrl.resRes.Allow = true c.Assert(s.d.LoadBusybox(), check.IsNil) - daemonURL, err := url.Parse(s.d.sock()) + daemonURL, err := url.Parse(s.d.Sock()) conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10) c.Assert(err, check.IsNil) diff --git a/integration-cli/docker_cli_by_digest_test.go b/integration-cli/docker_cli_by_digest_test.go index c2d85461a8..c28ffaca98 100644 --- a/integration-cli/docker_cli_by_digest_test.go +++ b/integration-cli/docker_cli_by_digest_test.go @@ -636,7 +636,7 @@ func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { // digest verification for the target layer digest. 
// Remove distribution cache to force a re-pull of the blobs - if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil { + if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.StorageDriver(), "distribution")); err != nil { c.Fatalf("error clearing distribution cache: %v", err) } @@ -679,7 +679,7 @@ func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { // digest verification for the target layer digest. // Remove distribution cache to force a re-pull of the blobs - if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil { + if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.StorageDriver(), "distribution")); err != nil { c.Fatalf("error clearing distribution cache: %v", err) } diff --git a/integration-cli/docker_cli_daemon_plugins_test.go b/integration-cli/docker_cli_daemon_plugins_test.go index 7ee068b37a..2f710d592b 100644 --- a/integration-cli/docker_cli_daemon_plugins_test.go +++ b/integration-cli/docker_cli_daemon_plugins_test.go @@ -133,7 +133,7 @@ func (s *DockerDaemonSuite) TestDaemonShutdownLiveRestoreWithPlugins(c *check.C) } }() - if err := s.d.cmd.Process.Signal(os.Interrupt); err != nil { + if err := s.d.Interrupt(); err != nil { c.Fatalf("Could not kill daemon: %v", err) } @@ -166,12 +166,12 @@ func (s *DockerDaemonSuite) TestDaemonShutdownWithPlugins(c *check.C) { } }() - if err := s.d.cmd.Process.Signal(os.Interrupt); err != nil { + if err := s.d.Interrupt(); err != nil { c.Fatalf("Could not kill daemon: %v", err) } for { - if err := syscall.Kill(s.d.cmd.Process.Pid, 0); err == syscall.ESRCH { + if err := syscall.Kill(s.d.Pid(), 0); err == syscall.ESRCH { break } } diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go index d317e8bb63..d617509e09 100644 --- a/integration-cli/docker_cli_daemon_test.go +++ 
b/integration-cli/docker_cli_daemon_test.go @@ -20,6 +20,7 @@ import ( "syscall" "time" + "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/pkg/integration/checker" icmd "github.com/docker/docker/pkg/integration/cmd" "github.com/docker/docker/pkg/mount" @@ -171,7 +172,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartOnFailure(c *check.C) { c.Assert(err, check.IsNil, check.Commentf("run top1: %v", out)) // wait test1 to stop - hostArgs := []string{"--host", s.d.sock()} + hostArgs := []string{"--host", s.d.Sock()} err = waitInspectWithArgs("test1", "{{.State.Running}} {{.State.Restarting}}", "false false", 10*time.Second, hostArgs...) c.Assert(err, checker.IsNil, check.Commentf("test1 should exit but not")) @@ -205,7 +206,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithInvalidBasesize(c *check.C) { testRequires(c, Devicemapper) c.Assert(s.d.Start(), check.IsNil) - oldBasesizeBytes := s.d.getBaseDeviceSize(c) + oldBasesizeBytes := s.d.GetBaseDeviceSize(c) var newBasesizeBytes int64 = 1073741824 //1GB in bytes if newBasesizeBytes < oldBasesizeBytes { @@ -220,7 +221,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithIncreasedBasesize(c *check.C) { testRequires(c, Devicemapper) c.Assert(s.d.Start(), check.IsNil) - oldBasesizeBytes := s.d.getBaseDeviceSize(c) + oldBasesizeBytes := s.d.GetBaseDeviceSize(c) var newBasesizeBytes int64 = 53687091200 //50GB in bytes @@ -231,7 +232,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithIncreasedBasesize(c *check.C) { err := s.d.Restart("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes)) c.Assert(err, check.IsNil, check.Commentf("we should have been able to start the daemon with increased base device size: %v", err)) - basesizeAfterRestart := s.d.getBaseDeviceSize(c) + basesizeAfterRestart := s.d.GetBaseDeviceSize(c) newBasesize, err := convertBasesize(newBasesizeBytes) c.Assert(err, check.IsNil, check.Commentf("Error in converting base device size: %v", err)) 
c.Assert(newBasesize, check.Equals, basesizeAfterRestart, check.Commentf("Basesize passed is not equal to Basesize set")) @@ -466,7 +467,8 @@ func (s *DockerDaemonSuite) TestDaemonLogLevelDebug(c *check.C) { if err := s.d.Start("--log-level=debug"); err != nil { c.Fatal(err) } - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) if !strings.Contains(string(content), `level=debug`) { c.Fatalf(`Missing level="debug" in log file:\n%s`, string(content)) } @@ -477,7 +479,8 @@ func (s *DockerDaemonSuite) TestDaemonLogLevelFatal(c *check.C) { if err := s.d.Start("--log-level=fatal"); err != nil { c.Fatal(err) } - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) if strings.Contains(string(content), `level=debug`) { c.Fatalf(`Should not have level="debug" in log file:\n%s`, string(content)) } @@ -487,7 +490,8 @@ func (s *DockerDaemonSuite) TestDaemonFlagD(c *check.C) { if err := s.d.Start("-D"); err != nil { c.Fatal(err) } - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) if !strings.Contains(string(content), `level=debug`) { c.Fatalf(`Should have level="debug" in log file using -D:\n%s`, string(content)) } @@ -497,7 +501,8 @@ func (s *DockerDaemonSuite) TestDaemonFlagDebug(c *check.C) { if err := s.d.Start("--debug"); err != nil { c.Fatal(err) } - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) if !strings.Contains(string(content), `level=debug`) { c.Fatalf(`Should have level="debug" in log file using --debug:\n%s`, string(content)) } @@ -507,7 +512,8 @@ func (s *DockerDaemonSuite) TestDaemonFlagDebugLogLevelFatal(c *check.C) { if err := s.d.Start("--debug", "--log-level=fatal"); err != nil { c.Fatal(err) } - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + content, err := s.d.ReadLogFile() + 
c.Assert(err, checker.IsNil) if !strings.Contains(string(content), `level=debug`) { c.Fatalf(`Should have level="debug" in log file when using both --debug and --log-level=fatal:\n%s`, string(content)) } @@ -636,7 +642,7 @@ func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *check.C) { _, err = d.Cmd("run", "-d", "--name", "ExtContainer", "busybox", "top") c.Assert(err, check.IsNil) - containerIP := d.findContainerIP("ExtContainer") + containerIP := d.FindContainerIP("ExtContainer") ip := net.ParseIP(containerIP) c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, check.Commentf("Container IP-Address must be in the same subnet range : %s", @@ -731,7 +737,7 @@ func (s *DockerDaemonSuite) TestDaemonBridgeIP(c *check.C) { out, err = d.Cmd("run", "-d", "--name", "test", "busybox", "top") c.Assert(err, check.IsNil) - containerIP := d.findContainerIP("test") + containerIP := d.FindContainerIP("test") ip = net.ParseIP(containerIP) c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, check.Commentf("Container IP-Address must be in the same subnet range : %s", @@ -1041,8 +1047,8 @@ func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *che _, err = s.d.Cmd("run", "-d", "--name", "parent", "--link", "child:http", "busybox", "top") c.Assert(err, check.IsNil) - childIP := s.d.findContainerIP("child") - parentIP := s.d.findContainerIP("parent") + childIP := s.d.FindContainerIP("child") + parentIP := s.d.FindContainerIP("parent") sourceRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"} destinationRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"} @@ -1140,10 +1146,10 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *check.C) { out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") c.Assert(err, check.IsNil, check.Commentf(out)) - id, err := 
s.d.getIDByName("test") + id, err := s.d.GetIDByName("test") c.Assert(err, check.IsNil) - logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") + logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log") if _, err := os.Stat(logPath); err != nil { c.Fatal(err) @@ -1182,10 +1188,10 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *check.C) { if err != nil { c.Fatal(out, err) } - id, err := s.d.getIDByName("test") + id, err := s.d.GetIDByName("test") c.Assert(err, check.IsNil) - logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") + logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log") if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) @@ -1201,10 +1207,10 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *check.C) { if err != nil { c.Fatal(out, err) } - id, err := s.d.getIDByName("test") + id, err := s.d.GetIDByName("test") c.Assert(err, check.IsNil) - logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log") + logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log") if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) @@ -1220,10 +1226,10 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *check.C) { if err != nil { c.Fatal(out, err) } - id, err := s.d.getIDByName("test") + id, err := s.d.GetIDByName("test") c.Assert(err, check.IsNil) - logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") + logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log") if _, err := os.Stat(logPath); err != nil { c.Fatal(err) @@ -1340,7 +1346,8 @@ func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) { c.Fatalf("It should not be successful to start daemon with wrong key: %v", err) } - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + content, err := 
s.d.ReadLogFile() + c.Assert(err, checker.IsNil) if !strings.Contains(string(content), "Public Key ID does not match") { c.Fatal("Missing KeyID message from daemon logs") @@ -1496,10 +1503,10 @@ func (s *DockerDaemonSuite) TestHTTPSInfoRogueServerCert(c *check.C) { } } -func pingContainers(c *check.C, d *Daemon, expectFailure bool) { +func pingContainers(c *check.C, d *daemon.Daemon, expectFailure bool) { var dargs []string if d != nil { - dargs = []string{"--host", d.sock()} + dargs = []string{"--host", d.Sock()} } args := append(dargs, "run", "-d", "--name", "container1", "busybox", "top") @@ -1523,7 +1530,8 @@ func pingContainers(c *check.C, d *Daemon, expectFailure bool) { func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *check.C) { c.Assert(s.d.StartWithBusybox(), check.IsNil) - socket := filepath.Join(s.d.folder, "docker.sock") + // socket := filepath.Join(s.d.folder, "docker.sock") + socket := s.d.Sock() out, err := s.d.Cmd("run", "--restart=always", "-v", socket+":/sock", "busybox") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) @@ -1538,12 +1546,12 @@ func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *chec out, err := s.d.Cmd("run", "-d", "busybox", "top") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) id := strings.TrimSpace(out) - c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil) + c.Assert(s.d.Signal(os.Kill), check.IsNil) mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) // container mounts should exist even after daemon has crashed. 
- comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.Root, mountOut) c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment) // kill the container @@ -1560,7 +1568,7 @@ func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *chec // Now, container mounts should be gone. mountOut, err = ioutil.ReadFile("/proc/self/mountinfo") c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) - comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.Root, mountOut) c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) } @@ -1573,14 +1581,14 @@ func (s *DockerDaemonSuite) TestCleanupMountsAfterGracefulShutdown(c *check.C) { id := strings.TrimSpace(out) // Send SIGINT and daemon should clean up - c.Assert(s.d.cmd.Process.Signal(os.Interrupt), check.IsNil) + c.Assert(s.d.Signal(os.Interrupt), check.IsNil) // Wait for the daemon to stop. 
- c.Assert(<-s.d.wait, checker.IsNil) + c.Assert(<-s.d.Wait, checker.IsNil) mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) - comment := check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + comment := check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.Root, mountOut) c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) } @@ -1813,18 +1821,20 @@ func (s *DockerDaemonSuite) TestDaemonCorruptedFluentdAddress(c *check.C) { } } +// FIXME(vdemeester) Use a new daemon instance instead of the Suite one func (s *DockerDaemonSuite) TestDaemonStartWithoutHost(c *check.C) { - s.d.useDefaultHost = true + s.d.UseDefaultHost = true defer func() { - s.d.useDefaultHost = false + s.d.UseDefaultHost = false }() c.Assert(s.d.Start(), check.IsNil) } +// FIXME(vdemeester) Use a new daemon instance instead of the Suite one func (s *DockerDaemonSuite) TestDaemonStartWithDefalutTLSHost(c *check.C) { - s.d.useDefaultTLSHost = true + s.d.UseDefaultTLSHost = true defer func() { - s.d.useDefaultTLSHost = false + s.d.UseDefaultTLSHost = false }() if err := s.d.Start( "--tlsverify", @@ -2144,12 +2154,12 @@ func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonCrash(c *check.C) { c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) id := strings.TrimSpace(out) - c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil) + c.Assert(s.d.Signal(os.Kill), check.IsNil) mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) // container mounts should exist even after daemon has crashed. 
- comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.Root, mountOut) c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment) // restart daemon. @@ -2172,7 +2182,7 @@ func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonCrash(c *check.C) { // Now, container mounts should be gone. mountOut, err = ioutil.ReadFile("/proc/self/mountinfo") c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) - comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.Root, mountOut) c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) } @@ -2350,7 +2360,7 @@ func (s *DockerDaemonSuite) TestDaemonDiscoveryBackendConfigReload(c *check.C) { _, err = configFile.Write([]byte(daemonConfig)) c.Assert(err, checker.IsNil) - err = s.d.reloadConfig() + err = s.d.ReloadConfig() c.Assert(err, checker.IsNil, check.Commentf("error reloading daemon config")) out, err := s.d.Cmd("info") @@ -2380,7 +2390,8 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrency(c *check.C) { expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 6"` expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 8"` - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) } @@ -2402,7 +2413,8 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFile(c *check.C) { expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent 
Uploads: 5"` expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 8"` - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) @@ -2412,13 +2424,15 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFile(c *check.C) { fmt.Fprintf(configFile, "%s", daemonConfig) configFile.Close() - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) + // syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) time.Sleep(3 * time.Second) expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 7"` expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 9"` - content, _ = ioutil.ReadFile(s.d.logFile.Name()) + content, err = s.d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) } @@ -2440,7 +2454,8 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *chec expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 5"` expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 3"` - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) @@ -2450,13 +2465,15 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *chec fmt.Fprintf(configFile, "%s", daemonConfig) configFile.Close() - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) + // 
syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) time.Sleep(3 * time.Second) expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 1"` expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 3"` - content, _ = ioutil.ReadFile(s.d.logFile.Name()) + content, err = s.d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) @@ -2466,13 +2483,14 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *chec fmt.Fprintf(configFile, "%s", daemonConfig) configFile.Close() - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) time.Sleep(3 * time.Second) expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 5"` expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 3"` - content, _ = ioutil.ReadFile(s.d.logFile.Name()) + content, err = s.d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) } @@ -2480,8 +2498,9 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *chec func (s *DockerDaemonSuite) TestBuildOnDisabledBridgeNetworkDaemon(c *check.C) { err := s.d.StartWithBusybox("-b=none", "--iptables=false") c.Assert(err, check.IsNil) - s.d.c.Logf("dockerBinary %s", dockerBinary) - out, code, err := s.d.buildImageWithOut("busyboxs", + // s.d.c.Logf("dockerBinary %s", dockerBinary) + c.Logf("dockerBinary %s", dockerBinary) + out, code, err := s.d.BuildImageWithOut("busyboxs", `FROM busybox RUN cat /etc/hosts`, false) comment := check.Commentf("Failed to build image. 
output %s, exitCode %d, err %v", out, code, err) @@ -2576,7 +2595,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) { } ` ioutil.WriteFile(configName, []byte(config), 0644) - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) // Give daemon time to reload config <-time.After(1 * time.Second) @@ -2605,11 +2624,12 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) { } ` ioutil.WriteFile(configName, []byte(config), 0644) - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) // Give daemon time to reload config <-time.After(1 * time.Second) - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, `file configuration validation failed (runtime name 'runc' is reserved)`) // Check that we can select a default runtime @@ -2630,7 +2650,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) { } ` ioutil.WriteFile(configName, []byte(config), 0644) - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) // Give daemon time to reload config <-time.After(1 * time.Second) @@ -2688,7 +2708,8 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *check.C) { err = s.d.Start("--add-runtime", "runc=my-runc") c.Assert(err, check.NotNil) - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, `runtime name 'runc' is reserved`) // Check that we can select a default runtime @@ -2778,18 +2799,18 @@ func (s *DockerDaemonSuite) TestDaemonBackcompatPre17Volumes(c *check.C) { out, err = d.Cmd("inspect", "--type=image", "--format={{.ID}}", "busybox:latest") c.Assert(err, checker.IsNil, check.Commentf(out)) c.Assert(d.Stop(), checker.IsNil) - 
<-d.wait + <-d.Wait imageID := strings.TrimSpace(out) volumeID := stringid.GenerateNonCryptoID() - vfsPath := filepath.Join(d.root, "vfs", "dir", volumeID) + vfsPath := filepath.Join(d.Root, "vfs", "dir", volumeID) c.Assert(os.MkdirAll(vfsPath, 0755), checker.IsNil) config := []byte(` { "ID": "` + id + `", "Name": "hello", - "Driver": "` + d.storageDriver + `", + "Driver": "` + d.StorageDriver() + `", "Image": "` + imageID + `", "Config": {"Image": "busybox:latest"}, "NetworkSettings": {}, @@ -2806,7 +2827,7 @@ func (s *DockerDaemonSuite) TestDaemonBackcompatPre17Volumes(c *check.C) { } `) - configPath := filepath.Join(d.root, "containers", id, "config.v2.json") + configPath := filepath.Join(d.Root, "containers", id, "config.v2.json") err = ioutil.WriteFile(configPath, config, 600) err = d.Start() c.Assert(err, checker.IsNil) @@ -2883,15 +2904,16 @@ func (s *DockerDaemonSuite) TestDaemonShutdownTimeout(c *check.C) { _, err := s.d.Cmd("run", "-d", "busybox", "top") c.Assert(err, check.IsNil) - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGINT) + c.Assert(s.d.Signal(syscall.SIGINT), checker.IsNil) select { - case <-s.d.wait: + case <-s.d.Wait: case <-time.After(5 * time.Second): } expectedMessage := `level=debug msg="start clean shutdown of all containers with a 3 seconds timeout..."` - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, expectedMessage) } @@ -2916,14 +2938,15 @@ func (s *DockerDaemonSuite) TestDaemonShutdownTimeoutWithConfigFile(c *check.C) fmt.Fprintf(configFile, "%s", daemonConfig) configFile.Close() - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) select { - case <-s.d.wait: + case <-s.d.Wait: case <-time.After(3 * time.Second): } expectedMessage := `level=debug msg="Reset Shutdown Timeout: 5"` - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + content, err := s.d.ReadLogFile() + 
c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, expectedMessage) } diff --git a/integration-cli/docker_cli_events_unix_test.go b/integration-cli/docker_cli_events_unix_test.go index dc91667116..e7641ff048 100644 --- a/integration-cli/docker_cli_events_unix_test.go +++ b/integration-cli/docker_cli_events_unix_test.go @@ -422,7 +422,7 @@ func (s *DockerDaemonSuite) TestDaemonEvents(c *check.C) { fmt.Fprintf(configFile, "%s", daemonConfig) configFile.Close() - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) time.Sleep(3 * time.Second) @@ -460,7 +460,7 @@ func (s *DockerDaemonSuite) TestDaemonEventsWithFilters(c *check.C) { } c.Assert(daemonID, checker.Not(checker.Equals), "") - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) time.Sleep(3 * time.Second) diff --git a/integration-cli/docker_cli_external_graphdriver_unix_test.go b/integration-cli/docker_cli_external_graphdriver_unix_test.go index 5a35c6893e..bdcc9fa507 100644 --- a/integration-cli/docker_cli_external_graphdriver_unix_test.go +++ b/integration-cli/docker_cli_external_graphdriver_unix_test.go @@ -14,6 +14,7 @@ import ( "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/vfs" + "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/plugins" "github.com/go-check/check" @@ -29,7 +30,7 @@ type DockerExternalGraphdriverSuite struct { server *httptest.Server jserver *httptest.Server ds *DockerSuite - d *Daemon + d *daemon.Daemon ec map[string]*graphEventsCounter } @@ -51,7 +52,9 @@ type graphEventsCounter struct { } func (s *DockerExternalGraphdriverSuite) SetUpTest(c *check.C) { - s.d = NewDaemon(c) + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: experimentalDaemon, + }) } func (s *DockerExternalGraphdriverSuite) TearDownTest(c *check.C) { 
diff --git a/integration-cli/docker_cli_external_volume_driver_unix_test.go b/integration-cli/docker_cli_external_volume_driver_unix_test.go index b5fb0b2fb1..e773a1dcb0 100644 --- a/integration-cli/docker_cli_external_volume_driver_unix_test.go +++ b/integration-cli/docker_cli_external_volume_driver_unix_test.go @@ -16,6 +16,7 @@ import ( "time" "github.com/docker/docker/api/types" + "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/volume" @@ -44,12 +45,14 @@ type eventCounter struct { type DockerExternalVolumeSuite struct { ds *DockerSuite - d *Daemon + d *daemon.Daemon *volumePlugin } func (s *DockerExternalVolumeSuite) SetUpTest(c *check.C) { - s.d = NewDaemon(c) + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: experimentalDaemon, + }) s.ec = &eventCounter{} } diff --git a/integration-cli/docker_cli_info_test.go b/integration-cli/docker_cli_info_test.go index 62ce7e22f2..7f37f6b6ae 100644 --- a/integration-cli/docker_cli_info_test.go +++ b/integration-cli/docker_cli_info_test.go @@ -6,6 +6,7 @@ import ( "net" "strings" + "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) @@ -70,7 +71,9 @@ func (s *DockerSuite) TestInfoFormat(c *check.C) { func (s *DockerSuite) TestInfoDiscoveryBackend(c *check.C) { testRequires(c, SameHostDaemon, DaemonIsLinux) - d := NewDaemon(c) + d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: experimentalDaemon, + }) discoveryBackend := "consul://consuladdr:consulport/some/path" discoveryAdvertise := "1.1.1.1:2375" err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s", discoveryAdvertise)) @@ -88,7 +91,9 @@ func (s *DockerSuite) TestInfoDiscoveryBackend(c *check.C) { func (s *DockerSuite) TestInfoDiscoveryInvalidAdvertise(c *check.C) 
{ testRequires(c, SameHostDaemon, DaemonIsLinux) - d := NewDaemon(c) + d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: experimentalDaemon, + }) discoveryBackend := "consul://consuladdr:consulport/some/path" // --cluster-advertise with an invalid string is an error @@ -105,7 +110,9 @@ func (s *DockerSuite) TestInfoDiscoveryInvalidAdvertise(c *check.C) { func (s *DockerSuite) TestInfoDiscoveryAdvertiseInterfaceName(c *check.C) { testRequires(c, SameHostDaemon, Network, DaemonIsLinux) - d := NewDaemon(c) + d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: experimentalDaemon, + }) discoveryBackend := "consul://consuladdr:consulport/some/path" discoveryAdvertise := "eth0" @@ -171,7 +178,9 @@ func (s *DockerSuite) TestInfoDisplaysStoppedContainers(c *check.C) { func (s *DockerSuite) TestInfoDebug(c *check.C) { testRequires(c, SameHostDaemon, DaemonIsLinux) - d := NewDaemon(c) + d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: experimentalDaemon, + }) err := d.Start("--debug") c.Assert(err, checker.IsNil) defer d.Stop() @@ -193,7 +202,9 @@ func (s *DockerSuite) TestInsecureRegistries(c *check.C) { registryCIDR := "192.168.1.0/24" registryHost := "insecurehost.com:5000" - d := NewDaemon(c) + d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: experimentalDaemon, + }) err := d.Start("--insecure-registry="+registryCIDR, "--insecure-registry="+registryHost) c.Assert(err, checker.IsNil) defer d.Stop() diff --git a/integration-cli/docker_cli_network_unix_test.go b/integration-cli/docker_cli_network_unix_test.go index 0e3b88cad1..7b29118af3 100644 --- a/integration-cli/docker_cli_network_unix_test.go +++ b/integration-cli/docker_cli_network_unix_test.go @@ -16,6 +16,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/integration-cli/daemon" 
"github.com/docker/docker/pkg/integration/checker" icmd "github.com/docker/docker/pkg/integration/cmd" "github.com/docker/docker/pkg/stringid" @@ -43,11 +44,13 @@ func init() { type DockerNetworkSuite struct { server *httptest.Server ds *DockerSuite - d *Daemon + d *daemon.Daemon } func (s *DockerNetworkSuite) SetUpTest(c *check.C) { - s.d = NewDaemon(c) + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: experimentalDaemon, + }) } func (s *DockerNetworkSuite) TearDownTest(c *check.C) { @@ -994,9 +997,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *check.C c.Assert(err, checker.IsNil) // Kill daemon and restart - if err = s.d.cmd.Process.Kill(); err != nil { - c.Fatal(err) - } + c.Assert(s.d.Kill(), checker.IsNil) server.Close() @@ -1064,7 +1065,7 @@ func (s *DockerSuite) TestInspectAPIMultipleNetworks(c *check.C) { c.Assert(bridge.IPAddress, checker.Equals, inspect121.NetworkSettings.IPAddress) } -func connectContainerToNetworks(c *check.C, d *Daemon, cName string, nws []string) { +func connectContainerToNetworks(c *check.C, d *daemon.Daemon, cName string, nws []string) { // Run a container on the default network out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) @@ -1078,7 +1079,7 @@ func connectContainerToNetworks(c *check.C, d *Daemon, cName string, nws []strin } } -func verifyContainerIsConnectedToNetworks(c *check.C, d *Daemon, cName string, nws []string) { +func verifyContainerIsConnectedToNetworks(c *check.C, d *daemon.Daemon, cName string, nws []string) { // Verify container is connected to all the networks for _, nw := range nws { out, err := d.Cmd("inspect", "-f", fmt.Sprintf("{{.NetworkSettings.Networks.%s}}", nw), cName) @@ -1115,10 +1116,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksUngracefulDaemonRe verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) // Kill daemon and restart - if err := 
s.d.cmd.Process.Kill(); err != nil { - c.Fatal(err) - } - s.d.Restart() + c.Assert(s.d.Kill(), checker.IsNil) + c.Assert(s.d.Restart(), checker.IsNil) // Restart container _, err := s.d.Cmd("start", cName) @@ -1144,21 +1143,17 @@ func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c c.Assert(err, checker.IsNil, check.Commentf(out)) // verfiy container has finished starting before killing daemon - err = s.d.waitRun(cName) + err = s.d.WaitRun(cName) c.Assert(err, checker.IsNil) } // Kill daemon ungracefully and restart - if err := s.d.cmd.Process.Kill(); err != nil { - c.Fatal(err) - } - if err := s.d.Restart(); err != nil { - c.Fatal(err) - } + c.Assert(s.d.Kill(), checker.IsNil) + c.Assert(s.d.Restart(), checker.IsNil) // make sure all the containers are up and running for i := 0; i < 10; i++ { - err := s.d.waitRun(fmt.Sprintf("hostc-%d", i)) + err := s.d.WaitRun(fmt.Sprintf("hostc-%d", i)) c.Assert(err, checker.IsNil) } } diff --git a/integration-cli/docker_cli_prune_unix_test.go b/integration-cli/docker_cli_prune_unix_test.go index dabbc72081..85a9cc8c23 100644 --- a/integration-cli/docker_cli_prune_unix_test.go +++ b/integration-cli/docker_cli_prune_unix_test.go @@ -6,11 +6,12 @@ import ( "strconv" "strings" + "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) -func pruneNetworkAndVerify(c *check.C, d *SwarmDaemon, kept, pruned []string) { +func pruneNetworkAndVerify(c *check.C, d *daemon.Swarm, kept, pruned []string) { _, err := d.Cmd("network", "prune", "--force") c.Assert(err, checker.IsNil) out, err := d.Cmd("network", "ls", "--format", "{{.Name}}") @@ -46,7 +47,7 @@ func (s *DockerSwarmSuite) TestPruneNetwork(c *check.C) { "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, replicas+1) + 
waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, replicas+1) // prune and verify pruneNetworkAndVerify(c, d, []string{"n1", "n3"}, []string{"n2", "n4"}) @@ -56,14 +57,14 @@ func (s *DockerSwarmSuite) TestPruneNetwork(c *check.C) { c.Assert(err, checker.IsNil) _, err = d.Cmd("service", "rm", serviceName) c.Assert(err, checker.IsNil) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0) pruneNetworkAndVerify(c, d, []string{}, []string{"n1", "n3"}) } func (s *DockerDaemonSuite) TestPruneImageDangling(c *check.C) { c.Assert(s.d.StartWithBusybox(), checker.IsNil) - out, _, err := s.d.buildImageWithOut("test", + out, _, err := s.d.BuildImageWithOut("test", `FROM busybox LABEL foo=bar`, true, "-q") c.Assert(err, checker.IsNil) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index fa967367e4..151940b7fa 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -4439,7 +4439,7 @@ func (s *DockerDaemonSuite) TestRunWithUlimitAndDaemonDefault(c *check.C) { name := "test-A" _, err := s.d.Cmd("run", "--name", name, "-d", "busybox", "top") c.Assert(err, checker.IsNil) - c.Assert(s.d.waitRun(name), check.IsNil) + c.Assert(s.d.WaitRun(name), check.IsNil) out, err := s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name) c.Assert(err, checker.IsNil) @@ -4448,7 +4448,7 @@ func (s *DockerDaemonSuite) TestRunWithUlimitAndDaemonDefault(c *check.C) { name = "test-B" _, err = s.d.Cmd("run", "--name", name, "--ulimit=nofile=42", "-d", "busybox", "top") c.Assert(err, checker.IsNil) - c.Assert(s.d.waitRun(name), check.IsNil) + c.Assert(s.d.WaitRun(name), check.IsNil) out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name) c.Assert(err, checker.IsNil) diff --git 
a/integration-cli/docker_cli_secret_create_test.go b/integration-cli/docker_cli_secret_create_test.go index 9c45f8a0ae..fdfc793e16 100644 --- a/integration-cli/docker_cli_secret_create_test.go +++ b/integration-cli/docker_cli_secret_create_test.go @@ -12,7 +12,7 @@ func (s *DockerSwarmSuite) TestSecretCreate(c *check.C) { d := s.AddDaemon(c, true, true) testName := "test_secret" - id := d.createSecret(c, swarm.SecretSpec{ + id := d.CreateSecret(c, swarm.SecretSpec{ swarm.Annotations{ Name: testName, }, @@ -20,7 +20,7 @@ func (s *DockerSwarmSuite) TestSecretCreate(c *check.C) { }) c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) - secret := d.getSecret(c, id) + secret := d.GetSecret(c, id) c.Assert(secret.Spec.Name, checker.Equals, testName) } @@ -28,7 +28,7 @@ func (s *DockerSwarmSuite) TestSecretCreateWithLabels(c *check.C) { d := s.AddDaemon(c, true, true) testName := "test_secret" - id := d.createSecret(c, swarm.SecretSpec{ + id := d.CreateSecret(c, swarm.SecretSpec{ swarm.Annotations{ Name: testName, Labels: map[string]string{ @@ -40,7 +40,7 @@ func (s *DockerSwarmSuite) TestSecretCreateWithLabels(c *check.C) { }) c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) - secret := d.getSecret(c, id) + secret := d.GetSecret(c, id) c.Assert(secret.Spec.Name, checker.Equals, testName) c.Assert(len(secret.Spec.Labels), checker.Equals, 2) c.Assert(secret.Spec.Labels["key1"], checker.Equals, "value1") @@ -52,7 +52,7 @@ func (s *DockerSwarmSuite) TestSecretCreateResolve(c *check.C) { d := s.AddDaemon(c, true, true) name := "foo" - id := d.createSecret(c, swarm.SecretSpec{ + id := d.CreateSecret(c, swarm.SecretSpec{ swarm.Annotations{ Name: name, }, @@ -60,7 +60,7 @@ func (s *DockerSwarmSuite) TestSecretCreateResolve(c *check.C) { }) c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) - fake := d.createSecret(c, swarm.SecretSpec{ + fake := d.CreateSecret(c, swarm.SecretSpec{ 
swarm.Annotations{ Name: id, }, diff --git a/integration-cli/docker_cli_secret_inspect_test.go b/integration-cli/docker_cli_secret_inspect_test.go index 0985a2bd59..3a4f0edc8a 100644 --- a/integration-cli/docker_cli_secret_inspect_test.go +++ b/integration-cli/docker_cli_secret_inspect_test.go @@ -14,7 +14,7 @@ func (s *DockerSwarmSuite) TestSecretInspect(c *check.C) { d := s.AddDaemon(c, true, true) testName := "test_secret" - id := d.createSecret(c, swarm.SecretSpec{ + id := d.CreateSecret(c, swarm.SecretSpec{ swarm.Annotations{ Name: testName, }, @@ -22,7 +22,7 @@ func (s *DockerSwarmSuite) TestSecretInspect(c *check.C) { }) c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) - secret := d.getSecret(c, id) + secret := d.GetSecret(c, id) c.Assert(secret.Spec.Name, checker.Equals, testName) out, err := d.Cmd("secret", "inspect", testName) @@ -41,7 +41,7 @@ func (s *DockerSwarmSuite) TestSecretInspectMultiple(c *check.C) { "test1", } for _, n := range testNames { - id := d.createSecret(c, swarm.SecretSpec{ + id := d.CreateSecret(c, swarm.SecretSpec{ swarm.Annotations{ Name: n, }, @@ -49,7 +49,7 @@ func (s *DockerSwarmSuite) TestSecretInspectMultiple(c *check.C) { }) c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) - secret := d.getSecret(c, id) + secret := d.GetSecret(c, id) c.Assert(secret.Spec.Name, checker.Equals, n) } diff --git a/integration-cli/docker_cli_service_create_test.go b/integration-cli/docker_cli_service_create_test.go index 9e8b1e9956..0fabd7a267 100644 --- a/integration-cli/docker_cli_service_create_test.go +++ b/integration-cli/docker_cli_service_create_test.go @@ -22,14 +22,14 @@ func (s *DockerSwarmSuite) TestServiceCreateMountVolume(c *check.C) { var tasks []swarm.Task waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - tasks = d.getServiceTasks(c, id) + tasks = d.GetServiceTasks(c, id) return len(tasks) > 0, nil }, 
checker.Equals, true) task := tasks[0] waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { - task = d.getTask(c, task.ID) + task = d.GetTask(c, task.ID) } return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil }, checker.Equals, true) @@ -67,7 +67,7 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretSimple(c *check.C) { serviceName := "test-service-secret" testName := "test_secret" - id := d.createSecret(c, swarm.SecretSpec{ + id := d.CreateSecret(c, swarm.SecretSpec{ swarm.Annotations{ Name: testName, }, @@ -97,7 +97,7 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTarget(c *check.C) { serviceName := "test-service-secret" testName := "test_secret" - id := d.createSecret(c, swarm.SecretSpec{ + id := d.CreateSecret(c, swarm.SecretSpec{ swarm.Annotations{ Name: testName, }, @@ -129,14 +129,14 @@ func (s *DockerSwarmSuite) TestServiceCreateMountTmpfs(c *check.C) { var tasks []swarm.Task waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - tasks = d.getServiceTasks(c, id) + tasks = d.GetServiceTasks(c, id) return len(tasks) > 0, nil }, checker.Equals, true) task := tasks[0] waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { - task = d.getTask(c, task.ID) + task = d.GetTask(c, task.ID) } return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil }, checker.Equals, true) diff --git a/integration-cli/docker_cli_service_health_test.go b/integration-cli/docker_cli_service_health_test.go index 30580f6be3..81df257e56 100644 --- a/integration-cli/docker_cli_service_health_test.go +++ b/integration-cli/docker_cli_service_health_test.go @@ -22,7 +22,7 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) { // 
build image with health-check // note: use `daemon.buildImageWithOut` to build, do not use `buildImage` to build imageName := "testhealth" - _, _, err := d.buildImageWithOut(imageName, + _, _, err := d.BuildImageWithOut(imageName, `FROM busybox RUN touch /status HEALTHCHECK --interval=1s --timeout=1s --retries=1\ @@ -37,7 +37,7 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) { var tasks []swarm.Task waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - tasks = d.getServiceTasks(c, id) + tasks = d.GetServiceTasks(c, id) return tasks, nil }, checker.HasLen, 1) @@ -45,7 +45,7 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) { // wait for task to start waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - task = d.getTask(c, task.ID) + task = d.GetTask(c, task.ID) return task.Status.State, nil }, checker.Equals, swarm.TaskStateRunning) containerID := task.Status.ContainerStatus.ContainerID @@ -66,7 +66,7 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) { // Task should be terminated waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - task = d.getTask(c, task.ID) + task = d.GetTask(c, task.ID) return task.Status.State, nil }, checker.Equals, swarm.TaskStateFailed) @@ -84,7 +84,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) { // service started from this image won't pass health check imageName := "testhealth" - _, _, err := d.buildImageWithOut(imageName, + _, _, err := d.BuildImageWithOut(imageName, `FROM busybox HEALTHCHECK --interval=1s --timeout=1s --retries=1024\ CMD cat /status`, @@ -98,7 +98,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) { var tasks []swarm.Task waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - tasks = d.getServiceTasks(c, id) + tasks = d.GetServiceTasks(c, 
id) return tasks, nil }, checker.HasLen, 1) @@ -106,7 +106,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) { // wait for task to start waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - task = d.getTask(c, task.ID) + task = d.GetTask(c, task.ID) return task.Status.State, nil }, checker.Equals, swarm.TaskStateStarting) @@ -120,7 +120,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) { }, checker.GreaterThan, 0) // task should be blocked at starting status - task = d.getTask(c, task.ID) + task = d.GetTask(c, task.ID) c.Assert(task.Status.State, check.Equals, swarm.TaskStateStarting) // make it healthy @@ -128,7 +128,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) { // Task should be at running status waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - task = d.getTask(c, task.ID) + task = d.GetTask(c, task.ID) return task.Status.State, nil }, checker.Equals, swarm.TaskStateRunning) } @@ -142,7 +142,7 @@ func (s *DockerSwarmSuite) TestServiceHealthUpdate(c *check.C) { // service started from this image won't pass health check imageName := "testhealth" - _, _, err := d.buildImageWithOut(imageName, + _, _, err := d.BuildImageWithOut(imageName, `FROM busybox HEALTHCHECK --interval=1s --timeout=1s --retries=1024\ CMD cat /status`, @@ -156,7 +156,7 @@ func (s *DockerSwarmSuite) TestServiceHealthUpdate(c *check.C) { var tasks []swarm.Task waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - tasks = d.getServiceTasks(c, id) + tasks = d.GetServiceTasks(c, id) return tasks, nil }, checker.HasLen, 1) @@ -164,7 +164,7 @@ func (s *DockerSwarmSuite) TestServiceHealthUpdate(c *check.C) { // wait for task to start waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - task = d.getTask(c, task.ID) + task = d.GetTask(c, 
task.ID) return task.Status.State, nil }, checker.Equals, swarm.TaskStateStarting) @@ -178,14 +178,14 @@ func (s *DockerSwarmSuite) TestServiceHealthUpdate(c *check.C) { }, checker.GreaterThan, 0) // task should be blocked at starting status - task = d.getTask(c, task.ID) + task = d.GetTask(c, task.ID) c.Assert(task.Status.State, check.Equals, swarm.TaskStateStarting) // make it healthy d.Cmd("exec", containerID, "touch", "/status") // Task should be at running status waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - task = d.getTask(c, task.ID) + task = d.GetTask(c, task.ID) return task.Status.State, nil }, checker.Equals, swarm.TaskStateRunning) } diff --git a/integration-cli/docker_cli_service_logs_experimental_test.go b/integration-cli/docker_cli_service_logs_experimental_test.go index c2216543d7..0c4adcecfb 100644 --- a/integration-cli/docker_cli_service_logs_experimental_test.go +++ b/integration-cli/docker_cli_service_logs_experimental_test.go @@ -38,7 +38,7 @@ func (s *DockerSwarmSuite) TestServiceLogs(c *check.C) { // make sure task has been deployed. waitAndAssert(c, defaultReconciliationTimeout, - d.checkActiveContainerCount, checker.Equals, len(services)) + d.CheckActiveContainerCount, checker.Equals, len(services)) for name, message := range services { out, err := d.Cmd("service", "logs", name) @@ -60,10 +60,10 @@ func (s *DockerSwarmSuite) TestServiceLogsFollow(c *check.C) { c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") // make sure task has been deployed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) args := []string{"service", "logs", "-f", name} - cmd := exec.Command(dockerBinary, d.prependHostArg(args)...) + cmd := exec.Command(dockerBinary, d.PrependHostArg(args)...) 
r, w := io.Pipe() cmd.Stdout = w cmd.Stderr = w diff --git a/integration-cli/docker_cli_service_update_test.go b/integration-cli/docker_cli_service_update_test.go index 837370ceeb..ccbf3f7635 100644 --- a/integration-cli/docker_cli_service_update_test.go +++ b/integration-cli/docker_cli_service_update_test.go @@ -20,7 +20,7 @@ func (s *DockerSwarmSuite) TestServiceUpdatePort(c *check.C) { // Create a service with a port mapping of 8080:8081. out, err := d.Cmd(serviceArgs...) c.Assert(err, checker.IsNil) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) // Update the service: changed the port mapping from 8080:8081 to 8082:8083. _, err = d.Cmd("service", "update", "--publish-add", "8082:8083", "--publish-rm", "8081", serviceName) @@ -50,39 +50,39 @@ func (s *DockerSwarmSuite) TestServiceUpdateLabel(c *check.C) { d := s.AddDaemon(c, true, true) out, err := d.Cmd("service", "create", "--name=test", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) - service := d.getService(c, "test") + service := d.GetService(c, "test") c.Assert(service.Spec.Labels, checker.HasLen, 0) // add label to empty set out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar") c.Assert(err, checker.IsNil, check.Commentf(out)) - service = d.getService(c, "test") + service = d.GetService(c, "test") c.Assert(service.Spec.Labels, checker.HasLen, 1) c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar") // add label to non-empty set out, err = d.Cmd("service", "update", "test", "--label-add", "foo2=bar") c.Assert(err, checker.IsNil, check.Commentf(out)) - service = d.getService(c, "test") + service = d.GetService(c, "test") c.Assert(service.Spec.Labels, checker.HasLen, 2) c.Assert(service.Spec.Labels["foo2"], checker.Equals, "bar") out, err = d.Cmd("service", "update", "test", "--label-rm", "foo2") c.Assert(err, 
checker.IsNil, check.Commentf(out)) - service = d.getService(c, "test") + service = d.GetService(c, "test") c.Assert(service.Spec.Labels, checker.HasLen, 1) c.Assert(service.Spec.Labels["foo2"], checker.Equals, "") out, err = d.Cmd("service", "update", "test", "--label-rm", "foo") c.Assert(err, checker.IsNil, check.Commentf(out)) - service = d.getService(c, "test") + service = d.GetService(c, "test") c.Assert(service.Spec.Labels, checker.HasLen, 0) c.Assert(service.Spec.Labels["foo"], checker.Equals, "") // now make sure we can add again out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar") c.Assert(err, checker.IsNil, check.Commentf(out)) - service = d.getService(c, "test") + service = d.GetService(c, "test") c.Assert(service.Spec.Labels, checker.HasLen, 1) c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar") } @@ -90,7 +90,7 @@ func (s *DockerSwarmSuite) TestServiceUpdateLabel(c *check.C) { func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) { d := s.AddDaemon(c, true, true) testName := "test_secret" - id := d.createSecret(c, swarm.SecretSpec{ + id := d.CreateSecret(c, swarm.SecretSpec{ swarm.Annotations{ Name: testName, }, @@ -104,7 +104,7 @@ func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) { c.Assert(err, checker.IsNil, check.Commentf(out)) // add secret - out, err = d.cmdRetryOutOfSequence("service", "update", "test", "--secret-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) + out, err = d.CmdRetryOutOfSequence("service", "update", "test", "--secret-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) c.Assert(err, checker.IsNil, check.Commentf(out)) out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) @@ -119,7 +119,7 @@ func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) { c.Assert(refs[0].File.Name, checker.Equals, testTarget) // remove - out, err = d.cmdRetryOutOfSequence("service", "update", "test", 
"--secret-rm", testName) + out, err = d.CmdRetryOutOfSequence("service", "update", "test", "--secret-rm", testName) c.Assert(err, checker.IsNil, check.Commentf(out)) out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) diff --git a/integration-cli/docker_cli_swarm_test.go b/integration-cli/docker_cli_swarm_test.go index 9318598508..daa8a02dde 100644 --- a/integration-cli/docker_cli_swarm_test.go +++ b/integration-cli/docker_cli_swarm_test.go @@ -15,6 +15,7 @@ import ( "time" "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/libnetwork/driverapi" "github.com/docker/libnetwork/ipamapi" @@ -27,7 +28,7 @@ func (s *DockerSwarmSuite) TestSwarmUpdate(c *check.C) { d := s.AddDaemon(c, true, true) getSpec := func() swarm.Spec { - sw := d.getSwarm(c) + sw := d.GetSwarm(c) return sw.Spec } @@ -50,7 +51,7 @@ func (s *DockerSwarmSuite) TestSwarmInit(c *check.C) { d := s.AddDaemon(c, false, false) getSpec := func() swarm.Spec { - sw := d.getSwarm(c) + sw := d.GetSwarm(c) return sw.Spec } @@ -96,7 +97,7 @@ func (s *DockerSwarmSuite) TestSwarmInitUnspecifiedAdvertiseAddr(c *check.C) { func (s *DockerSwarmSuite) TestSwarmIncompatibleDaemon(c *check.C) { // init swarm mode and stop a daemon d := s.AddDaemon(c, true, true) - info, err := d.info() + info, err := d.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) c.Assert(d.Stop(), checker.IsNil) @@ -104,13 +105,15 @@ func (s *DockerSwarmSuite) TestSwarmIncompatibleDaemon(c *check.C) { // start a daemon with --cluster-store and --cluster-advertise err = d.Start("--cluster-store=consul://consuladdr:consulport/some/path", "--cluster-advertise=1.1.1.1:2375") c.Assert(err, checker.NotNil) - content, _ := ioutil.ReadFile(d.logFile.Name()) + content, err := d.ReadLogFile() + c.Assert(err, 
checker.IsNil) c.Assert(string(content), checker.Contains, "--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") // start a daemon with --live-restore err = d.Start("--live-restore") c.Assert(err, checker.NotNil) - content, _ = ioutil.ReadFile(d.logFile.Name()) + content, err = d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, "--live-restore daemon configuration is incompatible with swarm mode") // restart for teardown c.Assert(d.Start(), checker.IsNil) @@ -133,9 +136,9 @@ func (s *DockerSwarmSuite) TestSwarmServiceTemplatingHostname(c *check.C) { c.Assert(err, checker.IsNil, check.Commentf(out)) // make sure task has been deployed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) - containers := d.activeContainers() + containers := d.ActiveContainers() out, err = d.Cmd("inspect", "--type", "container", "--format", "{{.Config.Hostname}}", containers[0]) c.Assert(err, checker.IsNil, check.Commentf(out)) c.Assert(strings.Split(out, "\n")[0], checker.Equals, "test-1", check.Commentf("hostname with templating invalid")) @@ -211,7 +214,7 @@ func (s *DockerSwarmSuite) TestSwarmNodeTaskListFilter(c *check.C) { c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") // make sure task has been deployed. 
- waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 3) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 3) filter := "name=redis-cluster" @@ -240,10 +243,10 @@ func (s *DockerSwarmSuite) TestSwarmPublishAdd(c *check.C) { out, err = d.Cmd("service", "update", "--publish-add", "80:80", name) c.Assert(err, checker.IsNil) - out, err = d.cmdRetryOutOfSequence("service", "update", "--publish-add", "80:80", name) + out, err = d.CmdRetryOutOfSequence("service", "update", "--publish-add", "80:80", name) c.Assert(err, checker.IsNil) - out, err = d.cmdRetryOutOfSequence("service", "update", "--publish-add", "80:80", "--publish-add", "80:20", name) + out, err = d.CmdRetryOutOfSequence("service", "update", "--publish-add", "80:80", "--publish-add", "80:20", name) c.Assert(err, checker.NotNil) out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.EndpointSpec.Ports }}", name) @@ -260,7 +263,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceWithGroup(c *check.C) { c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") // make sure task has been deployed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) out, err = d.Cmd("ps", "-q") c.Assert(err, checker.IsNil) @@ -326,7 +329,7 @@ func (s *DockerSwarmSuite) TestSwarmContainerAttachByNetworkId(c *check.C) { out, err = d.Cmd("run", "-d", "--net", networkID, "busybox", "top") c.Assert(err, checker.IsNil) cID := strings.TrimSpace(out) - d.waitRun(cID) + d.WaitRun(cID) _, err = d.Cmd("rm", "-f", cID) c.Assert(err, checker.IsNil) @@ -449,7 +452,7 @@ func (s *DockerSwarmSuite) TestPsListContainersFilterIsTask(c *check.C) { c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") // make sure task has been deployed. 
- waitAndAssert(c, defaultReconciliationTimeout, d.checkServiceRunningTasks(name), checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckServiceRunningTasks(name), checker.Equals, 1) // Filter non-tasks out, err = d.Cmd("ps", "-a", "-q", "--filter=is-task=false") @@ -664,7 +667,7 @@ func (s *DockerSwarmSuite) TestSwarmNetworkPlugin(c *check.C) { func (s *DockerSwarmSuite) TestSwarmServiceEnvFile(c *check.C) { d := s.AddDaemon(c, true, true) - path := filepath.Join(d.folder, "env.txt") + path := filepath.Join(d.Folder, "env.txt") err := ioutil.WriteFile(path, []byte("VAR1=A\nVAR2=A\n"), 0644) c.Assert(err, checker.IsNil) @@ -692,7 +695,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *check.C) { c.Assert(err, checker.IsNil) // Make sure task has been deployed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) // We need to get the container id. out, err = d.Cmd("ps", "-a", "-q", "--no-trunc") @@ -707,7 +710,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *check.C) { out, err = d.Cmd("service", "rm", name) c.Assert(err, checker.IsNil) // Make sure container has been destroyed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0) // With --tty expectedOutput = "TTY" @@ -715,7 +718,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *check.C) { c.Assert(err, checker.IsNil) // Make sure task has been deployed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) // We need to get the container id. 
out, err = d.Cmd("ps", "-a", "-q", "--no-trunc") @@ -736,7 +739,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceTTYUpdate(c *check.C) { c.Assert(err, checker.IsNil) // Make sure task has been deployed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) out, err := d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.TTY }}", name) c.Assert(err, checker.IsNil) @@ -759,7 +762,7 @@ func (s *DockerSwarmSuite) TestDNSConfig(c *check.C) { c.Assert(err, checker.IsNil) // Make sure task has been deployed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) // We need to get the container id. out, err := d.Cmd("ps", "-a", "-q", "--no-trunc") @@ -786,7 +789,7 @@ func (s *DockerSwarmSuite) TestDNSConfigUpdate(c *check.C) { c.Assert(err, checker.IsNil) // Make sure task has been deployed. 
- waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) _, err = d.Cmd("service", "update", "--dns-add=1.2.3.4", "--dns-search-add=example.com", "--dns-option-add=timeout:3", name) c.Assert(err, checker.IsNil) @@ -796,18 +799,18 @@ func (s *DockerSwarmSuite) TestDNSConfigUpdate(c *check.C) { c.Assert(strings.TrimSpace(out), checker.Equals, "{[1.2.3.4] [example.com] [timeout:3]}") } -func getNodeStatus(c *check.C, d *SwarmDaemon) swarm.LocalNodeState { - info, err := d.info() +func getNodeStatus(c *check.C, d *daemon.Swarm) swarm.LocalNodeState { + info, err := d.SwarmInfo() c.Assert(err, checker.IsNil) return info.LocalNodeState } -func checkSwarmLockedToUnlocked(c *check.C, d *SwarmDaemon, unlockKey string) { +func checkSwarmLockedToUnlocked(c *check.C, d *daemon.Swarm, unlockKey string) { c.Assert(d.Restart(), checker.IsNil) status := getNodeStatus(c, d) if status == swarm.LocalNodeStateLocked { // it must not have updated to be unlocked in time - unlock, wait 3 seconds, and try again - cmd := d.command("swarm", "unlock") + cmd := d.Command("swarm", "unlock") cmd.Stdin = bytes.NewBufferString(unlockKey) out, err := cmd.CombinedOutput() c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) @@ -821,7 +824,7 @@ func checkSwarmLockedToUnlocked(c *check.C, d *SwarmDaemon, unlockKey string) { c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) } -func checkSwarmUnlockedToLocked(c *check.C, d *SwarmDaemon) { +func checkSwarmUnlockedToLocked(c *check.C, d *daemon.Swarm) { c.Assert(d.Restart(), checker.IsNil) status := getNodeStatus(c, d) if status == swarm.LocalNodeStateActive { @@ -859,7 +862,7 @@ func (s *DockerSwarmSuite) TestSwarmInitLocked(c *check.C) { c.Assert(d.Restart(), checker.IsNil) c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) - cmd := d.command("swarm", 
"unlock") + cmd := d.Command("swarm", "unlock") cmd.Stdin = bytes.NewBufferString("wrong-secret-key") out, err := cmd.CombinedOutput() c.Assert(err, checker.NotNil, check.Commentf("out: %v", string(out))) @@ -867,7 +870,7 @@ func (s *DockerSwarmSuite) TestSwarmInitLocked(c *check.C) { c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) - cmd = d.command("swarm", "unlock") + cmd = d.Command("swarm", "unlock") cmd.Stdin = bytes.NewBufferString(unlockKey) out, err = cmd.CombinedOutput() c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) @@ -897,7 +900,7 @@ func (s *DockerSwarmSuite) TestSwarmLeaveLocked(c *check.C) { // It starts off locked c.Assert(d.Restart("--swarm-default-advertise-addr=lo"), checker.IsNil) - info, err := d.info() + info, err := d.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateLocked) @@ -912,14 +915,14 @@ func (s *DockerSwarmSuite) TestSwarmLeaveLocked(c *check.C) { outs, err = d.Cmd("swarm", "leave", "--force") c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) - info, err = d.info() + info, err = d.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) outs, err = d.Cmd("swarm", "init") c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) - info, err = d.info() + info, err = d.SwarmInfo() c.Assert(err, checker.IsNil) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) } @@ -956,10 +959,10 @@ func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *check.C) { c.Assert(outs, checker.Equals, unlockKey+"\n") // The ones that got the cluster update should be set to locked - for _, d := range []*SwarmDaemon{d1, d3} { + for _, d := range []*daemon.Swarm{d1, d3} { checkSwarmUnlockedToLocked(c, d) - cmd := d.command("swarm", "unlock") + cmd := d.Command("swarm", "unlock") cmd.Stdin = bytes.NewBufferString(unlockKey) out, err := cmd.CombinedOutput() 
c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) @@ -978,7 +981,7 @@ func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *check.C) { c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) // the ones that got the update are now set to unlocked - for _, d := range []*SwarmDaemon{d1, d3} { + for _, d := range []*daemon.Swarm{d1, d3} { checkSwarmLockedToUnlocked(c, d, unlockKey) } @@ -986,7 +989,7 @@ func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *check.C) { c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateLocked) // unlock it - cmd := d2.command("swarm", "unlock") + cmd := d2.Command("swarm", "unlock") cmd.Stdin = bytes.NewBufferString(unlockKey) out, err := cmd.CombinedOutput() c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) @@ -1037,10 +1040,10 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) { d3 := s.AddDaemon(c, true, true) // both new nodes are locked - for _, d := range []*SwarmDaemon{d2, d3} { + for _, d := range []*daemon.Swarm{d2, d3} { checkSwarmUnlockedToLocked(c, d) - cmd := d.command("swarm", "unlock") + cmd := d.Command("swarm", "unlock") cmd.Stdin = bytes.NewBufferString(unlockKey) out, err := cmd.CombinedOutput() c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) @@ -1048,7 +1051,7 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) { } // get d3's cert - d3cert, err := ioutil.ReadFile(filepath.Join(d3.folder, "root", "swarm", "certificates", "swarm-node.crt")) + d3cert, err := ioutil.ReadFile(filepath.Join(d3.Folder, "root", "swarm", "certificates", "swarm-node.crt")) c.Assert(err, checker.IsNil) // demote manager back to worker - workers are not locked @@ -1061,9 +1064,9 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) { // to be replaced, then the node still has the manager TLS key which is still locked // (because we never want a manager TLS key to be on disk unencrypted if the cluster // is set 
to autolock) - waitAndAssert(c, defaultReconciliationTimeout, d3.checkControlAvailable, checker.False) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckControlAvailable, checker.False) waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - cert, err := ioutil.ReadFile(filepath.Join(d3.folder, "root", "swarm", "certificates", "swarm-node.crt")) + cert, err := ioutil.ReadFile(filepath.Join(d3.Folder, "root", "swarm", "certificates", "swarm-node.crt")) if err != nil { return "", check.Commentf("error: %v", err) } @@ -1111,7 +1114,7 @@ func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) { outs, _ = d.Cmd("node", "ls") c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") - cmd := d.command("swarm", "unlock") + cmd := d.Command("swarm", "unlock") cmd.Stdin = bytes.NewBufferString(unlockKey) out, err := cmd.CombinedOutput() @@ -1128,7 +1131,7 @@ func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) { c.Assert(d.Restart(), checker.IsNil) - cmd = d.command("swarm", "unlock") + cmd = d.Command("swarm", "unlock") cmd.Stdin = bytes.NewBufferString(unlockKey) out, err = cmd.CombinedOutput() } @@ -1138,7 +1141,7 @@ func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) { outs, _ = d.Cmd("node", "ls") c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") - cmd = d.command("swarm", "unlock") + cmd = d.Command("swarm", "unlock") cmd.Stdin = bytes.NewBufferString(newUnlockKey) out, err = cmd.CombinedOutput() c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) @@ -1191,13 +1194,13 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *check.C) { c.Assert(d2.Restart(), checker.IsNil) c.Assert(d3.Restart(), checker.IsNil) - for _, d := range []*SwarmDaemon{d2, d3} { + for _, d := range []*daemon.Swarm{d2, d3} { c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) outs, _ := d.Cmd("node", "ls") 
c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") - cmd := d.command("swarm", "unlock") + cmd := d.Command("swarm", "unlock") cmd.Stdin = bytes.NewBufferString(unlockKey) out, err := cmd.CombinedOutput() @@ -1214,7 +1217,7 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *check.C) { c.Assert(d.Restart(), checker.IsNil) - cmd = d.command("swarm", "unlock") + cmd = d.Command("swarm", "unlock") cmd.Stdin = bytes.NewBufferString(unlockKey) out, err = cmd.CombinedOutput() } @@ -1224,7 +1227,7 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *check.C) { outs, _ = d.Cmd("node", "ls") c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") - cmd = d.command("swarm", "unlock") + cmd = d.Command("swarm", "unlock") cmd.Stdin = bytes.NewBufferString(newUnlockKey) out, err = cmd.CombinedOutput() c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) @@ -1260,7 +1263,7 @@ func (s *DockerSwarmSuite) TestSwarmAlternateLockUnlock(c *check.C) { c.Assert(unlockKey, checker.Not(checker.Equals), "") checkSwarmUnlockedToLocked(c, d) - cmd := d.command("swarm", "unlock") + cmd := d.Command("swarm", "unlock") cmd.Stdin = bytes.NewBufferString(unlockKey) out, err := cmd.CombinedOutput() c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) @@ -1283,7 +1286,7 @@ func (s *DockerSwarmSuite) TestExtraHosts(c *check.C) { c.Assert(err, checker.IsNil) // Make sure task has been deployed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) // We need to get the container id. 
out, err := d.Cmd("ps", "-a", "-q", "--no-trunc") @@ -1303,7 +1306,7 @@ func (s *DockerSwarmSuite) TestSwarmManagerAddress(c *check.C) { d3 := s.AddDaemon(c, true, false) // Manager Addresses will always show Node 1's address - expectedOutput := fmt.Sprintf("Manager Addresses:\n 127.0.0.1:%d\n", d1.port) + expectedOutput := fmt.Sprintf("Manager Addresses:\n 127.0.0.1:%d\n", d1.Port) out, err := d1.Cmd("info") c.Assert(err, checker.IsNil) diff --git a/integration-cli/docker_cli_userns_test.go b/integration-cli/docker_cli_userns_test.go index acf74238b2..2e5d7ff2a4 100644 --- a/integration-cli/docker_cli_userns_test.go +++ b/integration-cli/docker_cli_userns_test.go @@ -36,8 +36,8 @@ func (s *DockerDaemonSuite) TestDaemonUserNamespaceRootSetting(c *check.C) { defer os.RemoveAll(tmpDirNotExists) // we need to find the uid and gid of the remapped root from the daemon's root dir info - uidgid := strings.Split(filepath.Base(s.d.root), ".") - c.Assert(uidgid, checker.HasLen, 2, check.Commentf("Should have gotten uid/gid strings from root dirname: %s", filepath.Base(s.d.root))) + uidgid := strings.Split(filepath.Base(s.d.Root), ".") + c.Assert(uidgid, checker.HasLen, 2, check.Commentf("Should have gotten uid/gid strings from root dirname: %s", filepath.Base(s.d.Root))) uid, err := strconv.Atoi(uidgid[0]) c.Assert(err, checker.IsNil, check.Commentf("Can't parse uid")) gid, err := strconv.Atoi(uidgid[1]) diff --git a/integration-cli/docker_deprecated_api_v124_test.go b/integration-cli/docker_deprecated_api_v124_test.go index 7bc287eca7..fbb7b4e911 100644 --- a/integration-cli/docker_deprecated_api_v124_test.go +++ b/integration-cli/docker_deprecated_api_v124_test.go @@ -7,6 +7,7 @@ import ( "net/http" "strings" + "github.com/docker/docker/pkg/integration" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) @@ -150,7 +151,7 @@ func (s *DockerSuite) TestDeprecatedStartWithTooLowMemoryLimit(c *check.C) { res, body, err := sockRequestRaw("POST", 
formatV123StartAPIURL("/containers/"+containerID+"/start"), strings.NewReader(config), "application/json") c.Assert(err, checker.IsNil) - b, err2 := readBody(body) + b, err2 := integration.ReadBody(body) c.Assert(err2, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) c.Assert(string(b), checker.Contains, "Minimum memory limit allowed is 4MB") diff --git a/integration-cli/docker_hub_pull_suite_test.go b/integration-cli/docker_hub_pull_suite_test.go index df52cae1a4..b769dbd59d 100644 --- a/integration-cli/docker_hub_pull_suite_test.go +++ b/integration-cli/docker_hub_pull_suite_test.go @@ -5,6 +5,7 @@ import ( "runtime" "strings" + "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) @@ -25,7 +26,7 @@ func init() { // relative impact of each individual operation. As part of this suite, all // images are removed after each test. type DockerHubPullSuite struct { - d *Daemon + d *daemon.Daemon ds *DockerSuite } @@ -39,7 +40,9 @@ func newDockerHubPullSuite() *DockerHubPullSuite { // SetUpSuite starts the suite daemon. func (s *DockerHubPullSuite) SetUpSuite(c *check.C) { testRequires(c, DaemonIsLinux) - s.d = NewDaemon(c) + s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ + Experimental: experimentalDaemon, + }) err := s.d.Start() c.Assert(err, checker.IsNil, check.Commentf("starting push/pull test daemon: %v", err)) } @@ -84,7 +87,7 @@ func (s *DockerHubPullSuite) CmdWithError(name string, arg ...string) (string, e // MakeCmd returns an exec.Cmd command to run against the suite daemon. func (s *DockerHubPullSuite) MakeCmd(name string, arg ...string) *exec.Cmd { - args := []string{"--host", s.d.sock(), name} + args := []string{"--host", s.d.Sock(), name} args = append(args, arg...) return exec.Command(dockerBinary, args...) 
} diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go index 44d412b73e..b87ae2d958 100644 --- a/integration-cli/docker_utils.go +++ b/integration-cli/docker_utils.go @@ -3,7 +3,6 @@ package main import ( "bufio" "bytes" - "crypto/tls" "encoding/json" "errors" "fmt" @@ -24,13 +23,14 @@ import ( "github.com/docker/docker/api/types" volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/integration" "github.com/docker/docker/pkg/integration/checker" icmd "github.com/docker/docker/pkg/integration/cmd" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/stringutils" - "github.com/docker/go-connections/tlsconfig" "github.com/docker/go-units" "github.com/go-check/check" ) @@ -107,55 +107,12 @@ func daemonHost() string { return daemonURLStr } -func getTLSConfig() (*tls.Config, error) { - dockerCertPath := os.Getenv("DOCKER_CERT_PATH") - - if dockerCertPath == "" { - return nil, fmt.Errorf("DOCKER_TLS_VERIFY specified, but no DOCKER_CERT_PATH environment variable") - } - - option := &tlsconfig.Options{ - CAFile: filepath.Join(dockerCertPath, "ca.pem"), - CertFile: filepath.Join(dockerCertPath, "cert.pem"), - KeyFile: filepath.Join(dockerCertPath, "key.pem"), - } - tlsConfig, err := tlsconfig.Client(*option) - if err != nil { - return nil, err - } - - return tlsConfig, nil -} - -func sockConn(timeout time.Duration, daemon string) (net.Conn, error) { - if daemon == "" { - daemon = daemonHost() - } - daemonURL, err := url.Parse(daemon) - if err != nil { - return nil, fmt.Errorf("could not parse url %q: %v", daemon, err) - } - - var c net.Conn - switch daemonURL.Scheme { - case "npipe": - return npipeDial(daemonURL.Path, timeout) - case "unix": - return net.DialTimeout(daemonURL.Scheme, daemonURL.Path, timeout) - case "tcp": - if os.Getenv("DOCKER_TLS_VERIFY") != "" { - // Setup 
the socket TLS configuration. - tlsConfig, err := getTLSConfig() - if err != nil { - return nil, err - } - dialer := &net.Dialer{Timeout: timeout} - return tls.DialWithDialer(dialer, daemonURL.Scheme, daemonURL.Host, tlsConfig) - } - return net.DialTimeout(daemonURL.Scheme, daemonURL.Host, timeout) - default: - return c, fmt.Errorf("unknown scheme %v (%s)", daemonURL.Scheme, daemon) +// FIXME(vdemeester) should probably completely move to daemon struct/methods +func sockConn(timeout time.Duration, daemonStr string) (net.Conn, error) { + if daemonStr == "" { + daemonStr = daemonHost() } + return daemon.SockConn(timeout, daemonStr) } func sockRequest(method, endpoint string, data interface{}) (int, []byte, error) { @@ -168,7 +125,7 @@ func sockRequest(method, endpoint string, data interface{}) (int, []byte, error) if err != nil { return -1, nil, err } - b, err := readBody(body) + b, err := integration.ReadBody(body) return res.StatusCode, b, err } @@ -226,11 +183,6 @@ func newRequestClient(method, endpoint string, data io.Reader, ct, daemon string return req, client, nil } -func readBody(b io.ReadCloser) ([]byte, error) { - defer b.Close() - return ioutil.ReadAll(b) -} - func deleteContainer(container ...string) error { result := icmd.RunCommand(dockerBinary, append([]string{"rm", "-fv"}, container...)...) return result.Compare(icmd.Success) @@ -950,23 +902,7 @@ func getContainerState(c *check.C, id string) (int, bool, error) { } func buildImageCmd(name, dockerfile string, useCache bool, buildFlags ...string) *exec.Cmd { - return buildImageCmdWithHost(name, dockerfile, "", useCache, buildFlags...) -} - -func buildImageCmdWithHost(name, dockerfile, host string, useCache bool, buildFlags ...string) *exec.Cmd { - args := []string{} - if host != "" { - args = append(args, "--host", host) - } - args = append(args, "build", "-t", name) - if !useCache { - args = append(args, "--no-cache") - } - args = append(args, buildFlags...) 
- args = append(args, "-") - buildCmd := exec.Command(dockerBinary, args...) - buildCmd.Stdin = strings.NewReader(dockerfile) - return buildCmd + return daemon.BuildImageCmdWithHost(dockerBinary, name, dockerfile, "", useCache, buildFlags...) } func buildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, string, error) { @@ -1401,39 +1337,7 @@ func waitInspect(name, expr, expected string, timeout time.Duration) error { } func waitInspectWithArgs(name, expr, expected string, timeout time.Duration, arg ...string) error { - after := time.After(timeout) - - args := append(arg, "inspect", "-f", expr, name) - for { - result := icmd.RunCommand(dockerBinary, args...) - if result.Error != nil { - if !strings.Contains(result.Stderr(), "No such") { - return fmt.Errorf("error executing docker inspect: %v\n%s", - result.Stderr(), result.Stdout()) - } - select { - case <-after: - return result.Error - default: - time.Sleep(10 * time.Millisecond) - continue - } - } - - out := strings.TrimSpace(result.Stdout()) - if out == expected { - break - } - - select { - case <-after: - return fmt.Errorf("condition \"%q == %q\" not true in time", out, expected) - default: - } - - time.Sleep(100 * time.Millisecond) - } - return nil + return daemon.WaitInspectWithArgs(dockerBinary, name, expr, expected, timeout, arg...) } func getInspectBody(c *check.C, version, id string) []byte { diff --git a/pkg/integration/utils.go b/pkg/integration/utils.go index f2089c43c4..af3aa1a910 100644 --- a/pkg/integration/utils.go +++ b/pkg/integration/utils.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "io/ioutil" "os" "os/exec" "path/filepath" @@ -225,3 +226,9 @@ func RunAtDifferentDate(date time.Time, block func()) { block() return } + +// ReadBody reads the content of the specified ReadCloser, closes it, and returns the bytes. +func ReadBody(b io.ReadCloser) ([]byte, error) { + defer b.Close() + return ioutil.ReadAll(b) +}