2016-12-09 04:17:53 -05:00
|
|
|
package daemon
|
2016-03-04 03:29:24 -05:00
|
|
|
|
|
|
|
import (
|
2016-06-13 22:54:20 -04:00
|
|
|
"bytes"
|
2016-03-04 03:29:24 -05:00
|
|
|
"encoding/json"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
2016-12-09 04:17:53 -05:00
|
|
|
"io/ioutil"
|
2016-03-04 03:29:24 -05:00
|
|
|
"net/http"
|
|
|
|
"os"
|
|
|
|
"os/exec"
|
|
|
|
"path/filepath"
|
|
|
|
"strconv"
|
|
|
|
"strings"
|
|
|
|
"time"
|
|
|
|
|
2017-05-23 23:56:26 -04:00
|
|
|
"github.com/docker/docker/api"
|
2017-08-22 17:07:52 -04:00
|
|
|
"github.com/docker/docker/api/types"
|
2016-09-06 14:18:12 -04:00
|
|
|
"github.com/docker/docker/api/types/events"
|
2017-05-23 23:56:26 -04:00
|
|
|
"github.com/docker/docker/client"
|
2016-12-30 12:23:00 -05:00
|
|
|
"github.com/docker/docker/integration-cli/checker"
|
2016-12-30 04:49:36 -05:00
|
|
|
"github.com/docker/docker/integration-cli/request"
|
2016-03-04 03:29:24 -05:00
|
|
|
"github.com/docker/docker/opts"
|
|
|
|
"github.com/docker/docker/pkg/ioutils"
|
2016-09-06 18:08:14 -04:00
|
|
|
"github.com/docker/docker/pkg/stringid"
|
2016-03-04 03:29:24 -05:00
|
|
|
"github.com/docker/go-connections/sockets"
|
2016-09-02 21:27:20 -04:00
|
|
|
"github.com/docker/go-connections/tlsconfig"
|
2016-03-04 03:29:24 -05:00
|
|
|
"github.com/go-check/check"
|
2017-08-23 17:01:29 -04:00
|
|
|
"github.com/gotestyourself/gotestyourself/icmd"
|
2016-12-09 04:18:02 -05:00
|
|
|
"github.com/pkg/errors"
|
2017-08-22 17:07:52 -04:00
|
|
|
"github.com/stretchr/testify/require"
|
|
|
|
"golang.org/x/net/context"
|
2016-03-04 03:29:24 -05:00
|
|
|
)
|
|
|
|
|
2016-12-14 04:52:51 -05:00
|
|
|
// testingT is the minimal testing interface required by the daemon
// helpers: testify compatibility (require.TestingT), log output (logT),
// and the ability to abort the test with a formatted fatal message.
type testingT interface {
	require.TestingT
	logT
	Fatalf(string, ...interface{})
}
|
|
|
|
|
|
|
|
// logT is implemented by anything that can record a formatted log line,
// e.g. *testing.T via its Logf method.
type logT interface {
	Logf(string, ...interface{})
}
|
|
|
|
|
2016-12-09 04:17:53 -05:00
|
|
|
// SockRoot holds the path of the default docker integration daemon socket
// directory; it is created (mode 0700) by New before a daemon is built.
var SockRoot = filepath.Join(os.TempDir(), "docker-integration")
|
2016-07-28 10:19:09 -04:00
|
|
|
|
2016-12-09 17:20:14 -05:00
|
|
|
// errDaemonNotStarted is the sentinel error returned by lifecycle helpers
// (Kill, Signal, StopWithError, ...) when the daemon process was never
// started, i.e. d.cmd or d.Wait is nil.
var errDaemonNotStarted = errors.New("daemon not started")
|
|
|
|
|
2016-03-04 03:29:24 -05:00
|
|
|
// Daemon represents a Docker daemon for the testing framework.
type Daemon struct {
	GlobalFlags       []string   // flags prepended to every dockerd invocation (see StartWithLogFile)
	Root              string     // data root passed as --data-root; re-resolved from /info after start
	Folder            string     // per-daemon scratch folder holding docker.log and docker.pid
	Wait              chan error // receives the result of cmd.Wait() when the daemon process exits
	UseDefaultHost    bool       // listen on the system default unix socket instead of a private one
	UseDefaultTLSHost bool       // listen on the default TLS TCP host/port

	id             string         // unique identifier, e.g. "d123456789abc"
	logFile        *os.File       // daemon stdout/stderr sink, closed on Stop/Kill
	stdin          io.WriteCloser // NOTE(review): not set anywhere in this file — possibly unused
	stdout, stderr io.ReadCloser  // NOTE(review): not set anywhere in this file — possibly unused
	cmd            *exec.Cmd      // the running dockerd process; nil when not started
	storageDriver  string         // from $DOCKER_GRAPHDRIVER; passed as --storage-driver
	userlandProxy  bool           // value for --userland-proxy
	execRoot       string         // passed as --exec-root
	experimental   bool           // adds --experimental --init when true
	dockerBinary   string         // path/name of the docker CLI binary
	dockerdBinary  string         // path/name of the dockerd binary, resolved via exec.LookPath
	log            logT           // destination for progress/diagnostic messages
}
|
|
|
|
|
|
|
|
// Config holds docker daemon integration configuration
type Config struct {
	// Experimental enables the daemon's experimental features (--experimental --init).
	Experimental bool
}
|
|
|
|
|
|
|
|
// clientConfig bundles everything an http.Client needs to reach the daemon:
// a configured transport plus the scheme/address to stamp onto request URLs.
type clientConfig struct {
	transport *http.Transport
	scheme    string // "http" or "https"
	addr      string // unix socket path or host:port
}
|
|
|
|
|
2016-12-09 04:17:53 -05:00
|
|
|
// New returns a Daemon instance to be used for testing.
|
[EXPERIMENTAL] Integration Test on Swarm
This commit adds contrib/integration-cli-on-swarm/integration-cli-on-swarm.sh,
which enables IT to be running in parallel, using Swarm-mode and Funker.
Please refer to contrib/integration-cli-on-swarm/README.md
The test takes almost 5 to 6 minutes, with 10 n1-standard-4 GCE instances.
$ ./contrib/integration-cli-on-swarm/integration-cli-on-swarm.sh --push-worker-image example.gcr.io/foo/bar --replicas 30 --shuffle
2016/12/29 08:32:15 Loaded 1618 tests (30 chunks)
2016/12/29 08:32:15 Executing 30 chunks in parallel, against "integration-cli-worker"
2016/12/29 08:32:15 Executing chunk 0 (contains 54 test filters)
..
2016/12/29 08:34:34 Finished chunk 28 [1/30] with 54 test filters in 2m19.098068003s, code=0.
2016/12/29 08:34:38 Finished chunk 12 [2/30] with 54 test filters in 2m23.088569511s, code=0.
2016/12/29 08:34:48 Finished chunk 10 [3/30] with 54 test filters in 2m33.880679079s, code=0.
2016/12/29 08:34:54 Finished chunk 20 [4/30] with 54 test filters in 2m39.973747028s, code=0.
2016/12/29 08:35:11 Finished chunk 18 [5/30] with 54 test filters in 2m56.28384361s, code=0.
2016/12/29 08:35:11 Finished chunk 29 [6/30] with 52 test filters in 2m56.54047088s, code=0.
2016/12/29 08:35:15 Finished chunk 1 [7/30] with 54 test filters in 3m0.285044426s, code=0.
2016/12/29 08:35:22 Finished chunk 6 [8/30] with 54 test filters in 3m7.211775338s, code=0.
2016/12/29 08:35:24 Finished chunk 25 [9/30] with 54 test filters in 3m9.938413009s, code=0.
2016/12/29 08:35:30 Finished chunk 27 [10/30] with 54 test filters in 3m15.219834368s, code=0.
2016/12/29 08:35:36 Finished chunk 9 [11/30] with 54 test filters in 3m21.615434162s, code=0.
2016/12/29 08:35:41 Finished chunk 13 [12/30] with 54 test filters in 3m26.576907401s, code=0.
2016/12/29 08:35:45 Finished chunk 17 [13/30] with 54 test filters in 3m30.290752537s, code=0.
2016/12/29 08:35:53 Finished chunk 2 [14/30] with 54 test filters in 3m38.148423321s, code=0.
2016/12/29 08:35:55 Finished chunk 24 [15/30] with 54 test filters in 3m40.09669137s, code=0.
2016/12/29 08:35:57 Finished chunk 8 [16/30] with 54 test filters in 3m42.299945108s, code=0.
2016/12/29 08:35:57 Finished chunk 22 [17/30] with 54 test filters in 3m42.946558809s, code=0.
2016/12/29 08:35:59 Finished chunk 23 [18/30] with 54 test filters in 3m44.232557165s, code=0.
2016/12/29 08:36:02 Finished chunk 3 [19/30] with 54 test filters in 3m47.112051358s, code=0.
2016/12/29 08:36:11 Finished chunk 15 [20/30] with 54 test filters in 3m56.340656645s, code=0.
2016/12/29 08:36:11 Finished chunk 11 [21/30] with 54 test filters in 3m56.882401231s, code=0.
2016/12/29 08:36:22 Finished chunk 19 [22/30] with 54 test filters in 4m7.551093516s, code=0.
2016/12/29 08:36:23 Finished chunk 21 [23/30] with 54 test filters in 4m8.221093446s, code=0.
2016/12/29 08:36:25 Finished chunk 16 [24/30] with 54 test filters in 4m10.450451705s, code=0.
2016/12/29 08:36:27 Finished chunk 5 [25/30] with 54 test filters in 4m12.162272692s, code=0.
2016/12/29 08:36:28 Finished chunk 14 [26/30] with 54 test filters in 4m13.977801031s, code=0.
2016/12/29 08:36:29 Finished chunk 0 [27/30] with 54 test filters in 4m14.34086812s, code=0.
2016/12/29 08:36:49 Finished chunk 26 [28/30] with 54 test filters in 4m34.437085539s, code=0.
2016/12/29 08:37:14 Finished chunk 7 [29/30] with 54 test filters in 4m59.22902721s, code=0.
2016/12/29 08:37:20 Finished chunk 4 [30/30] with 54 test filters in 5m5.103469214s, code=0.
2016/12/29 08:37:20 Executed 30 chunks in 5m5.104379119s. PASS: 30, FAIL: 0.
Signed-off-by: Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
2016-12-07 02:16:48 -05:00
|
|
|
// This will create a directory such as d123456789 in the folder specified by $DOCKER_INTEGRATION_DAEMON_DEST or $DEST.
|
2016-03-04 03:29:24 -05:00
|
|
|
// The daemon will not automatically start.
|
2016-12-14 04:52:51 -05:00
|
|
|
func New(t testingT, dockerBinary string, dockerdBinary string, config Config) *Daemon {
|
[EXPERIMENTAL] Integration Test on Swarm
This commit adds contrib/integration-cli-on-swarm/integration-cli-on-swarm.sh,
which enables IT to be running in parallel, using Swarm-mode and Funker.
Please refer to contrib/integration-cli-on-swarm/README.md
The test takes almost 5 to 6 minutes, with 10 n1-standard-4 GCE instances.
$ ./contrib/integration-cli-on-swarm/integration-cli-on-swarm.sh --push-worker-image example.gcr.io/foo/bar --replicas 30 --shuffle
2016/12/29 08:32:15 Loaded 1618 tests (30 chunks)
2016/12/29 08:32:15 Executing 30 chunks in parallel, against "integration-cli-worker"
2016/12/29 08:32:15 Executing chunk 0 (contains 54 test filters)
..
2016/12/29 08:34:34 Finished chunk 28 [1/30] with 54 test filters in 2m19.098068003s, code=0.
2016/12/29 08:34:38 Finished chunk 12 [2/30] with 54 test filters in 2m23.088569511s, code=0.
2016/12/29 08:34:48 Finished chunk 10 [3/30] with 54 test filters in 2m33.880679079s, code=0.
2016/12/29 08:34:54 Finished chunk 20 [4/30] with 54 test filters in 2m39.973747028s, code=0.
2016/12/29 08:35:11 Finished chunk 18 [5/30] with 54 test filters in 2m56.28384361s, code=0.
2016/12/29 08:35:11 Finished chunk 29 [6/30] with 52 test filters in 2m56.54047088s, code=0.
2016/12/29 08:35:15 Finished chunk 1 [7/30] with 54 test filters in 3m0.285044426s, code=0.
2016/12/29 08:35:22 Finished chunk 6 [8/30] with 54 test filters in 3m7.211775338s, code=0.
2016/12/29 08:35:24 Finished chunk 25 [9/30] with 54 test filters in 3m9.938413009s, code=0.
2016/12/29 08:35:30 Finished chunk 27 [10/30] with 54 test filters in 3m15.219834368s, code=0.
2016/12/29 08:35:36 Finished chunk 9 [11/30] with 54 test filters in 3m21.615434162s, code=0.
2016/12/29 08:35:41 Finished chunk 13 [12/30] with 54 test filters in 3m26.576907401s, code=0.
2016/12/29 08:35:45 Finished chunk 17 [13/30] with 54 test filters in 3m30.290752537s, code=0.
2016/12/29 08:35:53 Finished chunk 2 [14/30] with 54 test filters in 3m38.148423321s, code=0.
2016/12/29 08:35:55 Finished chunk 24 [15/30] with 54 test filters in 3m40.09669137s, code=0.
2016/12/29 08:35:57 Finished chunk 8 [16/30] with 54 test filters in 3m42.299945108s, code=0.
2016/12/29 08:35:57 Finished chunk 22 [17/30] with 54 test filters in 3m42.946558809s, code=0.
2016/12/29 08:35:59 Finished chunk 23 [18/30] with 54 test filters in 3m44.232557165s, code=0.
2016/12/29 08:36:02 Finished chunk 3 [19/30] with 54 test filters in 3m47.112051358s, code=0.
2016/12/29 08:36:11 Finished chunk 15 [20/30] with 54 test filters in 3m56.340656645s, code=0.
2016/12/29 08:36:11 Finished chunk 11 [21/30] with 54 test filters in 3m56.882401231s, code=0.
2016/12/29 08:36:22 Finished chunk 19 [22/30] with 54 test filters in 4m7.551093516s, code=0.
2016/12/29 08:36:23 Finished chunk 21 [23/30] with 54 test filters in 4m8.221093446s, code=0.
2016/12/29 08:36:25 Finished chunk 16 [24/30] with 54 test filters in 4m10.450451705s, code=0.
2016/12/29 08:36:27 Finished chunk 5 [25/30] with 54 test filters in 4m12.162272692s, code=0.
2016/12/29 08:36:28 Finished chunk 14 [26/30] with 54 test filters in 4m13.977801031s, code=0.
2016/12/29 08:36:29 Finished chunk 0 [27/30] with 54 test filters in 4m14.34086812s, code=0.
2016/12/29 08:36:49 Finished chunk 26 [28/30] with 54 test filters in 4m34.437085539s, code=0.
2016/12/29 08:37:14 Finished chunk 7 [29/30] with 54 test filters in 4m59.22902721s, code=0.
2016/12/29 08:37:20 Finished chunk 4 [30/30] with 54 test filters in 5m5.103469214s, code=0.
2016/12/29 08:37:20 Executed 30 chunks in 5m5.104379119s. PASS: 30, FAIL: 0.
Signed-off-by: Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
2016-12-07 02:16:48 -05:00
|
|
|
dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST")
|
2016-12-14 04:52:51 -05:00
|
|
|
if dest == "" {
|
[EXPERIMENTAL] Integration Test on Swarm
This commit adds contrib/integration-cli-on-swarm/integration-cli-on-swarm.sh,
which enables IT to be running in parallel, using Swarm-mode and Funker.
Please refer to contrib/integration-cli-on-swarm/README.md
The test takes almost 5 to 6 minutes, with 10 n1-standard-4 GCE instances.
$ ./contrib/integration-cli-on-swarm/integration-cli-on-swarm.sh --push-worker-image example.gcr.io/foo/bar --replicas 30 --shuffle
2016/12/29 08:32:15 Loaded 1618 tests (30 chunks)
2016/12/29 08:32:15 Executing 30 chunks in parallel, against "integration-cli-worker"
2016/12/29 08:32:15 Executing chunk 0 (contains 54 test filters)
..
2016/12/29 08:34:34 Finished chunk 28 [1/30] with 54 test filters in 2m19.098068003s, code=0.
2016/12/29 08:34:38 Finished chunk 12 [2/30] with 54 test filters in 2m23.088569511s, code=0.
2016/12/29 08:34:48 Finished chunk 10 [3/30] with 54 test filters in 2m33.880679079s, code=0.
2016/12/29 08:34:54 Finished chunk 20 [4/30] with 54 test filters in 2m39.973747028s, code=0.
2016/12/29 08:35:11 Finished chunk 18 [5/30] with 54 test filters in 2m56.28384361s, code=0.
2016/12/29 08:35:11 Finished chunk 29 [6/30] with 52 test filters in 2m56.54047088s, code=0.
2016/12/29 08:35:15 Finished chunk 1 [7/30] with 54 test filters in 3m0.285044426s, code=0.
2016/12/29 08:35:22 Finished chunk 6 [8/30] with 54 test filters in 3m7.211775338s, code=0.
2016/12/29 08:35:24 Finished chunk 25 [9/30] with 54 test filters in 3m9.938413009s, code=0.
2016/12/29 08:35:30 Finished chunk 27 [10/30] with 54 test filters in 3m15.219834368s, code=0.
2016/12/29 08:35:36 Finished chunk 9 [11/30] with 54 test filters in 3m21.615434162s, code=0.
2016/12/29 08:35:41 Finished chunk 13 [12/30] with 54 test filters in 3m26.576907401s, code=0.
2016/12/29 08:35:45 Finished chunk 17 [13/30] with 54 test filters in 3m30.290752537s, code=0.
2016/12/29 08:35:53 Finished chunk 2 [14/30] with 54 test filters in 3m38.148423321s, code=0.
2016/12/29 08:35:55 Finished chunk 24 [15/30] with 54 test filters in 3m40.09669137s, code=0.
2016/12/29 08:35:57 Finished chunk 8 [16/30] with 54 test filters in 3m42.299945108s, code=0.
2016/12/29 08:35:57 Finished chunk 22 [17/30] with 54 test filters in 3m42.946558809s, code=0.
2016/12/29 08:35:59 Finished chunk 23 [18/30] with 54 test filters in 3m44.232557165s, code=0.
2016/12/29 08:36:02 Finished chunk 3 [19/30] with 54 test filters in 3m47.112051358s, code=0.
2016/12/29 08:36:11 Finished chunk 15 [20/30] with 54 test filters in 3m56.340656645s, code=0.
2016/12/29 08:36:11 Finished chunk 11 [21/30] with 54 test filters in 3m56.882401231s, code=0.
2016/12/29 08:36:22 Finished chunk 19 [22/30] with 54 test filters in 4m7.551093516s, code=0.
2016/12/29 08:36:23 Finished chunk 21 [23/30] with 54 test filters in 4m8.221093446s, code=0.
2016/12/29 08:36:25 Finished chunk 16 [24/30] with 54 test filters in 4m10.450451705s, code=0.
2016/12/29 08:36:27 Finished chunk 5 [25/30] with 54 test filters in 4m12.162272692s, code=0.
2016/12/29 08:36:28 Finished chunk 14 [26/30] with 54 test filters in 4m13.977801031s, code=0.
2016/12/29 08:36:29 Finished chunk 0 [27/30] with 54 test filters in 4m14.34086812s, code=0.
2016/12/29 08:36:49 Finished chunk 26 [28/30] with 54 test filters in 4m34.437085539s, code=0.
2016/12/29 08:37:14 Finished chunk 7 [29/30] with 54 test filters in 4m59.22902721s, code=0.
2016/12/29 08:37:20 Finished chunk 4 [30/30] with 54 test filters in 5m5.103469214s, code=0.
2016/12/29 08:37:20 Executed 30 chunks in 5m5.104379119s. PASS: 30, FAIL: 0.
Signed-off-by: Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
2016-12-07 02:16:48 -05:00
|
|
|
dest = os.Getenv("DEST")
|
|
|
|
}
|
|
|
|
if dest == "" {
|
|
|
|
t.Fatalf("Please set the DOCKER_INTEGRATION_DAEMON_DEST or the DEST environment variable")
|
2016-12-14 04:52:51 -05:00
|
|
|
}
|
2016-03-04 03:29:24 -05:00
|
|
|
|
2016-12-14 04:52:51 -05:00
|
|
|
if err := os.MkdirAll(SockRoot, 0700); err != nil {
|
|
|
|
t.Fatalf("could not create daemon socket root")
|
|
|
|
}
|
2016-07-28 10:19:09 -04:00
|
|
|
|
2016-09-06 18:08:14 -04:00
|
|
|
id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID()))
|
2016-03-04 03:29:24 -05:00
|
|
|
dir := filepath.Join(dest, id)
|
|
|
|
daemonFolder, err := filepath.Abs(dir)
|
2016-12-14 04:52:51 -05:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Could not make %q an absolute path", dir)
|
|
|
|
}
|
2016-03-04 03:29:24 -05:00
|
|
|
daemonRoot := filepath.Join(daemonFolder, "root")
|
|
|
|
|
2016-12-14 04:52:51 -05:00
|
|
|
if err := os.MkdirAll(daemonRoot, 0755); err != nil {
|
|
|
|
t.Fatalf("Could not create daemon root %q", dir)
|
|
|
|
}
|
2016-03-04 03:29:24 -05:00
|
|
|
|
|
|
|
userlandProxy := true
|
|
|
|
if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" {
|
|
|
|
if val, err := strconv.ParseBool(env); err != nil {
|
|
|
|
userlandProxy = val
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return &Daemon{
|
|
|
|
id: id,
|
2016-12-09 04:17:53 -05:00
|
|
|
Folder: daemonFolder,
|
|
|
|
Root: daemonRoot,
|
2016-03-04 03:29:24 -05:00
|
|
|
storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"),
|
|
|
|
userlandProxy: userlandProxy,
|
2016-08-19 16:06:28 -04:00
|
|
|
execRoot: filepath.Join(os.TempDir(), "docker-execroot", id),
|
2016-12-09 04:17:53 -05:00
|
|
|
dockerBinary: dockerBinary,
|
|
|
|
dockerdBinary: dockerdBinary,
|
|
|
|
experimental: config.Experimental,
|
2016-12-14 04:52:51 -05:00
|
|
|
log: t,
|
2016-03-04 03:29:24 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-11-22 14:21:34 -05:00
|
|
|
// RootDir returns the root directory of the daemon.
func (d *Daemon) RootDir() string {
	return d.Root
}
|
|
|
|
|
|
|
|
// ID returns the generated id of the daemon
func (d *Daemon) ID() string {
	return d.id
}
|
|
|
|
|
|
|
|
// StorageDriver returns the configured storage driver of the daemon
// (taken from $DOCKER_GRAPHDRIVER at construction time; may be empty).
func (d *Daemon) StorageDriver() string {
	return d.storageDriver
}
|
|
|
|
|
|
|
|
// CleanupExecRoot cleans the daemon exec root (network namespaces, ...).
// The actual work is delegated to the platform-specific cleanupExecRoot helper.
func (d *Daemon) CleanupExecRoot(c *check.C) {
	cleanupExecRoot(c, d.execRoot)
}
|
|
|
|
|
2016-03-04 03:29:24 -05:00
|
|
|
func (d *Daemon) getClientConfig() (*clientConfig, error) {
|
|
|
|
var (
|
|
|
|
transport *http.Transport
|
|
|
|
scheme string
|
|
|
|
addr string
|
|
|
|
proto string
|
|
|
|
)
|
2016-12-09 04:17:53 -05:00
|
|
|
if d.UseDefaultTLSHost {
|
2016-03-04 03:29:24 -05:00
|
|
|
option := &tlsconfig.Options{
|
|
|
|
CAFile: "fixtures/https/ca.pem",
|
|
|
|
CertFile: "fixtures/https/client-cert.pem",
|
|
|
|
KeyFile: "fixtures/https/client-key.pem",
|
|
|
|
}
|
|
|
|
tlsConfig, err := tlsconfig.Client(*option)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
transport = &http.Transport{
|
|
|
|
TLSClientConfig: tlsConfig,
|
|
|
|
}
|
|
|
|
addr = fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort)
|
|
|
|
scheme = "https"
|
|
|
|
proto = "tcp"
|
2016-12-09 04:17:53 -05:00
|
|
|
} else if d.UseDefaultHost {
|
2016-03-04 03:29:24 -05:00
|
|
|
addr = opts.DefaultUnixSocket
|
|
|
|
proto = "unix"
|
|
|
|
scheme = "http"
|
|
|
|
transport = &http.Transport{}
|
|
|
|
} else {
|
2016-07-28 10:19:09 -04:00
|
|
|
addr = d.sockPath()
|
2016-03-04 03:29:24 -05:00
|
|
|
proto = "unix"
|
|
|
|
scheme = "http"
|
|
|
|
transport = &http.Transport{}
|
|
|
|
}
|
|
|
|
|
2016-12-09 04:18:02 -05:00
|
|
|
if err := sockets.ConfigureTransport(transport, proto, addr); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-01-11 15:38:52 -05:00
|
|
|
transport.DisableKeepAlives = true
|
2016-03-04 03:29:24 -05:00
|
|
|
|
|
|
|
return &clientConfig{
|
|
|
|
transport: transport,
|
|
|
|
scheme: scheme,
|
|
|
|
addr: addr,
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2016-12-09 17:20:14 -05:00
|
|
|
// Start starts the daemon and return once it is ready to receive requests.
|
2016-12-14 04:52:51 -05:00
|
|
|
func (d *Daemon) Start(t testingT, args ...string) {
|
|
|
|
if err := d.StartWithError(args...); err != nil {
|
|
|
|
t.Fatalf("Error starting daemon with arguments: %v", args)
|
|
|
|
}
|
2016-12-09 17:20:14 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// StartWithError starts the daemon and return once it is ready to receive requests.
|
|
|
|
// It returns an error in case it couldn't start.
|
|
|
|
func (d *Daemon) StartWithError(args ...string) error {
|
2016-12-09 04:17:53 -05:00
|
|
|
logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600)
|
2016-12-09 04:18:02 -05:00
|
|
|
if err != nil {
|
|
|
|
return errors.Wrapf(err, "[%s] Could not create %s/docker.log", d.id, d.Folder)
|
|
|
|
}
|
2016-03-04 03:29:24 -05:00
|
|
|
|
|
|
|
return d.StartWithLogFile(logFile, args...)
|
|
|
|
}
|
|
|
|
|
|
|
|
// StartWithLogFile will start the daemon and attach its streams to a given file.
// It builds the dockerd command line, launches the process, then polls the
// /_ping endpoint until the daemon answers (or the startup window elapses).
func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error {
	// Resolve the dockerd binary on $PATH before doing anything else.
	dockerdBinary, err := exec.LookPath(d.dockerdBinary)
	if err != nil {
		return errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id)
	}
	// Base arguments: containerd socket, per-daemon data/exec roots, pid
	// file, and the userland-proxy setting chosen in New.
	args := append(d.GlobalFlags,
		"--containerd", "/var/run/docker/containerd/docker-containerd.sock",
		"--data-root", d.Root,
		"--exec-root", d.execRoot,
		"--pidfile", fmt.Sprintf("%s/docker.pid", d.Folder),
		fmt.Sprintf("--userland-proxy=%t", d.userlandProxy),
	)
	if d.experimental {
		args = append(args, "--experimental", "--init")
	}
	// Unless the daemon listens on a default host, bind it to its own socket.
	if !(d.UseDefaultHost || d.UseDefaultTLSHost) {
		args = append(args, []string{"--host", d.Sock()}...)
	}
	if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
		args = append(args, []string{"--userns-remap", root}...)
	}

	// If we don't explicitly set the log-level or debug flag(-D) then
	// turn on debug mode
	foundLog := false
	foundSd := false
	for _, a := range providedArgs {
		if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") {
			foundLog = true
		}
		if strings.Contains(a, "--storage-driver") {
			foundSd = true
		}
	}
	if !foundLog {
		args = append(args, "--debug")
	}
	// Apply the environment-selected storage driver unless the caller
	// already passed one explicitly.
	if d.storageDriver != "" && !foundSd {
		args = append(args, "--storage-driver", d.storageDriver)
	}

	// Caller-provided args go last so they win over the defaults above.
	args = append(args, providedArgs...)
	d.cmd = exec.Command(dockerdBinary, args...)
	d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1")
	d.cmd.Stdout = out
	d.cmd.Stderr = out
	d.logFile = out

	if err := d.cmd.Start(); err != nil {
		return errors.Errorf("[%s] could not start daemon container: %v", d.id, err)
	}

	wait := make(chan error)

	// Reap the process in the background; its exit status is published on
	// d.Wait so Stop/Kill (and the startup loop below) can observe it.
	go func() {
		wait <- d.cmd.Wait()
		d.log.Logf("[%s] exiting daemon", d.id)
		close(wait)
	}()

	d.Wait = wait

	tick := time.Tick(500 * time.Millisecond)
	// make sure daemon is ready to receive requests
	startTime := time.Now().Unix()
	for {
		d.log.Logf("[%s] waiting for daemon to start", d.id)
		if time.Now().Unix()-startTime > 5 {
			// After 5 seconds, give up
			return errors.Errorf("[%s] Daemon exited and never started", d.id)
		}
		select {
		case <-time.After(2 * time.Second):
			return errors.Errorf("[%s] timeout: daemon does not respond", d.id)
		case <-tick:
			// Ping the daemon on every tick; any transport error just
			// means "not up yet", so retry.
			clientConfig, err := d.getClientConfig()
			if err != nil {
				return err
			}

			client := &http.Client{
				Transport: clientConfig.transport,
			}

			req, err := http.NewRequest("GET", "/_ping", nil)
			if err != nil {
				return errors.Wrapf(err, "[%s] could not create new request", d.id)
			}
			req.URL.Host = clientConfig.addr
			req.URL.Scheme = clientConfig.scheme
			resp, err := client.Do(req)
			if err != nil {
				continue
			}
			resp.Body.Close()
			if resp.StatusCode != http.StatusOK {
				d.log.Logf("[%s] received status != 200 OK: %s\n", d.id, resp.Status)
			}
			d.log.Logf("[%s] daemon started\n", d.id)
			// Re-resolve Root from /info to support userns-remapped daemons
			// whose effective root gains a "uid.gid" suffix.
			d.Root, err = d.queryRootDir()
			if err != nil {
				return errors.Errorf("[%s] error querying daemon for root directory: %v", d.id, err)
			}
			return nil
		case <-d.Wait:
			return errors.Errorf("[%s] Daemon exited during startup", d.id)
		}
	}
}
|
|
|
|
|
|
|
|
// StartWithBusybox will first start the daemon with Daemon.Start()
// then save the busybox image from the main daemon and load it into this Daemon instance.
func (d *Daemon) StartWithBusybox(t testingT, arg ...string) {
	d.Start(t, arg...)
	d.LoadBusybox(t)
}
|
|
|
|
|
2016-03-18 14:50:19 -04:00
|
|
|
// Kill will send a SIGKILL to the daemon
|
|
|
|
func (d *Daemon) Kill() error {
|
2016-12-09 04:17:53 -05:00
|
|
|
if d.cmd == nil || d.Wait == nil {
|
2016-12-09 17:20:14 -05:00
|
|
|
return errDaemonNotStarted
|
2016-03-18 14:50:19 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
defer func() {
|
|
|
|
d.logFile.Close()
|
|
|
|
d.cmd = nil
|
|
|
|
}()
|
|
|
|
|
|
|
|
if err := d.cmd.Process.Kill(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-12-09 04:17:53 -05:00
|
|
|
if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder)); err != nil {
|
2016-03-18 14:50:19 -04:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-12-09 04:17:53 -05:00
|
|
|
// Pid returns the pid of the daemon.
// NOTE(review): panics if the daemon was never started (d.cmd is nil);
// callers are expected to start it first.
func (d *Daemon) Pid() int {
	return d.cmd.Process.Pid
}
|
|
|
|
|
|
|
|
// Interrupt stops the daemon by sending it an Interrupt signal
func (d *Daemon) Interrupt() error {
	return d.Signal(os.Interrupt)
}
|
|
|
|
|
|
|
|
// Signal sends the specified signal to the daemon if running
|
|
|
|
func (d *Daemon) Signal(signal os.Signal) error {
|
|
|
|
if d.cmd == nil || d.Wait == nil {
|
2016-12-09 17:20:14 -05:00
|
|
|
return errDaemonNotStarted
|
2016-12-09 04:17:53 -05:00
|
|
|
}
|
|
|
|
return d.cmd.Process.Signal(signal)
|
|
|
|
}
|
|
|
|
|
2016-07-27 14:17:44 -04:00
|
|
|
// DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its
|
|
|
|
// stack to its log file and exit
|
|
|
|
// This is used primarily for gathering debug information on test timeout
|
|
|
|
func (d *Daemon) DumpStackAndQuit() {
|
|
|
|
if d.cmd == nil || d.cmd.Process == nil {
|
|
|
|
return
|
|
|
|
}
|
2016-12-09 04:17:53 -05:00
|
|
|
SignalDaemonDump(d.cmd.Process.Pid)
|
2016-07-27 14:17:44 -04:00
|
|
|
}
|
|
|
|
|
2016-03-04 03:29:24 -05:00
|
|
|
// Stop will send a SIGINT every second and wait for the daemon to stop.
|
2016-12-09 17:20:14 -05:00
|
|
|
// If it times out, a SIGKILL is sent.
|
|
|
|
// Stop will not delete the daemon directory. If a purged daemon is needed,
|
|
|
|
// instantiate a new one with NewDaemon.
|
|
|
|
// If an error occurs while starting the daemon, the test will fail.
|
2016-12-14 04:52:51 -05:00
|
|
|
func (d *Daemon) Stop(t testingT) {
|
2016-12-09 17:20:14 -05:00
|
|
|
err := d.StopWithError()
|
|
|
|
if err != nil {
|
|
|
|
if err != errDaemonNotStarted {
|
2016-12-14 04:52:51 -05:00
|
|
|
t.Fatalf("Error while stopping the daemon %s : %v", d.id, err)
|
2016-12-09 17:20:14 -05:00
|
|
|
} else {
|
2016-12-14 04:52:51 -05:00
|
|
|
t.Logf("Daemon %s is not started", d.id)
|
2016-12-09 17:20:14 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// StopWithError will send a SIGINT every second and wait for the daemon to stop.
|
2016-03-04 03:29:24 -05:00
|
|
|
// If it timeouts, a SIGKILL is sent.
|
|
|
|
// Stop will not delete the daemon directory. If a purged daemon is needed,
|
|
|
|
// instantiate a new one with NewDaemon.
|
2016-12-09 17:20:14 -05:00
|
|
|
func (d *Daemon) StopWithError() error {
|
2016-12-09 04:17:53 -05:00
|
|
|
if d.cmd == nil || d.Wait == nil {
|
2016-12-09 17:20:14 -05:00
|
|
|
return errDaemonNotStarted
|
2016-03-04 03:29:24 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
defer func() {
|
|
|
|
d.logFile.Close()
|
|
|
|
d.cmd = nil
|
|
|
|
}()
|
|
|
|
|
|
|
|
i := 1
|
|
|
|
tick := time.Tick(time.Second)
|
|
|
|
|
|
|
|
if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
|
2016-12-09 17:20:14 -05:00
|
|
|
if strings.Contains(err.Error(), "os: process already finished") {
|
|
|
|
return errDaemonNotStarted
|
|
|
|
}
|
2016-12-09 04:18:02 -05:00
|
|
|
return errors.Errorf("could not send signal: %v", err)
|
2016-03-04 03:29:24 -05:00
|
|
|
}
|
|
|
|
out1:
|
|
|
|
for {
|
|
|
|
select {
|
2016-12-09 04:17:53 -05:00
|
|
|
case err := <-d.Wait:
|
2016-03-04 03:29:24 -05:00
|
|
|
return err
|
2016-06-13 22:54:20 -04:00
|
|
|
case <-time.After(20 * time.Second):
|
2016-03-04 03:29:24 -05:00
|
|
|
// time for stopping jobs and run onShutdown hooks
|
2016-12-14 04:52:51 -05:00
|
|
|
d.log.Logf("[%s] daemon started", d.id)
|
2016-03-04 03:29:24 -05:00
|
|
|
break out1
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
out2:
|
|
|
|
for {
|
|
|
|
select {
|
2016-12-09 04:17:53 -05:00
|
|
|
case err := <-d.Wait:
|
2016-03-04 03:29:24 -05:00
|
|
|
return err
|
|
|
|
case <-tick:
|
|
|
|
i++
|
2016-06-13 22:54:20 -04:00
|
|
|
if i > 5 {
|
2016-12-14 04:52:51 -05:00
|
|
|
d.log.Logf("tried to interrupt daemon for %d times, now try to kill it", i)
|
2016-03-04 03:29:24 -05:00
|
|
|
break out2
|
|
|
|
}
|
2016-12-14 04:52:51 -05:00
|
|
|
d.log.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid)
|
2016-03-04 03:29:24 -05:00
|
|
|
if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
|
2016-12-09 04:18:02 -05:00
|
|
|
return errors.Errorf("could not send signal: %v", err)
|
2016-03-04 03:29:24 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := d.cmd.Process.Kill(); err != nil {
|
2016-12-14 04:52:51 -05:00
|
|
|
d.log.Logf("Could not kill daemon: %v", err)
|
2016-03-04 03:29:24 -05:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-09-22 09:52:41 -04:00
|
|
|
d.cmd.Wait()
|
|
|
|
|
2016-12-09 04:17:53 -05:00
|
|
|
if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder)); err != nil {
|
2016-03-18 14:50:19 -04:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-03-04 03:29:24 -05:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-12-09 17:20:14 -05:00
|
|
|
// Restart will restart the daemon by first stopping it and the starting it.
// If an error occurs while starting the daemon, the test will fail.
func (d *Daemon) Restart(t testingT, args ...string) {
	d.Stop(t)
	// Undo the userns root suffix before restarting (see handleUserns).
	d.handleUserns()
	d.Start(t, args...)
}
|
|
|
|
|
|
|
|
// RestartWithError will restart the daemon by first stopping it and then starting it.
|
|
|
|
func (d *Daemon) RestartWithError(arg ...string) error {
|
|
|
|
if err := d.StopWithError(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
d.handleUserns()
|
|
|
|
return d.StartWithError(arg...)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (d *Daemon) handleUserns() {
|
2016-03-04 03:29:24 -05:00
|
|
|
// in the case of tests running a user namespace-enabled daemon, we have resolved
|
2016-12-09 04:17:53 -05:00
|
|
|
// d.Root to be the actual final path of the graph dir after the "uid.gid" of
|
2016-03-04 03:29:24 -05:00
|
|
|
// remapped root is added--we need to subtract it from the path before calling
|
|
|
|
// start or else we will continue making subdirectories rather than truly restarting
|
|
|
|
// with the same location/root:
|
|
|
|
if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
|
2016-12-09 04:17:53 -05:00
|
|
|
d.Root = filepath.Dir(d.Root)
|
2016-03-04 03:29:24 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-25 16:51:41 -04:00
|
|
|
// LoadBusybox image into the daemon
|
|
|
|
func (d *Daemon) LoadBusybox(t testingT) {
|
|
|
|
clientHost, err := client.NewEnvClient()
|
|
|
|
require.NoError(t, err, "failed to create client")
|
|
|
|
defer clientHost.Close()
|
|
|
|
|
|
|
|
ctx := context.Background()
|
|
|
|
reader, err := clientHost.ImageSave(ctx, []string{"busybox:latest"})
|
|
|
|
require.NoError(t, err, "failed to download busybox")
|
|
|
|
defer reader.Close()
|
|
|
|
|
|
|
|
client, err := d.NewClient()
|
|
|
|
require.NoError(t, err, "failed to create client")
|
|
|
|
defer client.Close()
|
|
|
|
|
|
|
|
resp, err := client.ImageLoad(ctx, reader, true)
|
|
|
|
require.NoError(t, err, "failed to load busybox")
|
|
|
|
defer resp.Body.Close()
|
2016-03-04 03:29:24 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// queryRootDir asks the daemon's /info endpoint for its root directory.
// This supports user-namespaced daemons, whose effective root is the
// remapped "uid.gid" subdirectory rather than the configured path.
func (d *Daemon) queryRootDir() (string, error) {
	// update daemon root by asking /info endpoint (to support user
	// namespaced daemon with root remapped uid.gid directory)
	clientConfig, err := d.getClientConfig()
	if err != nil {
		return "", err
	}

	client := &http.Client{
		Transport: clientConfig.transport,
	}

	req, err := http.NewRequest("GET", "/info", nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("Content-Type", "application/json")
	// Point the request at this daemon's socket/address.
	req.URL.Host = clientConfig.addr
	req.URL.Scheme = clientConfig.scheme

	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	// Wrap the body so ReadBody below closes it when done.
	body := ioutils.NewReadCloserWrapper(resp.Body, func() error {
		return resp.Body.Close()
	})

	// Only the DockerRootDir field of the /info payload is needed.
	type Info struct {
		DockerRootDir string
	}
	var b []byte
	var i Info
	b, err = request.ReadBody(body)
	if err == nil && resp.StatusCode == http.StatusOK {
		// read the docker root dir
		if err = json.Unmarshal(b, &i); err == nil {
			return i.DockerRootDir, nil
		}
	}
	return "", err
}
|
|
|
|
|
2016-12-09 04:17:53 -05:00
|
|
|
// Sock returns the socket path of the daemon
|
|
|
|
func (d *Daemon) Sock() string {
|
2016-07-28 10:19:09 -04:00
|
|
|
return fmt.Sprintf("unix://" + d.sockPath())
|
|
|
|
}
|
|
|
|
|
|
|
|
func (d *Daemon) sockPath() string {
|
2016-12-09 04:17:53 -05:00
|
|
|
return filepath.Join(SockRoot, d.id+".sock")
|
2016-03-04 03:29:24 -05:00
|
|
|
}
|
|
|
|
|
2016-12-09 04:17:53 -05:00
|
|
|
// WaitRun waits for a container to be running for 10s
|
|
|
|
func (d *Daemon) WaitRun(contID string) error {
|
|
|
|
args := []string{"--host", d.Sock()}
|
|
|
|
return WaitInspectWithArgs(d.dockerBinary, contID, "{{.State.Running}}", "true", 10*time.Second, args...)
|
2016-03-04 03:29:24 -05:00
|
|
|
}
|
|
|
|
|
2017-08-22 17:07:52 -04:00
|
|
|
// Info returns the info struct for this daemon
|
|
|
|
func (d *Daemon) Info(t require.TestingT) types.Info {
|
|
|
|
apiclient, err := request.NewClientForHost(d.Sock())
|
|
|
|
require.NoError(t, err)
|
|
|
|
info, err := apiclient.Info(context.Background())
|
|
|
|
require.NoError(t, err)
|
|
|
|
return info
|
2016-03-04 03:29:24 -05:00
|
|
|
}
|
|
|
|
|
2017-01-19 10:55:51 -05:00
|
|
|
// Cmd executes a docker CLI command against this daemon.
|
2016-03-04 03:29:24 -05:00
|
|
|
// Example: d.Cmd("version") will run docker -H unix://path/to/unix.sock version
|
2016-07-22 12:14:05 -04:00
|
|
|
func (d *Daemon) Cmd(args ...string) (string, error) {
|
2017-01-16 10:39:12 -05:00
|
|
|
result := icmd.RunCmd(d.Command(args...))
|
|
|
|
return result.Combined(), result.Error
|
2016-03-04 03:29:24 -05:00
|
|
|
}
|
|
|
|
|
2017-01-19 10:55:51 -05:00
|
|
|
// Command creates a docker CLI command against this daemon, to be executed later.
|
|
|
|
// Example: d.Command("version") creates a command to run "docker -H unix://path/to/unix.sock version"
|
2017-01-16 10:39:12 -05:00
|
|
|
func (d *Daemon) Command(args ...string) icmd.Cmd {
|
|
|
|
return icmd.Command(d.dockerBinary, d.PrependHostArg(args)...)
|
2016-10-24 17:44:51 -04:00
|
|
|
}
|
|
|
|
|
2016-12-09 04:17:53 -05:00
|
|
|
// PrependHostArg prepend the specified arguments by the daemon host flags
|
|
|
|
func (d *Daemon) PrependHostArg(args []string) []string {
|
2016-07-22 12:14:05 -04:00
|
|
|
for _, arg := range args {
|
|
|
|
if arg == "--host" || arg == "-H" {
|
|
|
|
return args
|
|
|
|
}
|
|
|
|
}
|
2016-12-09 04:17:53 -05:00
|
|
|
return append([]string{"--host", d.Sock()}, args...)
|
2016-03-04 03:29:24 -05:00
|
|
|
}
|
|
|
|
|
2016-06-13 22:54:20 -04:00
|
|
|
// SockRequest executes a socket request on a daemon and returns statuscode and output.
|
|
|
|
func (d *Daemon) SockRequest(method, endpoint string, data interface{}) (int, []byte, error) {
|
|
|
|
jsonData := bytes.NewBuffer(nil)
|
|
|
|
if err := json.NewEncoder(jsonData).Encode(data); err != nil {
|
|
|
|
return -1, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
res, body, err := d.SockRequestRaw(method, endpoint, jsonData, "application/json")
|
|
|
|
if err != nil {
|
|
|
|
return -1, nil, err
|
|
|
|
}
|
2017-08-21 18:50:40 -04:00
|
|
|
b, err := request.ReadBody(body)
|
2016-06-13 22:54:20 -04:00
|
|
|
return res.StatusCode, b, err
|
|
|
|
}
|
|
|
|
|
2016-11-18 02:51:36 -05:00
|
|
|
// SockRequestRaw executes a socket request on a daemon and returns an http
// response and a reader for the output data. The request is sent over this
// daemon's unix socket; the caller is responsible for closing the reader.
// Deprecated: use request package instead
func (d *Daemon) SockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) {
	return request.SockRequestRaw(method, endpoint, data, ct, d.Sock())
}
|
|
|
|
|
2016-12-20 22:03:39 -05:00
|
|
|
// LogFileName returns the path of the daemon's log file.
func (d *Daemon) LogFileName() string {
	return d.logFile.Name()
}
|
|
|
|
|
2016-12-09 04:17:53 -05:00
|
|
|
// GetIDByName returns the ID of an object (container, volume, …) given its
// name, by inspecting its "Id" field through this daemon.
func (d *Daemon) GetIDByName(name string) (string, error) {
	return d.inspectFieldWithError(name, "Id")
}
|
|
|
|
|
2016-12-09 04:17:53 -05:00
|
|
|
// ActiveContainers returns the list of ids of the currently running containers
|
|
|
|
func (d *Daemon) ActiveContainers() (ids []string) {
|
|
|
|
// FIXME(vdemeester) shouldn't ignore the error
|
2016-06-13 22:54:20 -04:00
|
|
|
out, _ := d.Cmd("ps", "-q")
|
|
|
|
for _, id := range strings.Split(out, "\n") {
|
|
|
|
if id = strings.TrimSpace(id); id != "" {
|
|
|
|
ids = append(ids, id)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2016-12-09 04:17:53 -05:00
|
|
|
// ReadLogFile returns the content of the daemon log file. The file is
// re-read from disk on every call, so it reflects all output written so far.
func (d *Daemon) ReadLogFile() ([]byte, error) {
	return ioutil.ReadFile(d.logFile.Name())
}
|
|
|
|
|
2016-12-28 20:08:03 -05:00
|
|
|
// InspectField returns the value selected by the go-template expression
// 'filter' from `docker inspect` of the named object on this daemon.
func (d *Daemon) InspectField(name, filter string) (string, error) {
	return d.inspectFilter(name, filter)
}
|
|
|
|
|
2016-03-04 03:29:24 -05:00
|
|
|
func (d *Daemon) inspectFilter(name, filter string) (string, error) {
|
|
|
|
format := fmt.Sprintf("{{%s}}", filter)
|
|
|
|
out, err := d.Cmd("inspect", "-f", format, name)
|
|
|
|
if err != nil {
|
2016-12-09 04:18:02 -05:00
|
|
|
return "", errors.Errorf("failed to inspect %s: %s", name, out)
|
2016-03-04 03:29:24 -05:00
|
|
|
}
|
|
|
|
return strings.TrimSpace(out), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// inspectFieldWithError inspects 'field' of the named object on this daemon,
// turning the field name into a ".field" go-template selector.
func (d *Daemon) inspectFieldWithError(name, field string) (string, error) {
	return d.inspectFilter(name, fmt.Sprintf(".%s", field))
}
|
|
|
|
|
2016-12-09 04:17:53 -05:00
|
|
|
// FindContainerIP returns the ip of the specified container
|
2016-12-09 04:18:02 -05:00
|
|
|
func (d *Daemon) FindContainerIP(id string) (string, error) {
|
|
|
|
out, err := d.Cmd("inspect", "--format='{{ .NetworkSettings.Networks.bridge.IPAddress }}'", id)
|
2016-03-04 03:29:24 -05:00
|
|
|
if err != nil {
|
2016-12-09 04:18:02 -05:00
|
|
|
return "", err
|
2016-03-04 03:29:24 -05:00
|
|
|
}
|
2016-12-09 04:18:02 -05:00
|
|
|
return strings.Trim(out, " \r\n'"), nil
|
2016-03-04 03:29:24 -05:00
|
|
|
}
|
2016-05-24 04:13:54 -04:00
|
|
|
|
2016-12-09 04:17:53 -05:00
|
|
|
// BuildImageWithOut builds an image with the specified dockerfile and options and returns the output
|
|
|
|
func (d *Daemon) BuildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, int, error) {
|
|
|
|
buildCmd := BuildImageCmdWithHost(d.dockerBinary, name, dockerfile, d.Sock(), useCache, buildFlags...)
|
|
|
|
result := icmd.RunCmd(icmd.Cmd{
|
|
|
|
Command: buildCmd.Args,
|
|
|
|
Env: buildCmd.Env,
|
|
|
|
Dir: buildCmd.Dir,
|
|
|
|
Stdin: buildCmd.Stdin,
|
|
|
|
Stdout: buildCmd.Stdout,
|
|
|
|
})
|
|
|
|
return result.Combined(), result.ExitCode, result.Error
|
2016-05-24 04:13:54 -04:00
|
|
|
}
|
2016-06-13 22:54:20 -04:00
|
|
|
|
2016-12-09 04:17:53 -05:00
|
|
|
// CheckActiveContainerCount returns the number of active containers
|
|
|
|
// FIXME(vdemeester) should re-use ActivateContainers in some way
|
|
|
|
func (d *Daemon) CheckActiveContainerCount(c *check.C) (interface{}, check.CommentInterface) {
|
2016-06-13 22:54:20 -04:00
|
|
|
out, err := d.Cmd("ps", "-q")
|
|
|
|
c.Assert(err, checker.IsNil)
|
|
|
|
if len(strings.TrimSpace(out)) == 0 {
|
|
|
|
return 0, nil
|
|
|
|
}
|
|
|
|
return len(strings.Split(strings.TrimSpace(out), "\n")), check.Commentf("output: %q", string(out))
|
|
|
|
}
|
2016-07-28 11:58:06 -04:00
|
|
|
|
2016-12-09 04:17:53 -05:00
|
|
|
// ReloadConfig asks the daemon to reload its configuration
|
|
|
|
func (d *Daemon) ReloadConfig() error {
|
2016-07-28 11:58:06 -04:00
|
|
|
if d.cmd == nil || d.cmd.Process == nil {
|
2016-12-09 04:18:02 -05:00
|
|
|
return errors.New("daemon is not running")
|
2016-07-28 11:58:06 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
errCh := make(chan error)
|
|
|
|
started := make(chan struct{})
|
|
|
|
go func() {
|
2017-03-06 10:35:27 -05:00
|
|
|
_, body, err := request.DoOnHost(d.Sock(), "/events", request.Method(http.MethodGet))
|
2016-07-28 11:58:06 -04:00
|
|
|
close(started)
|
|
|
|
if err != nil {
|
|
|
|
errCh <- err
|
|
|
|
}
|
|
|
|
defer body.Close()
|
|
|
|
dec := json.NewDecoder(body)
|
|
|
|
for {
|
|
|
|
var e events.Message
|
|
|
|
if err := dec.Decode(&e); err != nil {
|
|
|
|
errCh <- err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if e.Type != events.DaemonEventType {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if e.Action != "reload" {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
close(errCh) // notify that we are done
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
<-started
|
|
|
|
if err := signalDaemonReload(d.cmd.Process.Pid); err != nil {
|
2016-12-09 04:18:02 -05:00
|
|
|
return errors.Errorf("error signaling daemon reload: %v", err)
|
2016-07-28 11:58:06 -04:00
|
|
|
}
|
|
|
|
select {
|
|
|
|
case err := <-errCh:
|
|
|
|
if err != nil {
|
2016-12-09 04:18:02 -05:00
|
|
|
return errors.Errorf("error waiting for daemon reload event: %v", err)
|
2016-07-28 11:58:06 -04:00
|
|
|
}
|
|
|
|
case <-time.After(30 * time.Second):
|
2016-12-09 04:18:02 -05:00
|
|
|
return errors.New("timeout waiting for daemon reload event")
|
2016-07-28 11:58:06 -04:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2016-12-09 04:17:53 -05:00
|
|
|
|
2017-05-23 23:56:26 -04:00
|
|
|
// NewClient creates new client based on daemon's socket path
|
|
|
|
func (d *Daemon) NewClient() (*client.Client, error) {
|
|
|
|
httpClient, err := request.NewHTTPClient(d.Sock())
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return client.NewClient(d.Sock(), api.DefaultVersion, httpClient, nil)
|
|
|
|
}
|
|
|
|
|
2016-12-09 04:17:53 -05:00
|
|
|
// WaitInspectWithArgs waits for the specified expression to be equals to the specified expected string in the given time.
|
2017-04-11 15:18:30 -04:00
|
|
|
// Deprecated: use cli.WaitCmd instead
|
2016-12-09 04:17:53 -05:00
|
|
|
func WaitInspectWithArgs(dockerBinary, name, expr, expected string, timeout time.Duration, arg ...string) error {
|
|
|
|
after := time.After(timeout)
|
|
|
|
|
|
|
|
args := append(arg, "inspect", "-f", expr, name)
|
|
|
|
for {
|
|
|
|
result := icmd.RunCommand(dockerBinary, args...)
|
|
|
|
if result.Error != nil {
|
2016-12-10 03:03:38 -05:00
|
|
|
if !strings.Contains(strings.ToLower(result.Stderr()), "no such") {
|
2016-12-09 04:18:02 -05:00
|
|
|
return errors.Errorf("error executing docker inspect: %v\n%s",
|
2016-12-09 04:17:53 -05:00
|
|
|
result.Stderr(), result.Stdout())
|
|
|
|
}
|
|
|
|
select {
|
|
|
|
case <-after:
|
|
|
|
return result.Error
|
|
|
|
default:
|
|
|
|
time.Sleep(10 * time.Millisecond)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
out := strings.TrimSpace(result.Stdout())
|
|
|
|
if out == expected {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-after:
|
2016-12-09 04:18:02 -05:00
|
|
|
return errors.Errorf("condition \"%q == %q\" not true in time (%v)", out, expected, timeout)
|
2016-12-09 04:17:53 -05:00
|
|
|
default:
|
|
|
|
}
|
|
|
|
|
|
|
|
time.Sleep(100 * time.Millisecond)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// BuildImageCmdWithHost create a build command with the specified arguments.
// The returned command runs `docker build -t name -` (optionally preceded by
// --host and followed by --no-cache / extra build flags) with the dockerfile
// supplied on stdin.
// Deprecated
// FIXME(vdemeester) move this away
func BuildImageCmdWithHost(dockerBinary, name, dockerfile, host string, useCache bool, buildFlags ...string) *exec.Cmd {
	// nil slice is idiomatic; append handles it transparently.
	var args []string
	if host != "" {
		args = append(args, "--host", host)
	}
	args = append(args, "build", "-t", name)
	if !useCache {
		args = append(args, "--no-cache")
	}
	args = append(args, buildFlags...)
	// Trailing "-" makes docker read the build context from stdin.
	args = append(args, "-")
	buildCmd := exec.Command(dockerBinary, args...)
	buildCmd.Stdin = strings.NewReader(dockerfile)
	return buildCmd
}
|