Merge pull request #29465 from docker/1.13.0-rc4-cherrypicks

1.13.0-rc4 cherry-picks: part2
This commit is contained in:
Victor Vieux 2016-12-16 13:12:57 -08:00 committed by GitHub
commit f3749c5a9c
45 changed files with 608 additions and 177 deletions

View File

@ -218,6 +218,7 @@ To manually remove all plugins and resolve this problem, take the following step
- Deprecate unversioned API endpoints [#28208](https://github.com/docker/docker/pull/28208)
- Remove Ubuntu 15.10 (Wily Werewolf) as supported platform. Ubuntu 15.10 is EOL, and no longer receives updates [#27042](https://github.com/docker/docker/pull/27042)
- Remove Fedora 22 as supported platform. Fedora 22 is EOL, and no longer receives updates [#27432](https://github.com/docker/docker/pull/27432)
- Remove Fedora 23 as supported platform. Fedora 23 is EOL, and no longer receives updates [#29455](https://github.com/docker/docker/pull/29455)
- Deprecate the `repo:shortid` syntax on `docker pull` [#27207](https://github.com/docker/docker/pull/27207)
- Deprecate backing filesystem without `d_type` for overlay and overlay2 storage drivers [#27433](https://github.com/docker/docker/pull/27433)
- Deprecate `MAINTAINER` in Dockerfile [#25466](https://github.com/docker/docker/pull/25466)

View File

@ -80,7 +80,7 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
return err
}
if _, err := n.clusterProvider.GetNetwork(create.Name); err == nil {
if nws, err := n.clusterProvider.GetNetworksByName(create.Name); err == nil && len(nws) > 0 {
return libnetwork.NetworkNameError(create.Name)
}
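This hunk replaces a `GetNetwork` lookup, which resolves by name *or* ID prefix, with an exact-name check: previously, creating a network whose name happened to be the prefix of an existing network's ID was wrongly rejected (see the #27866 test case further down). A minimal, self-contained sketch of the exact-name semantics the new check relies on; the map-based lookup here is illustrative, not the daemon's actual data structure:

```go
package main

import "fmt"

// networksByName mimics exact-name matching: an ID prefix alone is not
// a conflict, only an identical name is.
func networksByName(idToName map[string]string, name string) []string {
	var ids []string
	for id, n := range idToName {
		if n == name {
			ids = append(ids, id)
		}
	}
	return ids
}

func main() {
	nets := map[string]string{"n1f4cjhkcmmu": "ingress"}
	// "n1" is a prefix of an existing network ID but not an existing
	// name, so creating a network called "n1" must be allowed.
	fmt.Println(len(networksByName(nets, "n1")) == 0)      // true
	fmt.Println(len(networksByName(nets, "ingress")) > 0) // true: real conflict
}
```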

View File

@ -19,7 +19,7 @@ func newLeaveCommand(dockerCli *command.DockerCli) *cobra.Command {
cmd := &cobra.Command{
Use: "leave [OPTIONS]",
Short: "Leave the swarm (workers only)",
Short: "Leave the swarm",
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return runLeave(dockerCli, opts)

View File

@ -23,6 +23,12 @@ func newUpdateCommand(dockerCli *command.DockerCli) *cobra.Command {
RunE: func(cmd *cobra.Command, args []string) error {
return runUpdate(dockerCli, cmd.Flags(), opts)
},
PreRunE: func(cmd *cobra.Command, args []string) error {
if cmd.Flags().NFlag() == 0 {
return pflag.ErrHelp
}
return nil
},
}
cmd.Flags().BoolVar(&opts.autolock, flagAutolock, false, "Change manager autolocking setting (true|false)")
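The added `PreRunE` hook returns `pflag.ErrHelp` when `docker swarm update` is invoked with no flags; cobra treats that sentinel as a request to print the command's usage text rather than as a failure (the `TestSwarmUpdateWithoutArgs` case later in this commit asserts exactly that). A minimal standalone sketch of the same pattern, with a hypothetical command:

```go
package main

import (
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)

func main() {
	var autolock bool
	cmd := &cobra.Command{
		Use:   "update [OPTIONS]",
		Short: "Update settings",
		// With zero flags set there is nothing to update; returning
		// pflag.ErrHelp makes cobra print usage instead of running.
		PreRunE: func(cmd *cobra.Command, args []string) error {
			if cmd.Flags().NFlag() == 0 {
				return pflag.ErrHelp
			}
			return nil
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			return nil // apply the update here
		},
	}
	cmd.Flags().BoolVar(&autolock, "autolock", false, "Change autolock setting")
	cmd.Execute()
}
```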

View File

@ -1,19 +0,0 @@
#
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"!
#
FROM fedora:23
RUN dnf -y upgrade
RUN dnf install -y @development-tools fedora-packager
RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common
ENV GO_VERSION 1.7.3
RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux
ENV RUNC_BUILDTAGS seccomp selinux

View File

@ -4,8 +4,8 @@ set -e
# usage: ./generate.sh [versions]
# ie: ./generate.sh
# to update all Dockerfiles in this directory
# or: ./generate.sh fedora-23
# to only update fedora-23/Dockerfile
# or: ./generate.sh centos-7
# to only update centos-7/Dockerfile
# or: ./generate.sh fedora-newversion
# to create a new folder and a Dockerfile within it

View File

@ -3514,8 +3514,10 @@ _docker_stack() {
_docker_stack_deploy() {
case "$prev" in
--bundle-file)
_filedir dab
return
if __docker_is_experimental ; then
_filedir dab
return
fi
;;
--compose-file|-c)
_filedir yml
@ -3525,7 +3527,9 @@ _docker_stack_deploy() {
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--bundle-file --compose-file -c --help --with-registry-auth" -- "$cur" ) )
local options="--compose-file -c --help --with-registry-auth"
__docker_is_experimental && options+=" --bundle-file"
COMPREPLY=( $( compgen -W "$options" -- "$cur" ) )
;;
esac
}

View File

@ -1556,8 +1556,7 @@ func (c *Cluster) GetNetwork(input string) (apitypes.NetworkResource, error) {
return convert.BasicNetworkFromGRPC(*network), nil
}
// GetNetworks returns all current cluster managed networks.
func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) {
func (c *Cluster) getNetworks(filters *swarmapi.ListNetworksRequest_Filters) ([]apitypes.NetworkResource, error) {
c.RLock()
defer c.RUnlock()
@ -1568,7 +1567,7 @@ func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) {
ctx, cancel := c.getRequestContext()
defer cancel()
r, err := c.client.ListNetworks(ctx, &swarmapi.ListNetworksRequest{})
r, err := c.client.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: filters})
if err != nil {
return nil, err
}
@ -1582,6 +1581,21 @@ func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) {
return networks, nil
}
// GetNetworks returns all current cluster managed networks.
func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) {
return c.getNetworks(nil)
}
// GetNetworksByName returns cluster managed networks by name.
// It is ok to have multiple networks here. #18864
func (c *Cluster) GetNetworksByName(name string) ([]apitypes.NetworkResource, error) {
// Note that swarmapi.GetNetworkRequest.Name is not functional.
// So we cannot just use that with c.GetNetwork.
return c.getNetworks(&swarmapi.ListNetworksRequest_Filters{
Names: []string{name},
})
}
func attacherKey(target, containerID string) string {
return containerID + ":" + target
}
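`GetNetworksByName` pushes the name filter down to swarmkit via `ListNetworksRequest_Filters` (the `GetNetworkRequest.Name` field is non-functional, per the comment), and it deliberately returns a slice: several cluster networks may legally share one name (#18864). A standalone sketch, using a pared-down stand-in for `apitypes.NetworkResource`, of how a caller has to disambiguate:

```go
package main

import "fmt"

// NetworkResource is a pared-down stand-in for api/types.NetworkResource.
type NetworkResource struct {
	ID   string
	Name string
}

// resolveByName mirrors the GetNetworksByName contract: callers must
// handle zero, one, or many matches explicitly.
func resolveByName(nws []NetworkResource, name string) (string, error) {
	var matches []NetworkResource
	for _, nw := range nws {
		if nw.Name == name {
			matches = append(matches, nw)
		}
	}
	switch len(matches) {
	case 0:
		return "", fmt.Errorf("network %s not found", name)
	case 1:
		return matches[0].ID, nil
	default:
		return "", fmt.Errorf("network %s is ambiguous: %d matches", name, len(matches))
	}
}

func main() {
	nws := []NetworkResource{{ID: "abc123", Name: "backend"}, {ID: "def456", Name: "backend"}}
	if _, err := resolveByName(nws, "backend"); err != nil {
		fmt.Println(err) // network backend is ambiguous: 2 matches
	}
}
```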

View File

@ -149,6 +149,10 @@ func (daemon *Daemon) restore() error {
continue
}
container.RWLayer = rwlayer
if err := daemon.Mount(container); err != nil {
logrus.Errorf("Failed to mount container %v: %v", id, err)
continue
}
logrus.Debugf("Loaded container %v", container.ID)
containers[container.ID] = container
@ -790,6 +794,12 @@ func (daemon *Daemon) Shutdown() error {
})
}
if daemon.volumes != nil {
if err := daemon.volumes.Shutdown(); err != nil {
logrus.Errorf("Error shutting down volume store: %v", err)
}
}
if daemon.layerStore != nil {
if err := daemon.layerStore.Cleanup(); err != nil {
logrus.Errorf("Error during layer Store.Cleanup(): %v", err)

View File

@ -154,10 +154,12 @@ func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.I
if initFunc, exists := drivers[name]; exists {
return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps)
}
if pluginDriver, err := lookupPlugin(name, home, options, pg); err == nil {
pluginDriver, err := lookupPlugin(name, home, options, pg)
if err == nil {
return pluginDriver, nil
}
logrus.Errorf("Failed to GetDriver graph %s %s", name, home)
logrus.WithError(err).WithField("driver", name).WithField("home-dir", home).Error("Failed to GetDriver graph")
return nil, ErrNotSupported
}
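The replacement log line trades `Errorf` string interpolation for structured fields, so the driver name, home directory, and underlying error stay machine-filterable. A minimal sketch of the same logrus idiom; the values are placeholders:

```go
package main

import (
	"errors"

	"github.com/Sirupsen/logrus" // import path used in this tree
)

func main() {
	err := errors.New("plugin not found")
	// WithError/WithField attach context as discrete fields instead of
	// formatting everything into the message string.
	logrus.WithError(err).
		WithField("driver", "overlay2").
		WithField("home-dir", "/var/lib/docker").
		Error("Failed to GetDriver graph")
}
```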

View File

@ -6,6 +6,7 @@ import (
"path/filepath"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/plugin/v2"
)
type pluginClient interface {
@ -26,6 +27,13 @@ func lookupPlugin(name, home string, opts []string, pg plugingetter.PluginGetter
}
func newPluginDriver(name, home string, opts []string, pl plugingetter.CompatPlugin) (Driver, error) {
proxy := &graphDriverProxy{name, pl.Client(), pl}
if !pl.IsV1() {
if p, ok := pl.(*v2.Plugin); ok {
if p.PropagatedMount != "" {
home = p.PluginObj.Config.PropagatedMount
}
}
}
proxy := &graphDriverProxy{name, pl}
return proxy, proxy.Init(filepath.Join(home, name), opts)
}

View File

@ -4,15 +4,15 @@ import (
"errors"
"fmt"
"io"
"path/filepath"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/plugingetter"
)
type graphDriverProxy struct {
name string
client pluginClient
p plugingetter.CompatPlugin
name string
p plugingetter.CompatPlugin
}
type graphDriverRequest struct {
@ -48,7 +48,7 @@ func (d *graphDriverProxy) Init(home string, opts []string) error {
Opts: opts,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Init", args, &ret); err != nil {
if err := d.p.Client().Call("GraphDriver.Init", args, &ret); err != nil {
return err
}
if ret.Err != "" {
@ -73,7 +73,7 @@ func (d *graphDriverProxy) CreateReadWrite(id, parent string, opts *CreateOpts)
MountLabel: mountLabel,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.CreateReadWrite", args, &ret); err != nil {
if err := d.p.Client().Call("GraphDriver.CreateReadWrite", args, &ret); err != nil {
return err
}
if ret.Err != "" {
@ -93,7 +93,7 @@ func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error {
MountLabel: mountLabel,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Create", args, &ret); err != nil {
if err := d.p.Client().Call("GraphDriver.Create", args, &ret); err != nil {
return err
}
if ret.Err != "" {
@ -105,7 +105,7 @@ func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error {
func (d *graphDriverProxy) Remove(id string) error {
args := &graphDriverRequest{ID: id}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Remove", args, &ret); err != nil {
if err := d.p.Client().Call("GraphDriver.Remove", args, &ret); err != nil {
return err
}
if ret.Err != "" {
@ -120,20 +120,20 @@ func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) {
MountLabel: mountLabel,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Get", args, &ret); err != nil {
if err := d.p.Client().Call("GraphDriver.Get", args, &ret); err != nil {
return "", err
}
var err error
if ret.Err != "" {
err = errors.New(ret.Err)
}
return ret.Dir, err
return filepath.Join(d.p.BasePath(), ret.Dir), err
}
func (d *graphDriverProxy) Put(id string) error {
args := &graphDriverRequest{ID: id}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Put", args, &ret); err != nil {
if err := d.p.Client().Call("GraphDriver.Put", args, &ret); err != nil {
return err
}
if ret.Err != "" {
@ -145,7 +145,7 @@ func (d *graphDriverProxy) Put(id string) error {
func (d *graphDriverProxy) Exists(id string) bool {
args := &graphDriverRequest{ID: id}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Exists", args, &ret); err != nil {
if err := d.p.Client().Call("GraphDriver.Exists", args, &ret); err != nil {
return false
}
return ret.Exists
@ -154,7 +154,7 @@ func (d *graphDriverProxy) Exists(id string) bool {
func (d *graphDriverProxy) Status() [][2]string {
args := &graphDriverRequest{}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Status", args, &ret); err != nil {
if err := d.p.Client().Call("GraphDriver.Status", args, &ret); err != nil {
return nil
}
return ret.Status
@ -165,7 +165,7 @@ func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) {
ID: id,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.GetMetadata", args, &ret); err != nil {
if err := d.p.Client().Call("GraphDriver.GetMetadata", args, &ret); err != nil {
return nil, err
}
if ret.Err != "" {
@ -184,7 +184,7 @@ func (d *graphDriverProxy) Cleanup() error {
args := &graphDriverRequest{}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil {
if err := d.p.Client().Call("GraphDriver.Cleanup", args, &ret); err != nil {
return nil
}
if ret.Err != "" {
@ -198,7 +198,7 @@ func (d *graphDriverProxy) Diff(id, parent string) (io.ReadCloser, error) {
ID: id,
Parent: parent,
}
body, err := d.client.Stream("GraphDriver.Diff", args)
body, err := d.p.Client().Stream("GraphDriver.Diff", args)
if err != nil {
return nil, err
}
@ -211,7 +211,7 @@ func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error)
Parent: parent,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Changes", args, &ret); err != nil {
if err := d.p.Client().Call("GraphDriver.Changes", args, &ret); err != nil {
return nil, err
}
if ret.Err != "" {
@ -223,7 +223,7 @@ func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error)
func (d *graphDriverProxy) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
var ret graphDriverResponse
if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil {
if err := d.p.Client().SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil {
return -1, err
}
if ret.Err != "" {
@ -238,7 +238,7 @@ func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) {
Parent: parent,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.DiffSize", args, &ret); err != nil {
if err := d.p.Client().Call("GraphDriver.DiffSize", args, &ret); err != nil {
return -1, err
}
if ret.Err != "" {
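Two things change throughout this proxy: it stops caching a `pluginClient` and instead asks `d.p.Client()` on every call, so a plugin restart can hand back a fresh client; and `Get` now joins the returned mount directory with `d.p.BasePath()`, because a v2 plugin reports paths relative to its own rootfs. A standalone sketch of the path translation only, with a stand-in for `plugingetter.CompatPlugin`:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// CompatPlugin is a pared-down stand-in for plugingetter.CompatPlugin.
type CompatPlugin interface {
	BasePath() string
}

type v2Plugin struct{ rootfs string }

func (p v2Plugin) BasePath() string { return p.rootfs }

func main() {
	p := v2Plugin{rootfs: "/var/lib/docker/plugins/deadbeef/rootfs"}
	// A v2 graphdriver plugin runs inside its own rootfs, so the mount
	// path it returns ("/graph/mnt/<id>") is relative to that root.
	// The daemon must join it with BasePath() to get a host path.
	dir := "/graph/mnt/abc123"
	fmt.Println(filepath.Join(p.BasePath(), dir))
	// /var/lib/docker/plugins/deadbeef/rootfs/graph/mnt/abc123
}
```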

View File

@ -21,6 +21,7 @@ Usage: docker network create [OPTIONS] NETWORK
Create a network
Options:
--attachable Enable manual container attachment
--aux-address value Auxiliary IPv4 or IPv6 addresses used by Network
driver (default map[])
-d, --driver string Driver to manage the Network (default "bridge")

View File

@ -34,4 +34,9 @@ $ docker node demote <node name>
## Related information
* [node inspect](node_inspect.md)
* [node ls](node_ls.md)
* [node promote](node_promote.md)
* [node ps](node_ps.md)
* [node rm](node_rm.md)
* [node update](node_update.md)

View File

@ -129,7 +129,9 @@ Example output:
## Related information
* [node update](node_update.md)
* [node ps](node_ps.md)
* [node demote](node_demote.md)
* [node ls](node_ls.md)
* [node promote](node_promote.md)
* [node ps](node_ps.md)
* [node rm](node_rm.md)
* [node update](node_update.md)

View File

@ -122,7 +122,9 @@ e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader
## Related information
* [node demote](node_demote.md)
* [node inspect](node_inspect.md)
* [node update](node_update.md)
* [node promote](node_promote.md)
* [node ps](node_ps.md)
* [node rm](node_rm.md)
* [node update](node_update.md)

View File

@ -34,3 +34,8 @@ $ docker node promote <node name>
## Related information
* [node demote](node_demote.md)
* [node inspect](node_inspect.md)
* [node ls](node_ls.md)
* [node ps](node_ps.md)
* [node rm](node_rm.md)
* [node update](node_update.md)

View File

@ -100,7 +100,9 @@ The `desired-state` filter can take the values `running`, `shutdown`, and `accep
## Related information
* [node demote](node_demote.md)
* [node inspect](node_inspect.md)
* [node update](node_update.md)
* [node ls](node_ls.md)
* [node promote](node_promote.md)
* [node rm](node_rm.md)
* [node update](node_update.md)

View File

@ -65,8 +65,9 @@ before you can remove it from the swarm.
## Related information
* [node inspect](node_inspect.md)
* [node update](node_update.md)
* [node demote](node_demote.md)
* [node ps](node_ps.md)
* [node inspect](node_inspect.md)
* [node ls](node_ls.md)
* [node promote](node_promote.md)
* [node ps](node_ps.md)
* [node update](node_update.md)

View File

@ -63,7 +63,9 @@ metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/).
## Related information
* [node demote](node_demote.md)
* [node inspect](node_inspect.md)
* [node ps](node_ps.md)
* [node ls](node_ls.md)
* [node promote](node_promote.md)
* [node ps](node_ps.md)
* [node rm](node_rm.md)

View File

@ -24,9 +24,9 @@ Options:
--help Print usage
```
The `docker pause` command suspends all processes in a container. On Linux,
this uses the cgroups freezer. Traditionally, when suspending a process the
`SIGSTOP` signal is used, which is observable by the process being suspended.
The `docker pause` command suspends all processes in the specified containers.
On Linux, this uses the cgroups freezer. Traditionally, when suspending a process
the `SIGSTOP` signal is used, which is observable by the process being suspended.
With the cgroups freezer the process is unaware, and unable to capture,
that it is being suspended, and subsequently resumed. On Windows, only Hyper-V
containers can be paused.
@ -34,3 +34,7 @@ containers can be paused.
See the
[cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt)
for further details.
## Related information
* [unpause](unpause.md)
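The same operation is available programmatically; a minimal sketch using the Go API client that ships in this tree (`ContainerPause`/`ContainerUnpause`), with a placeholder container name and error handling reduced to `log.Fatal`:

```go
package main

import (
	"log"

	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	// Freeze every process in the container, then thaw it again;
	// "mycontainer" is a placeholder name.
	if err := cli.ContainerPause(ctx, "mycontainer"); err != nil {
		log.Fatal(err)
	}
	if err := cli.ContainerUnpause(ctx, "mycontainer"); err != nil {
		log.Fatal(err)
	}
}
```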

View File

accessible at the target port on every node, regardless of whether a task for
the service is running on that node. For more information refer to
[Use swarm mode routing mesh](https://docs.docker.com/engine/swarm/ingress/).
### Publish a port for TCP only or UCP only
### Publish a port for TCP only or UDP only
By default, when you publish a port, it is a TCP port. You can
specifically publish a UDP port instead of or in addition to a TCP port. When
@ -546,6 +546,7 @@ x3ti0erg11rjpg64m75kej2mz-hosttempl
## Related information
* [service inspect](service_inspect.md)
* [service logs](service_logs.md)
* [service ls](service_ls.md)
* [service rm](service_rm.md)
* [service scale](service_scale.md)

View File

@ -154,6 +154,7 @@ $ docker service inspect --format='{{.Spec.Mode.Replicated.Replicas}}' redis
## Related information
* [service create](service_create.md)
* [service logs](service_logs.md)
* [service ls](service_ls.md)
* [service rm](service_rm.md)
* [service scale](service_scale.md)

View File

@ -65,3 +65,13 @@ that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap
seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a
fraction of a second no more than nine digits long. You can combine the
`--since` option with either or both of the `--follow` or `--tail` options.
## Related information
* [service create](service_create.md)
* [service inspect](service_inspect.md)
* [service ls](service_ls.md)
* [service rm](service_rm.md)
* [service scale](service_scale.md)
* [service ps](service_ps.md)
* [service update](service_update.md)

View File

@ -107,6 +107,7 @@ ID NAME MODE REPLICAS IMAGE
* [service create](service_create.md)
* [service inspect](service_inspect.md)
* [service logs](service_logs.md)
* [service rm](service_rm.md)
* [service scale](service_scale.md)
* [service ps](service_ps.md)

View File

@ -154,6 +154,7 @@ The `desired-state` filter can take the values `running`, `shutdown`, and `accep
* [service create](service_create.md)
* [service inspect](service_inspect.md)
* [service logs](service_logs.md)
* [service ls](service_ls.md)
* [service rm](service_rm.md)
* [service scale](service_scale.md)

View File

@ -48,6 +48,7 @@ ID NAME MODE REPLICAS IMAGE
* [service create](service_create.md)
* [service inspect](service_inspect.md)
* [service logs](service_logs.md)
* [service ls](service_ls.md)
* [service scale](service_scale.md)
* [service ps](service_ps.md)

View File

@ -29,7 +29,7 @@ Options:
### Scale a service
The scale command enables you to scale one or more replicated services either up
or down to the desired number of replicas. This command cannot be applied on
or down to the desired number of replicas. This command cannot be applied on
services which are global mode. The command will return immediately, but the
actual scaling of the service may take some time. To stop all replicas of a
service while keeping the service active in the swarm you can set the scale to 0.
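Under the hood a scale is nothing more than a service-spec update that rewrites the replica count, which is why global-mode services (which have no count) are rejected and why the command returns before the tasks converge. A standalone sketch of that rule with a pared-down stand-in for `swarm.ServiceSpec`:

```go
package main

import "fmt"

// ServiceSpec is a pared-down stand-in for swarm.ServiceSpec.
type ServiceSpec struct {
	Replicas *uint64 // nil means global mode
}

// scale mirrors what `docker service scale` does: it rewrites the
// replica count in the spec; global-mode services have no count to
// rewrite, so they cannot be scaled.
func scale(spec *ServiceSpec, n uint64) error {
	if spec.Replicas == nil {
		return fmt.Errorf("scale can only be used with replicated mode")
	}
	*spec.Replicas = n
	return nil
}

func main() {
	r := uint64(3)
	spec := &ServiceSpec{Replicas: &r}
	if err := scale(spec, 0); err == nil {
		// Scaling to 0 stops all tasks but keeps the service defined.
		fmt.Println("replicas:", *spec.Replicas)
	}
}
```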
@ -89,6 +89,7 @@ ID NAME MODE REPLICAS IMAGE
* [service create](service_create.md)
* [service inspect](service_inspect.md)
* [service logs](service_logs.md)
* [service ls](service_ls.md)
* [service rm](service_rm.md)
* [service ps](service_ps.md)

View File

@ -174,6 +174,8 @@ See [`service create`](./service_create.md#templating) for the reference.
* [service create](service_create.md)
* [service inspect](service_inspect.md)
* [service ps](service_ps.md)
* [service logs](service_logs.md)
* [service ls](service_ls.md)
* [service ps](service_ps.md)
* [service rm](service_rm.md)
* [service scale](service_scale.md)

View File

@ -18,7 +18,7 @@ keywords: "swarm, leave"
```markdown
Usage: docker swarm leave [OPTIONS]
Leave the swarm (workers only)
Leave the swarm
Options:
-f, --force Force this node to leave the swarm, ignoring warnings

View File

@ -24,9 +24,13 @@ Options:
--help Print usage
```
The `docker unpause` command un-suspends all processes in a container.
The `docker unpause` command un-suspends all processes in the specified containers.
On Linux, it does this using the cgroups freezer.
See the
[cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt)
for further details.
## Related information
* [pause](pause.md)

View File

@ -257,6 +257,45 @@ func (s *DockerDaemonSuite) TestVolumePlugin(c *check.C) {
c.Assert(exists, checker.Equals, true)
}
func (s *DockerDaemonSuite) TestGraphdriverPlugin(c *check.C) {
testRequires(c, Network, IsAmd64, DaemonIsLinux, overlaySupported)
s.d.Start()
// install the plugin
plugin := "cpuguy83/docker-overlay2-graphdriver-plugin"
out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", plugin)
c.Assert(err, checker.IsNil, check.Commentf(out))
// restart the daemon with the plugin set as the storage driver
s.d.Restart("-s", plugin)
// run a container
out, err = s.d.Cmd("run", "--rm", "busybox", "true") // this will pull busybox using the plugin
c.Assert(err, checker.IsNil, check.Commentf(out))
}
func (s *DockerDaemonSuite) TestPluginVolumeRemoveOnRestart(c *check.C) {
testRequires(c, DaemonIsLinux, Network, IsAmd64)
s.d.Start("--live-restore=true")
out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName)
c.Assert(err, checker.IsNil, check.Commentf(out))
c.Assert(strings.TrimSpace(out), checker.Contains, pName)
out, err = s.d.Cmd("volume", "create", "--driver", pName, "test")
c.Assert(err, checker.IsNil, check.Commentf(out))
s.d.Restart("--live-restore=true")
out, err = s.d.Cmd("plugin", "disable", pName)
c.Assert(err, checker.IsNil, check.Commentf(out))
out, err = s.d.Cmd("plugin", "rm", pName)
c.Assert(err, checker.NotNil, check.Commentf(out))
c.Assert(out, checker.Contains, "in use")
}
func existsMountpointWithPrefix(mountpointPrefix string) (bool, error) {
mounts, err := mount.GetMounts()
if err != nil {

View File

@ -1343,7 +1343,7 @@ func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) {
content, _ := ioutil.ReadFile(s.d.logFile.Name())
if !strings.Contains(string(content), "Public Key ID does not match") {
c.Fatal("Missing KeyID message from daemon logs")
c.Fatalf("Missing KeyID message from daemon logs: %s", string(content))
}
}
@ -2927,3 +2927,28 @@ func (s *DockerDaemonSuite) TestDaemonShutdownTimeoutWithConfigFile(c *check.C)
content, _ := ioutil.ReadFile(s.d.logFile.Name())
c.Assert(string(content), checker.Contains, expectedMessage)
}
// Test case for 29342
func (s *DockerDaemonSuite) TestExecWithUserAfterLiveRestore(c *check.C) {
testRequires(c, DaemonIsLinux)
s.d.StartWithBusybox("--live-restore")
out, err := s.d.Cmd("run", "-d", "--name=top", "busybox", "sh", "-c", "addgroup -S test && adduser -S -G test test -D -s /bin/sh && top")
c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
waitRun("top")
out1, err := s.d.Cmd("exec", "-u", "test", "top", "id")
// uid=100(test) gid=101(test) groups=101(test)
c.Assert(err, check.IsNil, check.Commentf("Output: %s", out1))
// restart daemon.
s.d.Restart("--live-restore")
out2, err := s.d.Cmd("exec", "-u", "test", "top", "id")
c.Assert(err, check.IsNil, check.Commentf("Output: %s", out2))
c.Assert(out1, check.Equals, out2, check.Commentf("Output: before restart '%s', after restart '%s'", out1, out2))
out, err = s.d.Cmd("stop", "top")
c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
}

View File

@ -1086,6 +1086,24 @@ func (s *DockerSwarmSuite) TestSwarmNetworkIPAMOptions(c *check.C) {
c.Assert(strings.TrimSpace(out), checker.Equals, "map[foo:bar]")
}
// TODO: migrate to a unit test
// This test could be migrated to a unit test, saving a costly integration test,
// once PR #29143 is merged.
func (s *DockerSwarmSuite) TestSwarmUpdateWithoutArgs(c *check.C) {
d := s.AddDaemon(c, true, true)
expectedOutput := `
Usage: docker swarm update [OPTIONS]
Update the swarm
Options:`
out, err := d.Cmd("swarm", "update")
c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
c.Assert(out, checker.Contains, expectedOutput, check.Commentf(out))
}
func (s *DockerTrustedSwarmSuite) TestTrustedServiceCreate(c *check.C) {
d := s.swarmSuite.AddDaemon(c, true, true)
@ -1166,3 +1184,50 @@ func (s *DockerTrustedSwarmSuite) TestTrustedServiceUpdate(c *check.C) {
c.Assert(err, check.NotNil, check.Commentf(out))
c.Assert(string(out), checker.Contains, "Error: remote trust data does not exist", check.Commentf(out))
}
// Test case for issue #27866, which did not allow a NW name that is the prefix of a swarm NW ID.
// e.g. if the ingress ID starts with "n1", it was impossible to create a NW named "n1".
func (s *DockerSwarmSuite) TestSwarmNetworkCreateIssue27866(c *check.C) {
d := s.AddDaemon(c, true, true)
out, err := d.Cmd("network", "inspect", "-f", "{{.Id}}", "ingress")
c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
ingressID := strings.TrimSpace(out)
c.Assert(ingressID, checker.Not(checker.Equals), "")
// create a network of which name is the prefix of the ID of an overlay network
// (ingressID in this case)
newNetName := ingressID[0:2]
out, err = d.Cmd("network", "create", "--driver", "overlay", newNetName)
// In #27866, it was failing because of "network with name %s already exists"
c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
out, err = d.Cmd("network", "rm", newNetName)
c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
}
// Test case for https://github.com/docker/docker/pull/27938#issuecomment-265768303
// This test creates two networks with the same name sequentially, with various drivers.
// Since the operations in this test are done sequentially, the 2nd call should fail with
// "network with name FOO already exists".
// Note that it is ok to have multiple networks with the same name if the operations are done
// in parallel. (#18864)
func (s *DockerSwarmSuite) TestSwarmNetworkCreateDup(c *check.C) {
d := s.AddDaemon(c, true, true)
drivers := []string{"bridge", "overlay"}
for i, driver1 := range drivers {
nwName := fmt.Sprintf("network-test-%d", i)
for _, driver2 := range drivers {
c.Logf("Creating a network named %q with %q, then %q",
nwName, driver1, driver2)
out, err := d.Cmd("network", "create", "--driver", driver1, nwName)
c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
out, err = d.Cmd("network", "create", "--driver", driver2, nwName)
c.Assert(out, checker.Contains,
fmt.Sprintf("network with name %s already exists", nwName))
c.Assert(err, checker.NotNil)
c.Logf("As expected, the attempt to network %q with %q failed: %s",
nwName, driver2, out)
out, err = d.Cmd("network", "rm", nwName)
c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
}
}
}

View File

@ -3,7 +3,9 @@
package main
import (
"bytes"
"io/ioutil"
"os/exec"
"strings"
"github.com/docker/docker/pkg/sysinfo"
@ -122,6 +124,17 @@ var (
},
"Test cannot be run without a kernel (4.3+) supporting ambient capabilities",
}
overlaySupported = testRequirement{
func() bool {
cmd := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "cat /proc/filesystems")
out, err := cmd.CombinedOutput()
if err != nil {
return false
}
return bytes.Contains(out, []byte("overlay\n"))
},
"Test cannot be run wihtout suppport for ovelayfs",
}
)
func init() {

View File

@ -6,6 +6,7 @@ docker-network-create - create a new network
# SYNOPSIS
**docker network create**
[**--attachable**]
[**--aux-address**=*map[]*]
[**-d**|**--driver**=*DRIVER*]
[**--gateway**=*[]*]
@ -143,6 +144,9 @@ to create an externally isolated `overlay` network, you can specify the
`--internal` option.
# OPTIONS
**--attachable**
Enable manual container attachment
**--aux-address**=map[]
Auxiliary IPv4 or IPv6 addresses used by network driver

View File

@ -2,7 +2,7 @@
% Docker Community
% JUNE 2014
# NAME
docker-pause - Pause all processes within a container
docker-pause - Pause all processes within one or more containers
# SYNOPSIS
**docker pause**
@ -10,9 +10,9 @@ CONTAINER [CONTAINER...]
# DESCRIPTION
The `docker pause` command suspends all processes in a container. On Linux,
this uses the cgroups freezer. Traditionally, when suspending a process the
`SIGSTOP` signal is used, which is observable by the process being suspended.
The `docker pause` command suspends all processes in the specified containers.
On Linux, this uses the cgroups freezer. Traditionally, when suspending a process
the `SIGSTOP` signal is used, which is observable by the process being suspended.
With the cgroups freezer the process is unaware, and unable to capture,
that it is being suspended, and subsequently resumed. On Windows, only Hyper-V
containers can be paused.
@ -22,10 +22,11 @@ See the [cgroups freezer documentation]
further details.
# OPTIONS
There are no available options.
**--help**
Print usage statement
# See also
**docker-unpause(1)** to unpause all processes within a container.
**docker-unpause(1)** to unpause all processes within one or more containers.
# HISTORY
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>

View File

@ -2,7 +2,7 @@
% Docker Community
% JUNE 2014
# NAME
docker-unpause - Unpause all processes within a container
docker-unpause - Unpause all processes within one or more containers
# SYNOPSIS
**docker unpause**
@ -10,7 +10,7 @@ CONTAINER [CONTAINER...]
# DESCRIPTION
The `docker unpause` command un-suspends all processes in a container.
The `docker unpause` command un-suspends all processes in the specified containers.
On Linux, it does this using the cgroups freezer.
See the [cgroups freezer documentation]
@ -18,10 +18,11 @@ See the [cgroups freezer documentation]
further details.
# OPTIONS
There are no available options.
**--help**
Print usage statement
# See also
**docker-pause(1)** to pause all processes within a container.
**docker-pause(1)** to pause all processes within one or more containers.
# HISTORY
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>

View File

@ -5,7 +5,6 @@ package plugin
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
@ -23,6 +22,7 @@ import (
"github.com/docker/docker/plugin/distribution"
"github.com/docker/docker/plugin/v2"
"github.com/docker/docker/reference"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
@ -268,7 +268,7 @@ func (pm *Manager) Push(name string, metaHeader http.Header, authConfig *types.A
}
// Remove deletes plugin's root directory.
func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error {
func (pm *Manager) Remove(name string, config *types.PluginRmConfig) (err error) {
p, err := pm.pluginStore.GetByName(name)
pm.mu.RLock()
c := pm.cMap[p]
@ -294,12 +294,18 @@ func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error {
}
id := p.GetID()
pm.pluginStore.Remove(p)
pluginDir := filepath.Join(pm.libRoot, id)
if err := os.RemoveAll(pluginDir); err != nil {
logrus.Warnf("unable to remove %q from plugin remove: %v", pluginDir, err)
defer func() {
if err == nil || config.ForceRemove {
pm.pluginStore.Remove(p)
pm.pluginEventLogger(id, name, "remove")
}
}()
if err = os.RemoveAll(pluginDir); err != nil {
return errors.Wrap(err, "failed to remove plugin directory")
}
pm.pluginEventLogger(id, name, "remove")
return nil
}

View File

@ -156,7 +156,7 @@ func (pm *Manager) reload() error {
// We should only enable rootfs propagation for certain plugin types that need it.
for _, typ := range p.PluginObj.Config.Interface.Types {
if typ.Capability == "volumedriver" && typ.Prefix == "docker" && strings.HasPrefix(typ.Version, "1.") {
if (typ.Capability == "volumedriver" || typ.Capability == "graphdriver") && typ.Prefix == "docker" && strings.HasPrefix(typ.Version, "1.") {
if p.PluginObj.Config.PropagatedMount != "" {
// TODO: sanitize PropagatedMount and prevent breakout
p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount)

View File

@ -134,9 +134,9 @@ func (p *Plugin) InitPlugin() error {
p.PluginObj.Settings.Mounts = make([]types.PluginMount, len(p.PluginObj.Config.Mounts))
copy(p.PluginObj.Settings.Mounts, p.PluginObj.Config.Mounts)
p.PluginObj.Settings.Env = make([]string, 0, len(p.PluginObj.Config.Env))
p.PluginObj.Settings.Devices = make([]types.PluginDevice, 0, len(p.PluginObj.Config.Linux.Devices))
p.PluginObj.Settings.Devices = make([]types.PluginDevice, len(p.PluginObj.Config.Linux.Devices))
copy(p.PluginObj.Settings.Devices, p.PluginObj.Config.Linux.Devices)
p.PluginObj.Settings.Env = make([]string, 0, len(p.PluginObj.Config.Env))
for _, env := range p.PluginObj.Config.Env {
if env.Value != nil {
p.PluginObj.Settings.Env = append(p.PluginObj.Settings.Env, fmt.Sprintf("%s=%s", env.Name, *env.Value))
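The `Settings.Devices` fix above is a classic `copy` gotcha: `copy` transfers `min(len(dst), len(src))` elements, so a destination built with `make([]T, 0, n)` has length zero and receives nothing, silently dropping every configured device. A minimal demonstration:

```go
package main

import "fmt"

func main() {
	src := []int{1, 2, 3}

	// Bug: length 0 (capacity 3) — copy moves min(len(dst), len(src)) = 0 elements.
	dst := make([]int, 0, len(src))
	fmt.Println(copy(dst, src), dst) // 0 []

	// Fix: length 3, as in the corrected Settings.Devices allocation.
	dst = make([]int, len(src))
	fmt.Println(copy(dst, src), dst) // 3 [1 2 3]
}
```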

volume/store/db.go Normal file
View File

@ -0,0 +1,78 @@
package store
import (
"encoding/json"
"github.com/boltdb/bolt"
"github.com/pkg/errors"
)
var volumeBucketName = []byte("volumes")
type dbEntry struct {
Key []byte
Value []byte
}
type volumeMetadata struct {
Name string
Driver string
Labels map[string]string
Options map[string]string
}
func (s *VolumeStore) setMeta(name string, meta volumeMetadata) error {
return s.db.Update(func(tx *bolt.Tx) error {
return setMeta(tx, name, meta)
})
}
func setMeta(tx *bolt.Tx, name string, meta volumeMetadata) error {
metaJSON, err := json.Marshal(meta)
if err != nil {
return err
}
b := tx.Bucket(volumeBucketName)
return errors.Wrap(b.Put([]byte(name), metaJSON), "error setting volume metadata")
}
func (s *VolumeStore) getMeta(name string) (volumeMetadata, error) {
var meta volumeMetadata
err := s.db.View(func(tx *bolt.Tx) error {
return getMeta(tx, name, &meta)
})
return meta, err
}
func getMeta(tx *bolt.Tx, name string, meta *volumeMetadata) error {
b := tx.Bucket(volumeBucketName)
val := b.Get([]byte(name))
if string(val) == "" {
return nil
}
if err := json.Unmarshal(val, meta); err != nil {
return errors.Wrap(err, "error unmarshaling volume metadata")
}
return nil
}
func (s *VolumeStore) removeMeta(name string) error {
return s.db.Update(func(tx *bolt.Tx) error {
return removeMeta(tx, name)
})
}
func removeMeta(tx *bolt.Tx, name string) error {
b := tx.Bucket(volumeBucketName)
return errors.Wrap(b.Delete([]byte(name)), "error removing volume metadata")
}
func listEntries(tx *bolt.Tx) []*dbEntry {
var entries []*dbEntry
b := tx.Bucket(volumeBucketName)
b.ForEach(func(k, v []byte) error {
entries = append(entries, &dbEntry{k, v})
return nil
})
return entries
}
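These helpers assume the `volumes` bucket already exists (it is created in `New`, shown in a later file) and store one JSON blob per volume name. A standalone round-trip sketch against the same boltdb API; the path and metadata values are placeholders:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

var bucket = []byte("volumes")

type meta struct {
	Name, Driver string
}

func main() {
	// 0600 and default options, as the volume store does; the path is a placeholder.
	db, err := bolt.Open("/tmp/volumes.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Ensure the bucket exists before any get/set, mirroring New().
	if err := db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists(bucket)
		return err
	}); err != nil {
		log.Fatal(err)
	}

	// Round-trip one metadata entry the same way setMeta/getMeta do.
	m := meta{Name: "myvol", Driver: "local"}
	db.Update(func(tx *bolt.Tx) error {
		v, _ := json.Marshal(m)
		return tx.Bucket(bucket).Put([]byte(m.Name), v)
	})
	db.View(func(tx *bolt.Tx) error {
		var got meta
		json.Unmarshal(tx.Bucket(bucket).Get([]byte(m.Name)), &got)
		fmt.Printf("%+v\n", got)
		return nil
	})
}
```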

volume/store/restore.go Normal file
View File

@ -0,0 +1,91 @@
package store
import (
"encoding/json"
"sync"
"github.com/Sirupsen/logrus"
"github.com/boltdb/bolt"
"github.com/docker/docker/volume"
"github.com/docker/docker/volume/drivers"
)
// restore is called when a new volume store is created.
// Its primary purpose is to ensure that all drivers' refcounts are set based
// on known volumes after a restart.
// This only attempts to track volumes that are actually stored in the on-disk db.
// It does not probe the available drivers to find anything that may have been added
// out of band.
func (s *VolumeStore) restore() {
var entries []*dbEntry
s.db.View(func(tx *bolt.Tx) error {
entries = listEntries(tx)
return nil
})
chRemove := make(chan []byte, len(entries))
var wg sync.WaitGroup
for _, entry := range entries {
wg.Add(1)
// this is potentially a very slow operation, so do it in a goroutine
go func(entry *dbEntry) {
defer wg.Done()
var meta volumeMetadata
if len(entry.Value) != 0 {
if err := json.Unmarshal(entry.Value, &meta); err != nil {
logrus.Errorf("Error while reading volume metadata for volume %q: %v", string(entry.Key), err)
// don't return here, we can try with `getVolume` below
}
}
var v volume.Volume
var err error
if meta.Driver != "" {
v, err = lookupVolume(meta.Driver, string(entry.Key))
if err != nil && err != errNoSuchVolume {
logrus.WithError(err).WithField("driver", meta.Driver).WithField("volume", string(entry.Key)).Warn("Error restoring volume")
return
}
if v == nil {
// doesn't exist in the driver, remove it from the db
chRemove <- entry.Key
return
}
} else {
v, err = s.getVolume(string(entry.Key))
if err != nil {
if err == errNoSuchVolume {
chRemove <- entry.Key
}
return
}
meta.Driver = v.DriverName()
if err := s.setMeta(v.Name(), meta); err != nil {
logrus.WithError(err).WithField("driver", meta.Driver).WithField("volume", v.Name()).Warn("Error updating volume metadata on restore")
}
}
// increment driver refcount
volumedrivers.CreateDriver(meta.Driver)
// cache the volume
s.globalLock.Lock()
s.options[v.Name()] = meta.Options
s.labels[v.Name()] = meta.Labels
s.names[v.Name()] = v
s.globalLock.Unlock()
}(entry)
}
wg.Wait()
close(chRemove)
s.db.Update(func(tx *bolt.Tx) error {
for k := range chRemove {
if err := removeMeta(tx, string(k)); err != nil {
logrus.Warnf("Error removing stale entry from volume db: %v", err)
}
}
return nil
})
}

View File

@ -1,8 +1,6 @@
package store
import (
"bytes"
"encoding/json"
"net"
"os"
"path/filepath"
@ -19,16 +17,9 @@ import (
)
const (
volumeDataDir = "volumes"
volumeBucketName = "volumes"
volumeDataDir = "volumes"
)
type volumeMetadata struct {
Name string
Labels map[string]string
Options map[string]string
}
type volumeWrapper struct {
volume.Volume
labels map[string]string
@ -89,16 +80,17 @@ func New(rootPath string) (*VolumeStore, error) {
// initialize volumes bucket
if err := vs.db.Update(func(tx *bolt.Tx) error {
if _, err := tx.CreateBucketIfNotExists([]byte(volumeBucketName)); err != nil {
if _, err := tx.CreateBucketIfNotExists(volumeBucketName); err != nil {
return errors.Wrap(err, "error while setting up volume store metadata database")
}
return nil
}); err != nil {
return nil, err
}
}
vs.restore()
return vs, nil
}
@ -131,6 +123,15 @@ func (s *VolumeStore) getRefs(name string) []string {
// the internal data is out of sync with volumes driver plugins.
func (s *VolumeStore) Purge(name string) {
s.globalLock.Lock()
v, exists := s.names[name]
if exists {
if _, err := volumedrivers.RemoveDriver(v.DriverName()); err != nil {
logrus.Errorf("Error dereferencing volume driver: %v", err)
}
}
if err := s.removeMeta(name); err != nil {
logrus.Errorf("Error removing volume metadata for volume %q: %v", name, err)
}
delete(s.names, name)
delete(s.refs, name)
delete(s.labels, name)
@ -331,24 +332,11 @@ func (s *VolumeStore) checkConflict(name, driverName string) (volume.Volume, err
// volumeExists returns if the volume is still present in the driver.
// An error is returned if there was an issue communicating with the driver.
func volumeExists(v volume.Volume) (bool, error) {
vd, err := volumedrivers.GetDriver(v.DriverName())
exists, err := lookupVolume(v.DriverName(), v.Name())
if err != nil {
return false, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", v.Name(), v.DriverName())
return false, err
}
exists, err := vd.Get(v.Name())
if err != nil {
err = errors.Cause(err)
if _, ok := err.(net.Error); ok {
return false, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", v.Name(), v.DriverName())
}
// At this point, the error could be anything from the driver, such as "no such volume"
// Let's not check an error here, and instead check if the driver returned a volume
}
if exists == nil {
return false, nil
}
return true, nil
return exists != nil, nil
}
// create asks the given driver to create a volume with the name/opts.
@ -404,27 +392,16 @@ func (s *VolumeStore) create(name, driverName string, opts, labels map[string]st
s.options[name] = opts
s.globalLock.Unlock()
if s.db != nil {
metadata := &volumeMetadata{
Name: name,
Labels: labels,
Options: opts,
}
volData, err := json.Marshal(metadata)
if err != nil {
return nil, err
}
if err := s.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(volumeBucketName))
err := b.Put([]byte(name), volData)
return err
}); err != nil {
return nil, errors.Wrap(err, "error while persisting volume metadata")
}
metadata := volumeMetadata{
Name: name,
Driver: vd.Name(),
Labels: labels,
Options: opts,
}
if err := s.setMeta(name, metadata); err != nil {
return nil, err
}
return volumeWrapper{v, labels, vd.Scope(), opts}, nil
}
@ -471,48 +448,41 @@ func (s *VolumeStore) Get(name string) (volume.Volume, error) {
// if the driver is unknown it probes all drivers until it finds the first volume with that name.
// it is expected that callers of this function hold any necessary locks
func (s *VolumeStore) getVolume(name string) (volume.Volume, error) {
labels := map[string]string{}
options := map[string]string{}
var meta volumeMetadata
meta, err := s.getMeta(name)
if err != nil {
return nil, err
}
if s.db != nil {
// get meta
if err := s.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(volumeBucketName))
data := b.Get([]byte(name))
if string(data) == "" {
return nil
driverName := meta.Driver
if driverName == "" {
s.globalLock.RLock()
v, exists := s.names[name]
s.globalLock.RUnlock()
if exists {
meta.Driver = v.DriverName()
if err := s.setMeta(name, meta); err != nil {
return nil, err
}
var meta volumeMetadata
buf := bytes.NewBuffer(data)
if err := json.NewDecoder(buf).Decode(&meta); err != nil {
return err
}
labels = meta.Labels
options = meta.Options
return nil
}); err != nil {
return nil, err
}
}
logrus.Debugf("Getting volume reference for name: %s", name)
s.globalLock.RLock()
v, exists := s.names[name]
s.globalLock.RUnlock()
if exists {
vd, err := volumedrivers.GetDriver(v.DriverName())
if meta.Driver != "" {
vol, err := lookupVolume(meta.Driver, name)
if err != nil {
return nil, err
}
vol, err := vd.Get(name)
if err != nil {
return nil, err
if vol == nil {
s.Purge(name)
return nil, errNoSuchVolume
}
return volumeWrapper{vol, labels, vd.Scope(), options}, nil
var scope string
vd, err := volumedrivers.GetDriver(meta.Driver)
if err == nil {
scope = vd.Scope()
}
return volumeWrapper{vol, meta.Labels, scope, meta.Options}, nil
}
logrus.Debugf("Probing all drivers for volume with name: %s", name)
@ -523,15 +493,42 @@ func (s *VolumeStore) getVolume(name string) (volume.Volume, error) {
for _, d := range drivers {
v, err := d.Get(name)
if err != nil {
if err != nil || v == nil {
continue
}
return volumeWrapper{v, labels, d.Scope(), options}, nil
meta.Driver = v.DriverName()
if err := s.setMeta(name, meta); err != nil {
return nil, err
}
return volumeWrapper{v, meta.Labels, d.Scope(), meta.Options}, nil
}
return nil, errNoSuchVolume
}
// lookupVolume gets the specified volume from the specified driver.
// This will only return errors related to communications with the driver.
// If the driver returns an error that is not communication related the
// error is logged but not returned.
// If the volume is not found it will return `nil, nil`
func lookupVolume(driverName, volumeName string) (volume.Volume, error) {
vd, err := volumedrivers.GetDriver(driverName)
if err != nil {
return nil, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", volumeName, driverName)
}
v, err := vd.Get(volumeName)
if err != nil {
err = errors.Cause(err)
if _, ok := err.(net.Error); ok {
return nil, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", volumeName, driverName)
}
// At this point, the error could be anything from the driver, such as "no such volume"
// Let's not check an error here, and instead check if the driver returned a volume
logrus.WithError(err).WithField("driver", driverName).WithField("volume", volumeName).Warnf("Error while looking up volume")
}
return v, nil
}
// Remove removes the requested volume. A volume is not removed if it has any refs
func (s *VolumeStore) Remove(v volume.Volume) error {
name := normaliseVolumeName(v.Name())
@ -543,7 +540,7 @@ func (s *VolumeStore) Remove(v volume.Volume) error {
return &OpErr{Err: errVolumeInUse, Name: v.Name(), Op: "remove", Refs: refs}
}
vd, err := volumedrivers.RemoveDriver(v.DriverName())
vd, err := volumedrivers.GetDriver(v.DriverName())
if err != nil {
return &OpErr{Err: err, Name: vd.Name(), Op: "remove"}
}
@ -644,3 +641,9 @@ func unwrapVolume(v volume.Volume) volume.Volume {
return v
}
// Shutdown releases all resources used by the volume store
// It does not make any changes to volumes, drivers, etc.
func (s *VolumeStore) Shutdown() error {
return s.db.Close()
}

View File

@ -2,6 +2,8 @@ package store
import (
"errors"
"io/ioutil"
"os"
"strings"
"testing"
@ -16,7 +18,13 @@ func TestCreate(t *testing.T) {
volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake")
defer volumedrivers.Unregister("fake")
s, err := New("")
dir, err := ioutil.TempDir("", "test-create")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
s, err := New(dir)
if err != nil {
t.Fatal(err)
}
@ -47,7 +55,12 @@ func TestRemove(t *testing.T) {
volumedrivers.Register(volumetestutils.NewFakeDriver("noop"), "noop")
defer volumedrivers.Unregister("fake")
defer volumedrivers.Unregister("noop")
s, err := New("")
dir, err := ioutil.TempDir("", "test-remove")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
s, err := New(dir)
if err != nil {
t.Fatal(err)
}
@ -80,8 +93,13 @@ func TestList(t *testing.T) {
volumedrivers.Register(volumetestutils.NewFakeDriver("fake2"), "fake2")
defer volumedrivers.Unregister("fake")
defer volumedrivers.Unregister("fake2")
dir, err := ioutil.TempDir("", "test-list")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
s, err := New("")
s, err := New(dir)
if err != nil {
t.Fatal(err)
}
@ -99,9 +117,12 @@ func TestList(t *testing.T) {
if len(ls) != 2 {
t.Fatalf("expected 2 volumes, got: %d", len(ls))
}
if err := s.Shutdown(); err != nil {
t.Fatal(err)
}
// and again with a new store
s, err = New("")
s, err = New(dir)
if err != nil {
t.Fatal(err)
}
@ -119,7 +140,11 @@ func TestFilterByDriver(t *testing.T) {
volumedrivers.Register(volumetestutils.NewFakeDriver("noop"), "noop")
defer volumedrivers.Unregister("fake")
defer volumedrivers.Unregister("noop")
s, err := New("")
dir, err := ioutil.TempDir("", "test-filter-driver")
if err != nil {
t.Fatal(err)
}
s, err := New(dir)
if err != nil {
t.Fatal(err)
}
@ -146,8 +171,12 @@ func TestFilterByDriver(t *testing.T) {
func TestFilterByUsed(t *testing.T) {
volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake")
volumedrivers.Register(volumetestutils.NewFakeDriver("noop"), "noop")
dir, err := ioutil.TempDir("", "test-filter-used")
if err != nil {
t.Fatal(err)
}
s, err := New("")
s, err := New(dir)
if err != nil {
t.Fatal(err)
}
@ -183,8 +212,12 @@ func TestFilterByUsed(t *testing.T) {
func TestDerefMultipleOfSameRef(t *testing.T) {
volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake")
dir, err := ioutil.TempDir("", "test-same-deref")
if err != nil {
t.Fatal(err)
}
s, err := New("")
s, err := New(dir)
if err != nil {
t.Fatal(err)
}