daemon/volumes.go
Brian Goff d85b9f8580 Fix loading of containerized plugins
During daemon startup, all containers are registered before any are
started. Container registration used to call out to initialize the
container's volumes. If a volume uses a plugin that itself runs in a
container, the restart of that container would fail because the plugin
is not yet running.

This also slowed down daemon startup, since volume initialization
happened sequentially; each plugin-backed volume would fail to
initialize but still take 8 seconds to time out.

This fix holds off on volume initialization until after containers are
restarted and does the initialization in parallel (sketched below).

The containers that are restarted will have their volumes initialized
when they start. If any of these containers use a plugin, they will
keep retrying to reach the plugin (up to the timeout, which is 8
seconds) until the container with the plugin is up and running.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2016-01-20 12:23:17 -05:00
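
A minimal sketch of the parallel initialization the message describes, assuming a sync.WaitGroup fan-out and the daemon's logrus logger; the helper name prepareAllMountPoints and its placement are illustrative assumptions, not the actual change in the daemon's restore path:

// Hypothetical sketch of deferred, parallel volume initialization during
// daemon restore. Each container's mount points are resolved in their own
// goroutine, so one slow (or still-starting) plugin cannot stall the rest.
func (daemon *Daemon) prepareAllMountPoints(containers []*container.Container) {
	var wg sync.WaitGroup
	for _, c := range containers {
		wg.Add(1)
		go func(c *container.Container) {
			defer wg.Done()
			for _, m := range c.MountPoints {
				// The volume driver client keeps retrying a containerized
				// plugin until its timeout (8 seconds per the message above),
				// so a plugin that is still starting only delays this goroutine.
				if err := daemon.lazyInitializeVolume(c.ID, m); err != nil {
					logrus.Errorf("error initializing volume %s: %v", m.Name, err)
				}
			}
		}(c)
	}
	wg.Wait()
}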

package daemon

import (
	"errors"
	"os"
	"path/filepath"
	"strings"

	"github.com/docker/docker/container"
	"github.com/docker/docker/daemon/execdriver"
	derr "github.com/docker/docker/errors"
	"github.com/docker/docker/volume"
	"github.com/docker/engine-api/types"
	containertypes "github.com/docker/engine-api/types/container"
	"github.com/opencontainers/runc/libcontainer/label"
)

var (
	// ErrVolumeReadonly is used to signal an error when trying to copy data into
	// a volume mount that is not writable.
	ErrVolumeReadonly = errors.New("mounted volume is marked read-only")
)

type mounts []execdriver.Mount

// volumeToAPIType converts a volume.Volume to the type used by the remote API
func volumeToAPIType(v volume.Volume) *types.Volume {
	return &types.Volume{
		Name:       v.Name(),
		Driver:     v.DriverName(),
		Mountpoint: v.Path(),
	}
}

// Len returns the number of mounts. Used in sorting.
func (m mounts) Len() int {
	return len(m)
}

// Less returns true if the number of parts (a/b/c would be 3 parts) in the
// mount indexed by parameter 1 is less than that of the mount indexed by
// parameter 2. Used in sorting.
func (m mounts) Less(i, j int) bool {
	return m.parts(i) < m.parts(j)
}

// Swap swaps two items in an array of mounts. Used in sorting.
func (m mounts) Swap(i, j int) {
	m[i], m[j] = m[j], m[i]
}

// parts returns the number of parts in the destination of a mount. Used in sorting.
func (m mounts) parts(i int) int {
	return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator))
}
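
// Illustrative example (not part of the original file): sorting a mounts
// slice orders destinations by path depth, so a parent path is mounted
// before anything nested under it:
//
//	ms := mounts{
//		{Destination: "/foo/bar"},
//		{Destination: "/foo"},
//	}
//	sort.Sort(ms) // ms is now ordered /foo, /foo/bar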
// registerMountPoints initializes the container mount points with the configured volumes and bind mounts.
// It follows this sequence to decide what to mount in each final destination:
//
// 1. Select the previously configured mount points for the containers, if any.
// 2. Select the volumes mounted from other containers. Overrides previously configured mount point destinations.
// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
// 4. Clean up old volumes that are about to be reassigned.
func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) error {
	binds := map[string]bool{}
	mountPoints := map[string]*volume.MountPoint{}

	// 1. Read already configured mount points.
	for name, point := range container.MountPoints {
		mountPoints[name] = point
	}

	// 2. Read volumes from other containers.
	for _, v := range hostConfig.VolumesFrom {
		containerID, mode, err := volume.ParseVolumesFrom(v)
		if err != nil {
			return err
		}

		c, err := daemon.GetContainer(containerID)
		if err != nil {
			return err
		}

		for _, m := range c.MountPoints {
			cp := &volume.MountPoint{
				Name:        m.Name,
				Source:      m.Source,
				RW:          m.RW && volume.ReadWrite(mode),
				Driver:      m.Driver,
				Destination: m.Destination,
				Propagation: m.Propagation,
			}

			if len(cp.Source) == 0 {
				v, err := daemon.volumes.GetWithRef(cp.Name, cp.Driver, container.ID)
				if err != nil {
					return err
				}
				cp.Volume = v
			}

			mountPoints[cp.Destination] = cp
		}
	}

	// 3. Read bind mounts.
	for _, b := range hostConfig.Binds {
		// #10618
		bind, err := volume.ParseMountSpec(b, hostConfig.VolumeDriver)
		if err != nil {
			return err
		}

		if binds[bind.Destination] {
			return derr.ErrorCodeMountDup.WithArgs(bind.Destination)
		}

		if len(bind.Name) > 0 && len(bind.Driver) > 0 {
			// create the volume
			v, err := daemon.volumes.CreateWithRef(bind.Name, bind.Driver, container.ID, nil)
			if err != nil {
				return err
			}
			bind.Volume = v
			bind.Source = v.Path()
			// bind.Name may be an already existing volume; use its actual driver here
			bind.Driver = v.DriverName()
			bind = setBindModeIfNull(bind)
		}

		if label.RelabelNeeded(bind.Mode) {
			if err := label.Relabel(bind.Source, container.MountLabel, label.IsShared(bind.Mode)); err != nil {
				return err
			}
		}

		binds[bind.Destination] = true
		mountPoints[bind.Destination] = bind
	}

	container.Lock()

	// 4. Clean up old volumes that are about to be reassigned.
	for _, m := range mountPoints {
		if m.BackwardsCompatible() {
			if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil {
				daemon.volumes.Dereference(mp.Volume, container.ID)
			}
		}
	}
	container.MountPoints = mountPoints

	container.Unlock()

	return nil
}
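
// Illustrative hostConfig.Binds formats handled by the loop above (examples
// only, not part of the original file; the exact parsing rules live in
// volume.ParseMountSpec):
//
//	/src:/dst       host path bind mounted read-write at /dst
//	/src:/dst:ro    host path bind mounted read-only
//	name:/dst       named volume, created or resolved through the
//	                volume store via CreateWithRef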
// lazyInitializeVolume initializes a mountpoint's volume if needed.
// This happens after a daemon restart.
func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPoint) error {
	if len(m.Driver) > 0 && m.Volume == nil {
		v, err := daemon.volumes.GetWithRef(m.Name, m.Driver, containerID)
		if err != nil {
			return err
		}
		m.Volume = v
	}
	return nil
}