
Split volumes out from daemon

Docker-DCO-1.1-Signed-off-by: Brian Goff <cpuguy83@gmail.com> (github: cpuguy83)
Brian Goff 2014-08-28 10:18:08 -04:00
parent ba726d9148
commit 45407cf00a
12 changed files with 537 additions and 358 deletions
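For orientation before reading the diff: the daemon stops storing volumes in a graph.Graph and instead builds a volumes.Repository on top of the plain vfs graphdriver. The sketch below is assembled from the hunks that follow and is only illustrative; the root path, the nil graph options, the vfs registration import and the error handling are my assumptions, not code from this commit.

package main

import (
    "log"
    "path"

    "github.com/docker/docker/daemon/graphdriver"
    _ "github.com/docker/docker/daemon/graphdriver/vfs" // assumed registration import for the vfs driver
    "github.com/docker/docker/volumes"
)

func main() {
    root := "/var/lib/docker" // assumption: the default daemon root

    // Volumes deliberately use the simple vfs driver, never aufs/devmapper.
    driver, err := graphdriver.GetDriver("vfs", root, nil)
    if err != nil {
        log.Fatal(err)
    }

    // Same wiring as NewDaemonFromDirectory in the daemon.go hunk below.
    repo, err := volumes.NewRepository(path.Join(root, "volumes"), driver)
    if err != nil {
        log.Fatal(err)
    }

    // An empty host path asks the repository to create a new vfs-backed
    // directory; a non-empty path would register a bind-mounted volume.
    v, err := repo.NewVolume("", true)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("volume %s lives at %s", v.ID, v.Path)
}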


@@ -618,9 +618,10 @@ func (b *Builder) clearTmp() {
         tmp := b.Daemon.Get(c)
         if err := b.Daemon.Destroy(tmp); err != nil {
             fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
-        } else {
-            delete(b.TmpContainers, c)
-            fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c))
+            return
         }
+        b.Daemon.DeleteVolumes(tmp.VolumePaths())
+        delete(b.TmpContainers, c)
+        fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c))
     }
 }


@@ -79,6 +79,9 @@ type Container struct {
     MountLabel, ProcessLabel string
     RestartCount             int
 
+    // Maps container paths to volume paths. The key in this is the path to which
+    // the volume is being mounted inside the container. Value is the path of the
+    // volume on disk
     Volumes map[string]string
     // Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
     // Easier than migrating older container configs :)

@@ -309,7 +312,7 @@ func (container *Container) Start() (err error) {
         return err
     }
     container.verifyDaemonSettings()
-    if err := prepareVolumesForContainer(container); err != nil {
+    if err := container.prepareVolumes(); err != nil {
         return err
     }
     linkedEnv, err := container.setupLinkedContainers()

@@ -323,7 +326,7 @@ func (container *Container) Start() (err error) {
     if err := populateCommand(container, env); err != nil {
         return err
     }
-    if err := setupMountsForContainer(container); err != nil {
+    if err := container.setupMounts(); err != nil {
         return err
     }

@@ -1183,47 +1186,3 @@ func (container *Container) getNetworkedContainer() (*Container, error) {
         return nil, fmt.Errorf("network mode not set to container")
     }
 }
-
-func (container *Container) GetVolumes() (map[string]*Volume, error) {
-    // Get all the bind-mounts
-    volumes, err := container.getBindMap()
-    if err != nil {
-        return nil, err
-    }
-    // Get all the normal volumes
-    for volPath, hostPath := range container.Volumes {
-        if _, exists := volumes[volPath]; exists {
-            continue
-        }
-        volumes[volPath] = &Volume{VolPath: volPath, HostPath: hostPath, isReadWrite: container.VolumesRW[volPath]}
-    }
-    return volumes, nil
-}
-
-func (container *Container) getBindMap() (map[string]*Volume, error) {
-    var (
-        // Create the requested bind mounts
-        volumes = map[string]*Volume{}
-        // Define illegal container destinations
-        illegalDsts = []string{"/", "."}
-    )
-
-    for _, bind := range container.hostConfig.Binds {
-        vol, err := parseBindVolumeSpec(bind)
-        if err != nil {
-            return nil, err
-        }
-        vol.isBindMount = true
-        // Bail if trying to mount to an illegal destination
-        for _, illegal := range illegalDsts {
-            if vol.VolPath == illegal {
-                return nil, fmt.Errorf("Illegal bind destination: %s", vol.VolPath)
-            }
-        }
-
-        volumes[filepath.Clean(vol.VolPath)] = &vol
-    }
-    return volumes, nil
-}


@@ -38,6 +38,7 @@ import (
     "github.com/docker/docker/pkg/truncindex"
     "github.com/docker/docker/runconfig"
     "github.com/docker/docker/utils"
+    "github.com/docker/docker/volumes"
 )
 
 var (

@@ -90,7 +91,7 @@ type Daemon struct {
     repositories   *graph.TagStore
     idIndex        *truncindex.TruncIndex
     sysInfo        *sysinfo.SysInfo
-    volumes        *graph.Graph
+    volumes        *volumes.Repository
     eng            *engine.Engine
     config         *Config
     containerGraph *graphdb.Database

@@ -382,6 +383,12 @@ func (daemon *Daemon) restore() error {
         }
     }
 
+    for _, c := range registeredContainers {
+        for _, mnt := range c.VolumeMounts() {
+            daemon.volumes.Add(mnt.volume)
+        }
+    }
+
     if !debug {
         log.Infof(": done.")
     }

@@ -789,17 +796,16 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
         return nil, err
     }
 
-    // We don't want to use a complex driver like aufs or devmapper
-    // for volumes, just a plain filesystem
     volumesDriver, err := graphdriver.GetDriver("vfs", config.Root, config.GraphOptions)
     if err != nil {
         return nil, err
     }
-    log.Debugf("Creating volumes graph")
-    volumes, err := graph.NewGraph(path.Join(config.Root, "volumes"), volumesDriver)
+
+    volumes, err := volumes.NewRepository(path.Join(config.Root, "volumes"), volumesDriver)
     if err != nil {
         return nil, err
     }
+
     log.Debugf("Creating repository list")
     repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g, config.Mirrors)
     if err != nil {

@@ -1028,10 +1034,6 @@ func (daemon *Daemon) ExecutionDriver() execdriver.Driver {
     return daemon.execDriver
 }
 
-func (daemon *Daemon) Volumes() *graph.Graph {
-    return daemon.volumes
-}
-
 func (daemon *Daemon) ContainerGraph() *graphdb.Database {
     return daemon.containerGraph
 }


@@ -4,8 +4,6 @@ import (
     "fmt"
     "os"
     "path"
-    "path/filepath"
-    "strings"
 
     "github.com/docker/docker/engine"
     "github.com/docker/docker/pkg/log"

@@ -21,10 +19,11 @@ func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status {
     forceRemove := job.GetenvBool("forceRemove")
     container := daemon.Get(name)
 
-    if container == nil {
-        job.Errorf("No such container: %s", name)
-    }
-
     if removeLink {
+        if container == nil {
+            return job.Errorf("No such link: %s", name)
+        }
         name, err := GetFullContainerName(name)
         if err != nil {
             job.Error(err)

@@ -63,73 +62,22 @@ func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status {
             return job.Errorf("Cannot destroy container %s: %s", name, err)
         }
         container.LogEvent("destroy")
 
         if removeVolume {
-            var (
-                volumes     = make(map[string]struct{})
-                binds       = make(map[string]struct{})
-                usedVolumes = make(map[string]*Container)
-            )
-
-            // the volume id is always the base of the path
-            getVolumeId := func(p string) string {
-                return filepath.Base(strings.TrimSuffix(p, "/layer"))
-            }
-
-            // populate bind map so that they can be skipped and not removed
-            for _, bind := range container.HostConfig().Binds {
-                source := strings.Split(bind, ":")[0]
-                // TODO: refactor all volume stuff, all of it
-                // it is very important that we eval the link or comparing the keys to container.Volumes will not work
-                //
-                // eval symlink can fail, ref #5244 if we receive an is not exist error we can ignore it
-                p, err := filepath.EvalSymlinks(source)
-                if err != nil && !os.IsNotExist(err) {
-                    return job.Error(err)
-                }
-                if p != "" {
-                    source = p
-                }
-                binds[source] = struct{}{}
-            }
-
-            // Store all the deleted containers volumes
-            for _, volumeId := range container.Volumes {
-                // Skip the volumes mounted from external
-                // bind mounts here will will be evaluated for a symlink
-                if _, exists := binds[volumeId]; exists {
-                    continue
-                }
-
-                volumeId = getVolumeId(volumeId)
-                volumes[volumeId] = struct{}{}
-            }
-
-            // Retrieve all volumes from all remaining containers
-            for _, container := range daemon.List() {
-                for _, containerVolumeId := range container.Volumes {
-                    containerVolumeId = getVolumeId(containerVolumeId)
-                    usedVolumes[containerVolumeId] = container
-                }
-            }
-
-            for volumeId := range volumes {
-                // If the requested volu
-                if c, exists := usedVolumes[volumeId]; exists {
-                    log.Infof("The volume %s is used by the container %s. Impossible to remove it. Skipping.", volumeId, c.ID)
-                    continue
-                }
-                if err := daemon.Volumes().Delete(volumeId); err != nil {
-                    return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err)
-                }
-            }
+            daemon.DeleteVolumes(container.VolumePaths())
         }
+    } else {
+        return job.Errorf("No such container: %s", name)
     }
     return engine.StatusOK
 }
 
+func (daemon *Daemon) DeleteVolumes(volumeIDs map[string]struct{}) {
+    for id := range volumeIDs {
+        if err := daemon.volumes.Delete(id); err != nil {
+            log.Infof("%s", err)
+            continue
+        }
+    }
+}
+
 // Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem.
 // FIXME: rename to Rm for consistency with the CLI command
 func (daemon *Daemon) Destroy(container *Container) error {

@@ -149,7 +97,7 @@ func (daemon *Daemon) Destroy(container *Container) error {
     // Deregister the container before removing its directory, to avoid race conditions
     daemon.idIndex.Delete(container.ID)
     daemon.containers.Delete(container.ID)
+    container.derefVolumes()
     if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
         log.Debugf("Unable to remove container from link graph: %s", err)
     }


@@ -11,63 +11,199 @@ import (
     "github.com/docker/docker/archive"
     "github.com/docker/docker/daemon/execdriver"
+    "github.com/docker/docker/pkg/log"
     "github.com/docker/docker/pkg/symlink"
+    "github.com/docker/docker/volumes"
 )
 
-type Volume struct {
-    HostPath    string
-    VolPath     string
-    isReadWrite bool
-    isBindMount bool
+type Mount struct {
+    MountToPath string
+    container   *Container
+    volume      *volumes.Volume
+    Writable    bool
 }
 
-func (v *Volume) isDir() (bool, error) {
-    stat, err := os.Stat(v.HostPath)
-    if err != nil {
-        return false, err
-    }
-    return stat.IsDir(), nil
-}
-
-func prepareVolumesForContainer(container *Container) error {
+func (container *Container) prepareVolumes() error {
     if container.Volumes == nil || len(container.Volumes) == 0 {
         container.Volumes = make(map[string]string)
         container.VolumesRW = make(map[string]bool)
-        if err := applyVolumesFrom(container); err != nil {
+        if err := container.applyVolumesFrom(); err != nil {
             return err
         }
     }
-    return createVolumes(container)
+    return container.createVolumes()
 }
 
-func setupMountsForContainer(container *Container) error {
+// sortedVolumeMounts returns the list of container volume mount points sorted in lexicographic order
+func (container *Container) sortedVolumeMounts() []string {
+    var mountPaths []string
+    for path := range container.Volumes {
+        mountPaths = append(mountPaths, path)
+    }
+    sort.Strings(mountPaths)
+    return mountPaths
+}
+
+func (container *Container) createVolumes() error {
+    mounts, err := container.parseVolumeMountConfig()
+    if err != nil {
+        return err
+    }
+    for _, mnt := range mounts {
+        if err := mnt.initialize(); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func (m *Mount) initialize() error {
+    // No need to initialize anything since it's already been initialized
+    if _, exists := m.container.Volumes[m.MountToPath]; exists {
+        return nil
+    }
+
+    // This is the full path to container fs + mntToPath
+    containerMntPath, err := symlink.FollowSymlinkInScope(filepath.Join(m.container.basefs, m.MountToPath), m.container.basefs)
+    if err != nil {
+        return err
+    }
+    m.container.VolumesRW[m.MountToPath] = m.Writable
+    m.container.Volumes[m.MountToPath] = m.volume.Path
+    m.volume.AddContainer(m.container.ID)
+    if m.Writable && !m.volume.IsBindMount {
+        // Copy whatever is in the container at the mntToPath to the volume
+        copyExistingContents(containerMntPath, m.volume.Path)
+    }
+    return nil
+}
+
+func (container *Container) VolumePaths() map[string]struct{} {
+    var paths = make(map[string]struct{})
+    for _, path := range container.Volumes {
+        paths[path] = struct{}{}
+    }
+    return paths
+}
+
+func (container *Container) derefVolumes() {
+    for path := range container.VolumePaths() {
+        vol := container.daemon.volumes.Get(path)
+        if vol == nil {
+            log.Debugf("Volume %s was not found and could not be dereferenced", path)
+            continue
+        }
+        vol.RemoveContainer(container.ID)
+    }
+}
+
+func (container *Container) parseVolumeMountConfig() (map[string]*Mount, error) {
+    var mounts = make(map[string]*Mount)
+    // Get all the bind mounts
+    for _, spec := range container.hostConfig.Binds {
+        path, mountToPath, writable, err := parseBindMountSpec(spec)
+        if err != nil {
+            return nil, err
+        }
+        // Check if a volume already exists for this and use it
+        vol := container.daemon.volumes.Get(path)
+        if vol == nil {
+            vol, err = container.daemon.volumes.NewVolume(path, writable)
+            if err != nil {
+                return nil, err
+            }
+        }
+        mounts[mountToPath] = &Mount{container: container, volume: vol, MountToPath: mountToPath, Writable: writable}
+    }
+
+    // Get the rest of the volumes
+    for path := range container.Config.Volumes {
+        // Check if this is already added as a bind-mount
+        if _, exists := mounts[path]; exists {
+            continue
+        }
+        vol, err := container.daemon.volumes.NewVolume("", true)
+        if err != nil {
+            return nil, err
+        }
+        mounts[path] = &Mount{container: container, MountToPath: path, volume: vol, Writable: true}
+    }
+    return mounts, nil
+}
+
+func parseBindMountSpec(spec string) (string, string, bool, error) {
+    var (
+        path, mountToPath string
+        writable          bool
+        arr               = strings.Split(spec, ":")
+    )
+
+    switch len(arr) {
+    case 2:
+        path = arr[0]
+        mountToPath = arr[1]
+        writable = true
+    case 3:
+        path = arr[0]
+        mountToPath = arr[1]
+        writable = validMountMode(arr[2]) && arr[2] == "rw"
+    default:
+        return "", "", false, fmt.Errorf("Invalid volume specification: %s", spec)
+    }
+
+    if !filepath.IsAbs(path) {
+        return "", "", false, fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", path)
+    }
+    return path, mountToPath, writable, nil
+}
+
+func (container *Container) applyVolumesFrom() error {
+    volumesFrom := container.hostConfig.VolumesFrom
+    for _, spec := range volumesFrom {
+        mounts, err := parseVolumesFromSpec(container.daemon, spec)
+        if err != nil {
+            return err
+        }
+        for _, mnt := range mounts {
+            mnt.container = container
+            if err = mnt.initialize(); err != nil {
+                return err
+            }
+        }
+    }
+    return nil
+}
+
+func validMountMode(mode string) bool {
+    validModes := map[string]bool{
+        "rw": true,
+        "ro": true,
+    }
+    return validModes[mode]
+}
+
+func (container *Container) setupMounts() error {
     mounts := []execdriver.Mount{
-        {
-            Source:      container.ResolvConfPath,
-            Destination: "/etc/resolv.conf",
-            Writable:    true,
-            Slave:       true,
-        },
+        {Source: container.ResolvConfPath, Destination: "/etc/resolv.conf", Writable: true, Private: true},
     }
 
     if container.HostnamePath != "" {
-        mounts = append(mounts, execdriver.Mount{
-            Source:      container.HostnamePath,
-            Destination: "/etc/hostname",
-            Writable:    true,
-            Private:     true,
-        })
+        mounts = append(mounts, execdriver.Mount{Source: container.HostnamePath, Destination: "/etc/hostname", Writable: true, Private: true})
     }
 
     if container.HostsPath != "" {
-        mounts = append(mounts, execdriver.Mount{
-            Source:      container.HostsPath,
-            Destination: "/etc/hosts",
-            Writable:    true,
-            Slave:       true,
-        })
+        mounts = append(mounts, execdriver.Mount{Source: container.HostsPath, Destination: "/etc/hosts", Writable: true, Private: true})
     }
 
     // Mount user specified volumes

@@ -87,219 +223,46 @@ func setupMountsForContainer(container *Container) error {
     return nil
 }
 
-// sortedVolumeMounts returns the list of container volume mount points sorted in lexicographic order
-func (container *Container) sortedVolumeMounts() []string {
-    var mountPaths []string
-    for path := range container.Volumes {
-        mountPaths = append(mountPaths, path)
-    }
-    sort.Strings(mountPaths)
-    return mountPaths
-}
-
-func parseVolumesFromSpec(container *Container, spec string) (map[string]*Volume, error) {
+func parseVolumesFromSpec(daemon *Daemon, spec string) (map[string]*Mount, error) {
     specParts := strings.SplitN(spec, ":", 2)
     if len(specParts) == 0 {
         return nil, fmt.Errorf("Malformed volumes-from specification: %s", spec)
     }
 
-    c := container.daemon.Get(specParts[0])
+    c := daemon.Get(specParts[0])
     if c == nil {
         return nil, fmt.Errorf("Container %s not found. Impossible to mount its volumes", specParts[0])
     }
 
-    volumes, err := c.GetVolumes()
-    if err != nil {
-        return nil, err
-    }
+    mounts := c.VolumeMounts()
 
     if len(specParts) == 2 {
         mode := specParts[1]
-        if validVolumeMode(mode) {
+        if validMountMode(mode) {
             return nil, fmt.Errorf("Invalid mode for volumes-from: %s", mode)
         }
 
         // Set the mode for the inheritted volume
-        for _, v := range volumes {
-            v.isReadWrite = mode != "ro"
+        for _, mnt := range mounts {
+            // Ensure that if the inherited volume is not writable, that we don't make
+            // it writable here
+            mnt.Writable = mnt.Writable && (mode == "rw")
         }
     }
 
-    return volumes, nil
+    return mounts, nil
 }
 
-func applyVolumesFrom(container *Container) error {
-    volumesFrom := container.hostConfig.VolumesFrom
-    for _, spec := range volumesFrom {
-        volumes, err := parseVolumesFromSpec(container, spec)
-        if err != nil {
-            return err
-        }
-        for _, v := range volumes {
-            if err = v.initialize(container); err != nil {
-                return err
-            }
-        }
-    }
-    return nil
-}
-
-func validVolumeMode(mode string) bool {
-    validModes := map[string]bool{
-        "rw": true,
-        "ro": true,
-    }
-    return validModes[mode]
-}
-
-func parseBindVolumeSpec(spec string) (Volume, error) {
-    var (
-        arr = strings.Split(spec, ":")
-        vol Volume
-    )
-
-    switch len(arr) {
-    case 1:
-        vol.VolPath = spec
-        vol.isReadWrite = true
-    case 2:
-        vol.HostPath = arr[0]
-        vol.VolPath = arr[1]
-        vol.isReadWrite = true
-    case 3:
-        vol.HostPath = arr[0]
-        vol.VolPath = arr[1]
-        vol.isReadWrite = validVolumeMode(arr[2]) && arr[2] == "rw"
-    default:
-        return vol, fmt.Errorf("Invalid volume specification: %s", spec)
-    }
-
-    if !filepath.IsAbs(vol.HostPath) {
-        return vol, fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", vol.HostPath)
-    }
-    return vol, nil
-}
-
-func createVolumes(container *Container) error {
-    // Get all the bindmounts
-    volumes, err := container.GetVolumes()
-    if err != nil {
-        return err
-    }
-
-    // Get all the rest of the volumes
-    for volPath := range container.Config.Volumes {
-        // Make sure the the volume isn't already specified as a bindmount
-        if _, exists := volumes[volPath]; !exists {
-            volumes[volPath] = &Volume{
-                VolPath:     volPath,
-                isReadWrite: true,
-                isBindMount: false,
-            }
-        }
-    }
-
-    for _, vol := range volumes {
-        if err = vol.initialize(container); err != nil {
-            return err
-        }
-    }
-    return nil
-}
-
-func createVolumeHostPath(container *Container) (string, error) {
-    volumesDriver := container.daemon.volumes.Driver()
-    // Do not pass a container as the parameter for the volume creation.
-    // The graph driver using the container's information ( Image ) to
-    // create the parent.
-    c, err := container.daemon.volumes.Create(nil, "", "", "", "", nil, nil)
-    if err != nil {
-        return "", err
-    }
-    hostPath, err := volumesDriver.Get(c.ID, "")
-    if err != nil {
-        return hostPath, fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err)
-    }
-    return hostPath, nil
-}
-
-func (v *Volume) initialize(container *Container) error {
-    var err error
-    v.VolPath = filepath.Clean(v.VolPath)
-
-    // Do not initialize an existing volume
-    if _, exists := container.Volumes[v.VolPath]; exists {
-        return nil
-    }
-
-    // If it's not a bindmount we need to create the dir on the host
-    if !v.isBindMount && v.HostPath == "" {
-        v.HostPath, err = createVolumeHostPath(container)
-        if err != nil {
-            return err
-        }
-    }
-
-    hostPath, err := filepath.EvalSymlinks(v.HostPath)
-    if err != nil {
-        return err
-    }
-
-    // Create the mountpoint
-    // This is the path to the volume within the container FS
-    // This differs from `hostPath` in that `hostPath` refers to the place where
-    // the volume data is actually stored on the host
-    fullVolPath, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, v.VolPath), container.basefs)
-    if err != nil {
-        return err
-    }
-
-    container.Volumes[v.VolPath] = hostPath
-    container.VolumesRW[v.VolPath] = v.isReadWrite
-
-    volIsDir, err := v.isDir()
-    if err != nil {
-        return err
-    }
-    if err := createIfNotExists(fullVolPath, volIsDir); err != nil {
-        return err
-    }
-
-    // Do not copy or change permissions if we are mounting from the host
-    if v.isReadWrite && !v.isBindMount {
-        return copyExistingContents(fullVolPath, hostPath)
-    }
-    return nil
-}
-
-func createIfNotExists(destination string, isDir bool) error {
-    if _, err := os.Stat(destination); err == nil || !os.IsNotExist(err) {
-        return nil
-    }
-
-    if isDir {
-        return os.MkdirAll(destination, 0755)
-    }
-
-    if err := os.MkdirAll(filepath.Dir(destination), 0755); err != nil {
-        return err
-    }
-
-    f, err := os.OpenFile(destination, os.O_CREATE, 0755)
-    if err != nil {
-        return err
-    }
-    f.Close()
-
-    return nil
+func (container *Container) VolumeMounts() map[string]*Mount {
+    mounts := make(map[string]*Mount)
+
+    for mountToPath, path := range container.Volumes {
+        if v := container.daemon.volumes.Get(path); v != nil {
+            mounts[mountToPath] = &Mount{volume: v, container: container, MountToPath: mountToPath, Writable: container.VolumesRW[mountToPath]}
+        }
+    }
+
+    return mounts
 }
 
 func copyExistingContents(source, destination string) error {
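As a quick illustration of the two input rules the rewritten daemon/volumes.go enforces, here is a self-contained sketch adapted from parseBindMountSpec and parseVolumesFromSpec above. The real functions are unexported daemon internals; main, the sample values and the simplified mode handling are mine, not code from this commit.

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

// Adapted from parseBindMountSpec in the diff above: "host:container[:mode]".
func parseBindMountSpec(spec string) (host, dest string, writable bool, err error) {
    arr := strings.Split(spec, ":")
    switch len(arr) {
    case 2:
        host, dest, writable = arr[0], arr[1], true
    case 3:
        host, dest = arr[0], arr[1]
        writable = arr[2] == "rw" // simplification: the original also checks the mode against rw/ro
    default:
        return "", "", false, fmt.Errorf("Invalid volume specification: %s", spec)
    }
    if !filepath.IsAbs(host) {
        return "", "", false, fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", host)
    }
    return host, dest, writable, nil
}

func main() {
    host, dest, writable, _ := parseBindMountSpec("/src:/app:ro")
    fmt.Println(host, dest, writable) // /src /app false

    // --volumes-from <name>:<mode> can only narrow permissions, never widen
    // them: an inherited read-only mount stays read-only even with ":rw".
    inherited := false // Writable flag of the parent's mount
    mode := "rw"
    fmt.Println(inherited && mode == "rw") // false
}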


@@ -1463,7 +1463,7 @@ func TestBuildWithVolumeOwnership(t *testing.T) {
     cmd := exec.Command(dockerBinary, "run", "--rm", "testbuildimg", "ls", "-la", "/test")
     out, _, err := runCommandWithOutput(cmd)
     if err != nil {
-        t.Fatal(err)
+        t.Fatal(out, err)
     }
     if expected := "drw-------"; !strings.Contains(out, expected) {


@@ -122,7 +122,7 @@ func TestCommitWithHostBindMount(t *testing.T) {
     cmd = exec.Command(dockerBinary, "commit", "bind-commit", "bindtest")
     imageId, _, err := runCommandWithOutput(cmd)
     if err != nil {
-        t.Fatal(err)
+        t.Fatal(imageId, err)
     }
     imageId = strings.Trim(imageId, "\r\n")


@@ -18,8 +18,8 @@ func TestRmContainerWithRemovedVolume(t *testing.T) {
     }
 
     cmd = exec.Command(dockerBinary, "rm", "-v", "losemyvolumes")
-    if _, err := runCommand(cmd); err != nil {
-        t.Fatal(err)
+    if out, _, err := runCommandWithOutput(cmd); err != nil {
+        t.Fatal(out, err)
     }
 
     deleteAllContainers()


@@ -398,16 +398,25 @@ func TestRunVolumesFromInReadWriteMode(t *testing.T) {
     logDone("run - volumes from as read write mount")
 }
 
-func TestRunVolumesFromInheritsReadOnly(t *testing.T) {
+func TestVolumesFromGetsProperMode(t *testing.T) {
     cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test:/test:ro", "busybox", "true")
     if _, err := runCommand(cmd); err != nil {
         t.Fatal(err)
     }
     // Expect this "rw" mode to be be ignored since the inheritted volume is "ro"
     cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:rw", "busybox", "touch", "/test/file")
     if _, err := runCommand(cmd); err == nil {
-        t.Fatal("Expected to inherit read-only volume even when passing in `rw`")
+        t.Fatal("Expected volumes-from to inherit read-only volume even when passing in `rw`")
+    }
+
+    cmd = exec.Command(dockerBinary, "run", "--name", "parent2", "-v", "/test:/test:ro", "busybox", "true")
+    if _, err := runCommand(cmd); err != nil {
+        t.Fatal(err)
+    }
+    // Expect this to be read-only since both are "ro"
+    cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent2:ro", "busybox", "touch", "/test/file")
+    if _, err := runCommand(cmd); err == nil {
+        t.Fatal("Expected volumes-from to inherit read-only volume even when passing in `ro`")
     }
 
     deleteAllContainers()

@@ -423,8 +432,8 @@ func TestRunApplyVolumesFromBeforeVolumes(t *testing.T) {
     }
 
     cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent", "-v", "/test", "busybox", "cat", "/test/foo")
-    if _, err := runCommand(cmd); err != nil {
-        t.Fatal(err)
+    if out, _, err := runCommandWithOutput(cmd); err != nil {
+        t.Fatal(out, err)
     }
 
     deleteAllContainers()

volumes/MAINTAINERS Normal file

@@ -0,0 +1 @@
Brian Goff <cpuguy83@gmail.com> (@cpuguy83)

volumes/repository.go Normal file

@@ -0,0 +1,169 @@
package volumes

import (
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"
    "sync"

    "github.com/docker/docker/daemon/graphdriver"
    "github.com/docker/docker/utils"
)

type Repository struct {
    configPath string
    driver     graphdriver.Driver
    volumes    map[string]*Volume
    lock       sync.Mutex
}

func NewRepository(configPath string, driver graphdriver.Driver) (*Repository, error) {
    abspath, err := filepath.Abs(configPath)
    if err != nil {
        return nil, err
    }

    // Create the config path
    if err := os.MkdirAll(abspath, 0700); err != nil && !os.IsExist(err) {
        return nil, err
    }

    repo := &Repository{
        driver:     driver,
        configPath: abspath,
        volumes:    make(map[string]*Volume),
    }

    return repo, repo.restore()
}

func (r *Repository) NewVolume(path string, writable bool) (*Volume, error) {
    var (
        isBindMount bool
        err         error
        id          = utils.GenerateRandomID()
    )
    if path != "" {
        isBindMount = true
    }

    if path == "" {
        path, err = r.createNewVolumePath(id)
        if err != nil {
            return nil, err
        }
    }
    path, err = filepath.EvalSymlinks(path)
    if err != nil {
        return nil, err
    }

    v := &Volume{
        ID:          id,
        Path:        path,
        repository:  r,
        Writable:    writable,
        Containers:  make(map[string]struct{}),
        configPath:  r.configPath + "/" + id,
        IsBindMount: isBindMount,
    }

    if err := v.initialize(); err != nil {
        return nil, err
    }

    if err := r.Add(v); err != nil {
        return nil, err
    }
    return v, nil
}

func (r *Repository) restore() error {
    dir, err := ioutil.ReadDir(r.configPath)
    if err != nil {
        return err
    }

    var ids []string
    for _, v := range dir {
        id := v.Name()
        if r.driver.Exists(id) {
            ids = append(ids, id)
        }
    }
    return nil
}

func (r *Repository) Get(path string) *Volume {
    r.lock.Lock()
    vol := r.volumes[path]
    r.lock.Unlock()
    return vol
}

func (r *Repository) get(path string) *Volume {
    return r.volumes[path]
}

func (r *Repository) Add(volume *Volume) error {
    r.lock.Lock()
    defer r.lock.Unlock()
    if vol := r.get(volume.Path); vol != nil {
        return fmt.Errorf("Volume exists: %s", volume.ID)
    }
    r.volumes[volume.Path] = volume
    return nil
}

func (r *Repository) Remove(volume *Volume) {
    r.lock.Lock()
    r.remove(volume)
    r.lock.Unlock()
}

func (r *Repository) remove(volume *Volume) {
    delete(r.volumes, volume.Path)
}

func (r *Repository) Delete(path string) error {
    r.lock.Lock()
    defer r.lock.Unlock()
    volume := r.get(path)
    if volume == nil {
        return fmt.Errorf("Volume %s does not exist", path)
    }

    if volume.IsBindMount {
        return fmt.Errorf("Volume %s is a bind-mount and cannot be removed", volume.Path)
    }

    if len(volume.Containers) > 0 {
        return fmt.Errorf("Volume %s is being used and cannot be removed: used by containers %s", volume.Path, volume.Containers)
    }

    if err := os.RemoveAll(volume.configPath); err != nil {
        return err
    }

    if err := r.driver.Remove(volume.ID); err != nil {
        if !os.IsNotExist(err) {
            return err
        }
    }

    r.remove(volume)
    return nil
}

func (r *Repository) createNewVolumePath(id string) (string, error) {
    if err := r.driver.Create(id, ""); err != nil {
        return "", err
    }

    path, err := r.driver.Get(id, "")
    if err != nil {
        return "", fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", r.driver, id, err)
    }

    return path, nil
}

volumes/volume.go Normal file

@@ -0,0 +1,127 @@
package volumes

import (
    "encoding/json"
    "io/ioutil"
    "os"
    "path/filepath"
    "sync"

    "github.com/docker/docker/pkg/symlink"
)

type Volume struct {
    ID          string
    Path        string
    IsBindMount bool
    Writable    bool
    Containers  map[string]struct{}
    configPath  string
    repository  *Repository
    lock        sync.Mutex
}

func (v *Volume) IsDir() (bool, error) {
    stat, err := os.Stat(v.Path)
    if err != nil {
        return false, err
    }

    return stat.IsDir(), nil
}

func (v *Volume) RemoveContainer(containerId string) {
    v.lock.Lock()
    delete(v.Containers, containerId)
    v.lock.Unlock()
}

func (v *Volume) AddContainer(containerId string) {
    v.lock.Lock()
    v.Containers[containerId] = struct{}{}
    v.lock.Unlock()
}

func (v *Volume) createIfNotExist() error {
    if stat, err := os.Stat(v.Path); err != nil && os.IsNotExist(err) {
        if stat.IsDir() {
            os.MkdirAll(v.Path, 0755)
        }

        if err := os.MkdirAll(filepath.Dir(v.Path), 0755); err != nil {
            return err
        }
        f, err := os.OpenFile(v.Path, os.O_CREATE, 0755)
        if err != nil {
            return err
        }
        f.Close()
    }
    return nil
}

func (v *Volume) initialize() error {
    v.lock.Lock()
    defer v.lock.Unlock()

    if err := v.createIfNotExist(); err != nil {
        return err
    }

    if err := os.MkdirAll(v.configPath, 0755); err != nil {
        return err
    }
    jsonPath, err := v.jsonPath()
    if err != nil {
        return err
    }
    f, err := os.Create(jsonPath)
    if err != nil {
        return err
    }
    defer f.Close()

    return v.toDisk()
}

func (v *Volume) ToDisk() error {
    v.lock.Lock()
    defer v.lock.Unlock()

    return v.toDisk()
}

func (v *Volume) toDisk() error {
    data, err := json.Marshal(v)
    if err != nil {
        return err
    }

    pth, err := v.jsonPath()
    if err != nil {
        return err
    }

    return ioutil.WriteFile(pth, data, 0666)
}

func (v *Volume) FromDisk() error {
    v.lock.Lock()
    defer v.lock.Unlock()
    pth, err := v.jsonPath()
    if err != nil {
        return err
    }

    data, err := ioutil.ReadFile(pth)
    if err != nil {
        return err
    }

    return json.Unmarshal(data, v)
}

func (v *Volume) jsonPath() (string, error) {
    return v.getRootResourcePath("config.json")
}

func (v *Volume) getRootResourcePath(path string) (string, error) {
    cleanPath := filepath.Join("/", path)
    return symlink.FollowSymlinkInScope(filepath.Join(v.configPath, cleanPath), v.configPath)
}