
Merge branch 'master' into refactor_opts

Conflicts:
	commands.go
Guillaume J. Charmes 2013-11-28 12:23:45 -08:00
commit 0c758e9312
12 changed files with 757 additions and 26 deletions

.gitignore

@@ -1,3 +1,6 @@
# Docker project generated files to ignore
# if you want to ignore files created by your editor/tools,
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
.vagrant*
bin
docker/docker

CONTRIBUTING.md

@@ -4,6 +4,13 @@ Want to hack on Docker? Awesome! Here are instructions to get you
started. They are probably not perfect, please let us know if anything
feels wrong or incomplete.
## Reporting Issues
When reporting [issues](https://github.com/dotcloud/docker/issues)
on GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc.)
and the output of `docker version` along with the output of `docker info` if possible.
This information will help us review and fix your issue faster.
## Build Environment
For instructions on setting up your development environment, please

archive/changes.go

@@ -181,7 +181,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
oldStat.Rdev != newStat.Rdev ||
// Don't look at size for dirs, it's not a good measure of change
(oldStat.Size != newStat.Size && oldStat.Mode&syscall.S_IFDIR != syscall.S_IFDIR) ||
oldStat.Mtim != newStat.Mtim {
getLastModification(oldStat) != getLastModification(newStat) {
change := Change{
Path: newChild.path(),
Kind: ChangeModify,

archive/diff.go

@@ -83,8 +83,10 @@ func ApplyLayer(dest string, layer Archive) error {
}
for k, v := range modifiedDirs {
aTime := time.Unix(v.Atim.Unix())
mTime := time.Unix(v.Mtim.Unix())
lastAccess := getLastAccess(v)
lastModification := getLastModification(v)
aTime := time.Unix(lastAccess.Unix())
mTime := time.Unix(lastModification.Unix())
if err := os.Chtimes(k, aTime, mTime); err != nil {
return err

archive/stat_darwin.go Normal file

@@ -0,0 +1,11 @@
package archive
import "syscall"
func getLastAccess(stat *syscall.Stat_t) syscall.Timespec {
return stat.Atimespec
}
func getLastModification(stat *syscall.Stat_t) syscall.Timespec {
return stat.Mtimespec
}

archive/stat_linux.go Normal file

@@ -0,0 +1,11 @@
package archive
import "syscall"
func getLastAccess(stat *syscall.Stat_t) syscall.Timespec {
return stat.Atim
}
func getLastModification(stat *syscall.Stat_t) syscall.Timespec {
return stat.Mtim
}
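
These two stubs paper over a platform difference: syscall.Stat_t spells its timestamp fields Atim/Mtim on Linux but Atimespec/Mtimespec on Darwin, so callers such as the ApplyLayer hunk above go through getLastAccess/getLastModification rather than touching the fields directly. A minimal sketch of how a caller might consume the accessors; timespecToTime and restoreTimes are hypothetical helpers for illustration, not part of this commit:

```go
package archive

import (
	"os"
	"syscall"
	"time"
)

// timespecToTime converts a syscall.Timespec to a time.Time;
// Timespec.Unix returns the (sec, nsec) pair that time.Unix expects.
func timespecToTime(ts syscall.Timespec) time.Time {
	return time.Unix(ts.Unix())
}

// restoreTimes re-applies the access/modification times captured in stat,
// going through the platform-neutral accessors defined above.
func restoreTimes(path string, stat *syscall.Stat_t) error {
	aTime := timespecToTime(getLastAccess(stat))
	mTime := timespecToTime(getLastModification(stat))
	return os.Chtimes(path, aTime, mTime)
}
```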

commands_unit_test.go Normal file

@@ -0,0 +1,157 @@
package docker
import (
"strings"
"testing"
)
func parse(t *testing.T, args string) (*Config, *HostConfig, error) {
config, hostConfig, _, err := ParseRun(strings.Split(args+" ubuntu bash", " "), nil)
return config, hostConfig, err
}
func mustParse(t *testing.T, args string) (*Config, *HostConfig) {
config, hostConfig, err := parse(t, args)
if err != nil {
t.Fatal(err)
}
return config, hostConfig
}
func TestParseRunLinks(t *testing.T) {
if _, hostConfig := mustParse(t, "-link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" {
t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links)
}
if _, hostConfig := mustParse(t, "-link a:b -link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" {
t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links)
}
if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 {
t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links)
}
if _, _, err := parse(t, "-link a"); err == nil {
t.Fatalf("Error parsing links. `-link a` should be an error but is not")
}
if _, _, err := parse(t, "-link"); err == nil {
t.Fatalf("Error parsing links. `-link` should be an error but is not")
}
}
func TestParseRunAttach(t *testing.T) {
if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr {
t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
}
if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr {
t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
}
if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
}
if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
}
if _, _, err := parse(t, "-a"); err == nil {
t.Fatalf("Error parsing attach flags, `-a` should be an error but is not")
}
if _, _, err := parse(t, "-a invalid"); err == nil {
t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not")
}
if _, _, err := parse(t, "-a invalid -a stdout"); err == nil {
t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not")
}
if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil {
t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not")
}
if _, _, err := parse(t, "-a stdin -d"); err == nil {
t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not")
}
if _, _, err := parse(t, "-a stdout -d"); err == nil {
t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not")
}
if _, _, err := parse(t, "-a stderr -d"); err == nil {
t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not")
}
if _, _, err := parse(t, "-d -rm"); err == nil {
t.Fatalf("Error parsing attach flags, `-d -rm` should be an error but is not")
}
}
func TestParseRunVolumes(t *testing.T) {
if config, hostConfig := mustParse(t, "-v /tmp"); hostConfig.Binds != nil {
t.Fatalf("Error parsing volume flags, `-v /tmp` should not mount-bind anything. Received %v", hostConfig.Binds)
} else if _, exists := config.Volumes["/tmp"]; !exists {
t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
}
if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil {
t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds)
} else if _, exists := config.Volumes["/tmp"]; !exists {
t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Recevied %v", config.Volumes)
} else if _, exists := config.Volumes["/var"]; !exists {
t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. Received %v", config.Volumes)
}
if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containeTmp. Received %v", hostConfig.Binds)
} else if _, exists := config.Volumes["/containerTmp"]; !exists {
t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
}
if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" || hostConfig.Binds[1] != "/hostVar:/containerVar" {
t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds)
} else if _, exists := config.Volumes["/containerTmp"]; !exists {
t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
} else if _, exists := config.Volumes["/containerVar"]; !exists {
t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
}
if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp:ro" || hostConfig.Binds[1] != "/hostVar:/containerVar:rw" {
t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds)
} else if _, exists := config.Volumes["/containerTmp"]; !exists {
t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
} else if _, exists := config.Volumes["/containerVar"]; !exists {
t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
}
if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containeTmp. Received %v", hostConfig.Binds)
} else if _, exists := config.Volumes["/containerTmp"]; !exists {
t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
} else if _, exists := config.Volumes["/containerVar"]; !exists {
t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
}
if config, hostConfig := mustParse(t, ""); hostConfig.Binds != nil {
t.Fatalf("Error parsing volume flags, without volume, nothing should be mount-binded. Received %v", hostConfig.Binds)
} else if len(config.Volumes) != 0 {
t.Fatalf("Error parsing volume flags, without volume, no volume should be present. Received %v", config.Volumes)
}
mustParse(t, "-v /")
if _, _, err := parse(t, "-v /:/"); err == nil {
t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't")
}
if _, _, err := parse(t, "-v"); err == nil {
t.Fatalf("Error parsing volume flags, `-v` should fail but didn't")
}
if _, _, err := parse(t, "-v /tmp:"); err == nil {
t.Fatalf("Error parsing volume flags, `-v /tmp:` should fail but didn't")
}
if _, _, err := parse(t, "-v /tmp:ro"); err == nil {
t.Fatalf("Error parsing volume flags, `-v /tmp:ro` should fail but didn't")
}
if _, _, err := parse(t, "-v /tmp::"); err == nil {
t.Fatalf("Error parsing volume flags, `-v /tmp::` should fail but didn't")
}
if _, _, err := parse(t, "-v :"); err == nil {
t.Fatalf("Error parsing volume flags, `-v :` should fail but didn't")
}
if _, _, err := parse(t, "-v ::"); err == nil {
t.Fatalf("Error parsing volume flags, `-v ::` should fail but didn't")
}
if _, _, err := parse(t, "-v /tmp:/tmp:/tmp:/tmp"); err == nil {
t.Fatalf("Error parsing volume flags, `-v /tmp:/tmp:/tmp:/tmp` should fail but didn't")
}
}

contrib/mkseccomp.pl Executable file

@@ -0,0 +1,77 @@
#!/usr/bin/perl
#
# A simple helper script to help people build seccomp profiles for
# Docker/LXC. The goal is mostly to reduce the attack surface to the
# kernel, by restricting access to rarely used, recently added or not used
# syscalls.
#
# This script processes one or more files which contain the list of system
# calls to be allowed. See mkseccomp.sample for more information how you
# can configure the list of syscalls. When run, this script produces output
# which, when stored in a file, can be passed to docker as follows:
#
# docker run -lxc-conf="lxc.seccomp=$file" <rest of arguments>
#
# The included sample file shows how to cut about a quarter of all syscalls,
# without affecting most applications.
#
# For specific situations it is possible to reduce the list further. By
# reducing the list to just those syscalls required by a certain application
# you can make it difficult for unknown/unexpected code to run.
#
# Run this script as follows:
#
# ./mkseccomp.pl < mkseccomp.sample >syscalls.list
# or
# ./mkseccomp.pl mkseccomp.sample >syscalls.list
#
# Multiple files can be specified, in which case the lists of syscalls are
# combined.
#
# By Martijn van Oosterhout <kleptog@svana.org> Nov 2013
# How it works:
#
# This program basically spawns two processes to form a chain like:
#
# <process data section to prefix __NR_> | cpp | <add header and filter unknown syscalls>
use strict;
use warnings;
if( -t ) {
print STDERR "Helper script to make seccomp filters for Docker/LXC.\n";
print STDERR "Usage: mkseccomp.pl [files...]\n";
exit 1;
}
my $pid = open(my $in, "-|") // die "Couldn't fork1 ($!)\n";
if($pid == 0) { # Child
$pid = open(my $out, "|-") // die "Couldn't fork2 ($!)\n";
if($pid == 0) { # Child, which execs cpp
exec "cpp" or die "Couldn't exec cpp ($!)\n";
exit 1;
}
# Process the DATA section and output to cpp
print $out "#include <sys/syscall.h>\n";
while(<>) {
if(/^\w/) {
print $out "__NR_$_";
}
}
close $out;
exit 0;
}
# Print header and then process output from cpp.
print "1\n";
print "whitelist\n";
while(<$in>) {
print if( /^[0-9]/ );
}

contrib/mkseccomp.sample Normal file

@@ -0,0 +1,444 @@
/* This sample file is an example for mkseccomp.pl to produce a seccomp file
* which restricts syscalls that are only useful for an admin but allows the
* vast majority of normal userspace programs to run normally.
*
* The format of this file is one line per syscall. This is then processed
* and passed to 'cpp' to convert the names to numbers using whatever is
* correct for your platform. As such C-style comments are permitted. Note
* this also means that C preprocessor macros are also allowed. So it is
* possible to create groups surrounded by #ifdef/#endif and control their
* inclusion via #define (not #include).
*
* Syscalls that don't exist on your architecture are silently filtered out.
* Syscalls marked with (*) are required for a container to spawn a bash
* shell successfully (not necessarily full featured). Listing the same
* syscall multiple times is no problem.
*
* If you want to make a list specifically for one application the easiest
* way is to run the application under strace, like so:
*
* $ strace -f -q -c -o strace.out application args...
*
* Once you have a reasonable sample of the execution of the program, exit
* it. The file strace.out will have a summary of the syscalls used. Copy
* that list into this file, comment out everything else except the starred
* syscalls (which you need for the container to start) and you're done.
*
* To get the list of syscalls from the strace output, this works well for
* me
*
* $ cut -c52 < strace.out
*
* This sample list was compiled as a combination of all the syscalls
* available on i386 and amd64 on Ubuntu Precise; as such it may not contain
* everything, and not everything may be relevant for your system. This
* shouldn't be a problem.
*/
// Filesystem/File descriptor related
access // (*)
chdir // (*)
chmod
chown
chown32
close // (*)
creat
dup // (*)
dup2 // (*)
dup3
epoll_create
epoll_create1
epoll_ctl
epoll_ctl_old
epoll_pwait
epoll_wait
epoll_wait_old
eventfd
eventfd2
faccessat // (*)
fadvise64
fadvise64_64
fallocate
fanotify_init
fanotify_mark
ioctl // (*)
fchdir
fchmod
fchmodat
fchown
fchown32
fchownat
fcntl // (*)
fcntl64
fdatasync
fgetxattr
flistxattr
flock
fremovexattr
fsetxattr
fstat // (*)
fstat64
fstatat64
fstatfs
fstatfs64
fsync
ftruncate
ftruncate64
getcwd // (*)
getdents // (*)
getdents64
getxattr
inotify_add_watch
inotify_init
inotify_init1
inotify_rm_watch
io_cancel
io_destroy
io_getevents
io_setup
io_submit
lchown
lchown32
lgetxattr
link
linkat
listxattr
llistxattr
llseek
_llseek
lremovexattr
lseek // (*)
lsetxattr
lstat
lstat64
mkdir
mkdirat
mknod
mknodat
newfstatat
_newselect
oldfstat
oldlstat
oldolduname
oldstat
olduname
oldwait4
open // (*)
openat // (*)
pipe // (*)
pipe2
poll
ppoll
pread64
preadv
futimesat
pselect6
pwrite64
pwritev
read // (*)
readahead
readdir
readlink
readlinkat
readv
removexattr
rename
renameat
rmdir
select
sendfile
sendfile64
setxattr
splice
stat // (*)
stat64
statfs // (*)
statfs64
symlink
symlinkat
sync
sync_file_range
sync_file_range2
syncfs
tee
truncate
truncate64
umask
unlink
unlinkat
ustat
utime
utimensat
utimes
write // (*)
writev
// Network related
accept
accept4
bind // (*)
connect // (*)
getpeername
getsockname // (*)
getsockopt
listen
recv
recvfrom // (*)
recvmmsg
recvmsg
send
sendmmsg
sendmsg
sendto // (*)
setsockopt
shutdown
socket // (*)
socketcall
socketpair
// Signal related
pause
rt_sigaction // (*)
rt_sigpending
rt_sigprocmask // (*)
rt_sigqueueinfo
rt_sigreturn // (*)
rt_sigsuspend
rt_sigtimedwait
rt_tgsigqueueinfo
sigaction
sigaltstack // (*)
signal
signalfd
signalfd4
sigpending
sigprocmask
sigreturn
sigsuspend
// Other needed POSIX
alarm
brk // (*)
clock_adjtime
clock_getres
clock_gettime
clock_nanosleep
//clock_settime
gettimeofday
nanosleep
nice
sysinfo
syslog
time
timer_create
timer_delete
timerfd_create
timerfd_gettime
timerfd_settime
timer_getoverrun
timer_gettime
timer_settime
times
uname // (*)
// Memory control
madvise
mbind
mincore
mlock
mlockall
mmap // (*)
mmap2
mprotect // (*)
mremap
msync
munlock
munlockall
munmap // (*)
remap_file_pages
set_mempolicy
vmsplice
// Process control
capget
//capset
clone // (*)
execve // (*)
exit // (*)
exit_group // (*)
fork
getcpu
getpgid
getpgrp // (*)
getpid // (*)
getppid // (*)
getpriority
getresgid
getresgid32
getresuid
getresuid32
getrlimit // (*)
getrusage
getsid
getuid // (*)
getuid32
getegid // (*)
getegid32
geteuid // (*)
geteuid32
getgid // (*)
getgid32
getgroups
getgroups32
getitimer
get_mempolicy
kill
//personality
prctl
prlimit64
sched_getaffinity
sched_getparam
sched_get_priority_max
sched_get_priority_min
sched_getscheduler
sched_rr_get_interval
//sched_setaffinity
//sched_setparam
//sched_setscheduler
sched_yield
setfsgid
setfsgid32
setfsuid
setfsuid32
setgid
setgid32
setgroups
setgroups32
setitimer
setpgid // (*)
setpriority
setregid
setregid32
setresgid
setresgid32
setresuid
setresuid32
setreuid
setreuid32
setrlimit
setsid
setuid
setuid32
ugetrlimit
vfork
wait4 // (*)
waitid
waitpid
// IPC
ipc
mq_getsetattr
mq_notify
mq_open
mq_timedreceive
mq_timedsend
mq_unlink
msgctl
msgget
msgrcv
msgsnd
semctl
semget
semop
semtimedop
shmat
shmctl
shmdt
shmget
// Linux specific, mostly needed for thread-related stuff
arch_prctl // (*)
get_robust_list
get_thread_area
gettid
futex // (*)
restart_syscall // (*)
set_robust_list // (*)
set_thread_area
set_tid_address // (*)
tgkill
tkill
// Admin syscalls, these are blocked
//acct
//adjtimex
//bdflush
//chroot
//create_module
//delete_module
//get_kernel_syms // Obsolete
//idle // Obsolete
//init_module
//ioperm
//iopl
//ioprio_get
//ioprio_set
//kexec_load
//lookup_dcookie // oprofile only?
//migrate_pages // NUMA
//modify_ldt
//mount
//move_pages // NUMA
//name_to_handle_at // NFS server
//nfsservctl // NFS server
//open_by_handle_at // NFS server
//perf_event_open
//pivot_root
//process_vm_readv // For debugger
//process_vm_writev // For debugger
//ptrace // For debugger
//query_module
//quotactl
//reboot
//setdomainname
//sethostname
//setns
//settimeofday
//sgetmask // Obsolete
//ssetmask // Obsolete
//stime
//swapoff
//swapon
//_sysctl
//sysfs
//sys_setaltroot
//umount
//umount2
//unshare
//uselib
//vhangup
//vm86
//vm86old
// Kernel key management
//add_key
//keyctl
//request_key
// Unimplemented
//afs_syscall
//break
//ftime
//getpmsg
//gtty
//lock
//madvise1
//mpx
//prof
//profil
//putpmsg
//security
//stty
//tuxcall
//ulimit
//vserver

hack/RELEASE-CHECKLIST.md

@@ -5,7 +5,6 @@ So you're in charge of a Docker release? Cool. Here's what to do.
If your experience deviates from this document, please document the changes
to keep it up-to-date.
### 1. Pull from master and create a release branch
```bash
@@ -13,6 +12,7 @@ export VERSION=vXXX
git checkout release
git pull
git checkout -b bump_$VERSION
git merge origin/master
```
### 2. Update CHANGELOG.md
@@ -54,10 +54,14 @@ EXAMPLES:
### 3. Change the contents of the VERSION file
```bash
echo ${VERSION#v} > VERSION
```
### 4. Run all tests
```bash
docker run -privileged -lxc-conf=lxc.aa_profile=unconfined docker hack/make.sh test
docker run -privileged docker hack/make.sh test
```
### 5. Test the docs
@@ -79,8 +83,8 @@ git push origin bump_$VERSION
### 8. Apply tag
```bash
git tag -a v$VERSION # Don't forget the v!
git push --tags
git tag -a $VERSION -m $VERSION bump_$VERSION
git push origin $VERSION
```
Merging the pull request to the release branch will automatically
@@ -91,6 +95,9 @@ documentation releases, see ``docs/README.md``
### 9. Go to github to merge the bump_$VERSION into release
Don't forget to push that pretty blue button to delete the leftover
branch afterwards!
### 10. Publish binaries
To run this you will need access to the release credentials.
@@ -107,17 +114,19 @@ docker run \
-e AWS_ACCESS_KEY=$(cat ~/.aws/access_key) \
-e AWS_SECRET_KEY=$(cat ~/.aws/secret_key) \
-e GPG_PASSPHRASE=supersecretsesame \
-privileged -lxc-conf=lxc.aa_profile=unconfined \
-t -i \
-i -t -privileged \
docker \
hack/release.sh
```
It will build and upload the binaries on the specified bucket (you should
use test.docker.io for general testing, and once everything is fine,
switch to get.docker.io).
It will run the test suite one more time, build the binaries and packages,
and upload to the specified bucket (you should use test.docker.io for
general testing, and once everything is fine, switch to get.docker.io).
### 11. Rejoice!
### 11. Rejoice and Evangelize!
Congratulations! You're done.
Go forth and announce the glad tidings of the new release in `#docker`,
`#docker-dev`, on the [mailing list](https://groups.google.com/forum/#!forum/docker-dev),
and on Twitter!

runtime.go

@@ -422,7 +422,8 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin
if _, err := runtime.containerGraph.Set(name, id); err != nil {
if strings.HasSuffix(err.Error(), "name are not unique") {
conflictingContainer, _ := runtime.GetByName(name)
return nil, nil, fmt.Errorf("Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", name, utils.TruncateID(conflictingContainer.ID), name)
nameAsKnownByUser := strings.TrimPrefix(name, "/")
return nil, nil, fmt.Errorf("Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", nameAsKnownByUser, utils.TruncateID(conflictingContainer.ID), nameAsKnownByUser)
}
return nil, nil, err
}

server.go

@@ -985,7 +985,17 @@ func (srv *Server) ImagePull(localName string, tag string, out io.Writer, sf *ut
if err != nil {
return err
}
if _, err := srv.poolAdd("pull", localName+":"+tag); err != nil {
out = utils.NewWriteFlusher(out)
c, err := srv.poolAdd("pull", localName+":"+tag)
if err != nil {
if c != nil {
// Another pull of the same repository is already taking place; just wait for it to finish
out.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
<-c
return nil
}
return err
}
defer srv.poolRemove("pull", localName+":"+tag)
@@ -1001,7 +1011,6 @@ func (srv *Server) ImagePull(localName string, tag string, out io.Writer, sf *ut
localName = remoteName
}
out = utils.NewWriteFlusher(out)
err = srv.pullRepository(r, out, localName, remoteName, tag, endpoint, sf, parallel)
if err == registry.ErrLoginRequired {
return err
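
For context, the poolAdd/poolRemove pair above deduplicates concurrent pulls: the first caller for a given repository registers it and does the work, while later callers get back the in-flight channel and simply block until poolRemove closes it. A self-contained sketch of that pattern, with illustrative names (pool, add, and remove stand in for the unexported Server machinery):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

type pool struct {
	mu   sync.Mutex
	jobs map[string]chan struct{}
}

// add registers key and returns a fresh channel. If a job for key is
// already running, it returns the existing channel plus an error so the
// caller can wait on it instead of starting a duplicate job.
func (p *pool) add(key string) (chan struct{}, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if c, ok := p.jobs[key]; ok {
		return c, errors.New("already in progress")
	}
	c := make(chan struct{})
	p.jobs[key] = c
	return c, nil
}

// remove closes the job's channel, waking every waiter, and frees the slot.
func (p *pool) remove(key string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if c, ok := p.jobs[key]; ok {
		close(c)
		delete(p.jobs, key)
	}
}

func main() {
	p := &pool{jobs: make(map[string]chan struct{})}
	if c, err := p.add("pull/ubuntu:12.04"); err != nil {
		<-c // someone else is already pulling; wait for them to finish
		fmt.Println("pull finished elsewhere")
	} else {
		defer p.remove("pull/ubuntu:12.04")
		fmt.Println("doing the pull")
	}
}
```

Closing the channel, rather than sending on it, is what lets any number of waiters wake at once.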
@@ -1408,19 +1417,15 @@ func (srv *Server) ContainerDestroy(name string, removeVolume, removeLink bool)
var ErrImageReferenced = errors.New("Image referenced by a repository")
func (srv *Server) deleteImageAndChildren(id string, imgs *[]APIRmi) error {
func (srv *Server) deleteImageAndChildren(id string, imgs *[]APIRmi, byParents map[string][]*Image) error {
// If the image is referenced by a repo, do not delete
if len(srv.runtime.repositories.ByID()[id]) != 0 {
return ErrImageReferenced
}
// If the image is not referenced but has children, go recursive
referenced := false
byParents, err := srv.runtime.graph.ByParent()
if err != nil {
return err
}
for _, img := range byParents[id] {
if err := srv.deleteImageAndChildren(img.ID, imgs); err != nil {
if err := srv.deleteImageAndChildren(img.ID, imgs, byParents); err != nil {
if err != ErrImageReferenced {
return err
}
@@ -1432,7 +1437,7 @@ func (srv *Server) deleteImageAndChildren(id string, imgs *[]APIRmi) error {
}
// If the image is not referenced and has no children, remove it
byParents, err = srv.runtime.graph.ByParent()
byParents, err := srv.runtime.graph.ByParent()
if err != nil {
return err
}
@@ -1457,8 +1462,12 @@ func (srv *Server) deleteImageParents(img *Image, imgs *[]APIRmi) error {
if err != nil {
return err
}
byParents, err := srv.runtime.graph.ByParent()
if err != nil {
return err
}
// Remove all children images
if err := srv.deleteImageAndChildren(img.Parent, imgs); err != nil {
if err := srv.deleteImageAndChildren(img.Parent, imgs, byParents); err != nil {
return err
}
return srv.deleteImageParents(parent, imgs)
@@ -1500,7 +1509,7 @@ func (srv *Server) deleteImage(img *Image, repoName, tag string) ([]APIRmi, erro
}
}
if len(srv.runtime.repositories.ByID()[img.ID]) == 0 {
if err := srv.deleteImageAndChildren(img.ID, &imgs); err != nil {
if err := srv.deleteImageAndChildren(img.ID, &imgs, nil); err != nil {
if err != ErrImageReferenced {
return imgs, err
}
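
The signature change to deleteImageAndChildren threads a byParents index through the recursion so that graph.ByParent() need not be rebuilt at every level of the image tree. Reduced to a toy (image, byParent, and deleteTree are illustrative stand-ins, not the real Docker types), the shape of the refactor looks like this:

```go
package main

import "fmt"

type image struct {
	ID, Parent string
}

// byParent builds the parent->children index once, up front.
func byParent(all []*image) map[string][]*image {
	idx := make(map[string][]*image)
	for _, img := range all {
		idx[img.Parent] = append(idx[img.Parent], img)
	}
	return idx
}

// deleteTree removes id and its descendants, reusing the shared index
// instead of rebuilding it on every recursive call.
func deleteTree(id string, idx map[string][]*image, deleted *[]string) {
	for _, child := range idx[id] {
		deleteTree(child.ID, idx, deleted)
	}
	*deleted = append(*deleted, id)
}

func main() {
	imgs := []*image{{"a", ""}, {"b", "a"}, {"c", "b"}}
	var deleted []string
	deleteTree("a", byParent(imgs), &deleted)
	fmt.Println(deleted) // [c b a]
}
```

Building the index once turns the delete walk from quadratic in the number of images into a single pass plus an O(1) lookup per node.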