Merge pull request #2798 from dotcloud/fix_state_race

Refactor State to be 100% thread safe

Commit 25e443a3c7

10 changed files with 139 additions and 95 deletions
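The substance of the change is in state.go: State now embeds a sync.RWMutex, and every read or write of Running, Ghost, and ExitCode goes through a locked accessor (IsRunning, IsGhost, GetExitCode, SetGhost, SetRunning, SetStopped). As an orientation before the per-file hunks, here is a condensed sketch of the resulting API; it keeps only the fields and methods visible in the diff below and omits Pid, the timestamps, and JSON handling, which the real code treats the same way:

	package docker

	import "sync"

	// Condensed from state.go as changed by this PR; Pid, StartedAt and
	// FinishedAt are omitted here but exist in the real struct.
	type State struct {
		sync.RWMutex
		Running  bool
		Ghost    bool
		ExitCode int
	}

	// Readers take the read lock.
	func (s *State) IsRunning() bool {
		s.RLock()
		defer s.RUnlock()
		return s.Running
	}

	func (s *State) GetExitCode() int {
		s.RLock()
		defer s.RUnlock()
		return s.ExitCode
	}

	// Writers take the write lock and keep the related fields consistent.
	func (s *State) SetRunning(pid int) {
		s.Lock()
		defer s.Unlock()
		s.Running = true
		s.Ghost = false
		s.ExitCode = 0
	}

	func (s *State) SetStopped(exitCode int) {
		s.Lock()
		defer s.Unlock()
		s.Running = false
		s.ExitCode = exitCode
	}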
@@ -1543,7 +1543,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
 		return err
 	}
 
-	if !container.State.Running {
+	if !container.State.IsRunning() {
 		return fmt.Errorf("Impossible to attach to a stopped container, start it first")
 	}
 
@@ -2517,7 +2517,7 @@ func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
 	if err := json.Unmarshal(body, c); err != nil {
 		return false, -1, err
 	}
-	return c.State.Running, c.State.ExitCode, nil
+	return c.State.IsRunning(), c.State.GetExitCode(), nil
 }
 
 func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *DockerCli {
container.go

@@ -19,11 +19,14 @@ import (
 	"path/filepath"
 	"strconv"
 	"strings"
+	"sync"
 	"syscall"
 	"time"
 )
 
 type Container struct {
+	sync.Mutex
+
 	root string
 
 	ID string
@@ -487,9 +490,10 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 }
 
 func (container *Container) Start() (err error) {
-	container.State.Lock()
-	defer container.State.Unlock()
-	if container.State.Running {
+	container.Lock()
+	defer container.Unlock()
+
+	if container.State.IsRunning() {
 		return fmt.Errorf("The container %s is already running.", container.ID)
 	}
 	defer func() {
@@ -818,7 +822,7 @@ func (container *Container) Start() (err error) {
 	}
 	// FIXME: save state on disk *first*, then converge
 	// this way disk state is used as a journal, eg. we can restore after crash etc.
-	container.State.setRunning(container.cmd.Process.Pid)
+	container.State.SetRunning(container.cmd.Process.Pid)
 
 	// Init the lock
 	container.waitLock = make(chan struct{})
@@ -826,14 +830,14 @@ func (container *Container) Start() (err error) {
 	container.ToDisk()
 	go container.monitor()
 
-	defer utils.Debugf("Container running: %v", container.State.Running)
+	defer utils.Debugf("Container running: %v", container.State.IsRunning())
 	// We wait for the container to be fully running.
 	// Timeout after 5 seconds. In case of broken pipe, just retry.
 	// Note: The container can run and finish correctly before
 	// the end of this loop
 	for now := time.Now(); time.Since(now) < 5*time.Second; {
 		// If the container dies while waiting for it, just return
-		if !container.State.Running {
+		if !container.State.IsRunning() {
 			return nil
 		}
 		output, err := exec.Command("lxc-info", "-s", "-n", container.ID).CombinedOutput()
@@ -850,11 +854,11 @@ func (container *Container) Start() (err error) {
 		if strings.Contains(string(output), "RUNNING") {
 			return nil
 		}
-		utils.Debugf("Waiting for the container to start (running: %v): %s", container.State.Running, bytes.TrimSpace(output))
+		utils.Debugf("Waiting for the container to start (running: %v): %s", container.State.IsRunning(), bytes.TrimSpace(output))
 		time.Sleep(50 * time.Millisecond)
 	}
 
-	if container.State.Running {
+	if container.State.IsRunning() {
 		return ErrContainerStartTimeout
 	}
 	return ErrContainerStart
@@ -935,11 +939,12 @@ func (container *Container) allocateNetwork() error {
 		return nil
 	}
 
-	var iface *NetworkInterface
-	var err error
-	if container.State.Ghost {
-		manager := container.runtime.networkManager
-		if manager.disabled {
+	var (
+		iface *NetworkInterface
+		err   error
+	)
+	if container.State.IsGhost() {
+		if manager := container.runtime.networkManager; manager.disabled {
 			iface = &NetworkInterface{disabled: true}
 		} else {
 			iface = &NetworkInterface{
@@ -975,10 +980,12 @@ func (container *Container) allocateNetwork() error {
 		}
 	}
 
-	portSpecs := make(map[Port]struct{})
-	bindings := make(map[Port][]PortBinding)
+	var (
+		portSpecs = make(map[Port]struct{})
+		bindings  = make(map[Port][]PortBinding)
+	)
 
-	if !container.State.Ghost {
+	if !container.State.IsGhost() {
 		if container.Config.ExposedPorts != nil {
 			portSpecs = container.Config.ExposedPorts
 		}
@@ -1087,7 +1094,7 @@ func (container *Container) monitor() {
 	}
 
 	// Report status back
-	container.State.setStopped(exitCode)
+	container.State.SetStopped(exitCode)
 
 	// Release the lock
 	close(container.waitLock)
@@ -1137,10 +1144,10 @@ func (container *Container) cleanup() {
 }
 
 func (container *Container) kill(sig int) error {
-	container.State.Lock()
-	defer container.State.Unlock()
+	container.Lock()
+	defer container.Unlock()
 
-	if !container.State.Running {
+	if !container.State.IsRunning() {
 		return nil
 	}
 
@@ -1153,7 +1160,7 @@ func (container *Container) kill(sig int) error {
 }
 
 func (container *Container) Kill() error {
-	if !container.State.Running {
+	if !container.State.IsRunning() {
 		return nil
 	}
 
@@ -1178,7 +1185,7 @@ func (container *Container) Kill() error {
 }
 
 func (container *Container) Stop(seconds int) error {
-	if !container.State.Running {
+	if !container.State.IsRunning() {
 		return nil
 	}
 
@@ -1212,7 +1219,7 @@ func (container *Container) Restart(seconds int) error {
 // Wait blocks until the container stops running, then returns its exit code.
 func (container *Container) Wait() int {
 	<-container.waitLock
-	return container.State.ExitCode
+	return container.State.GetExitCode()
 }
 
 func (container *Container) Resize(h, w int) error {
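One plausible reading of the Start() and kill() hunks above: because the State setters now acquire State's own lock internally, the old pattern of holding container.State.Lock() for the whole body of Start() would deadlock as soon as Start() called SetRunning(), so serialization of whole operations moves to a sync.Mutex embedded in Container itself. A minimal, self-contained sketch of that layering follows; the State type here is a stand-in for the real one, the pid is a placeholder, and the real Start() does far more work:

	package docker

	import (
		"fmt"
		"sync"
	)

	// Stand-in for the real State; see the state.go hunks at the end of this diff.
	type State struct {
		sync.RWMutex
		Running bool
	}

	func (s *State) IsRunning() bool {
		s.RLock()
		defer s.RUnlock()
		return s.Running
	}

	func (s *State) SetRunning(pid int) {
		s.Lock()
		defer s.Unlock()
		s.Running = true
	}

	// Container embeds its own Mutex, as this PR does, so that operations such as
	// Start and kill are serialized without holding State's lock across them.
	type Container struct {
		sync.Mutex
		ID    string
		State State
		// ... the real struct has many more fields ...
	}

	func (container *Container) Start() error {
		container.Lock() // container-level lock, no longer container.State.Lock()
		defer container.Unlock()

		if container.State.IsRunning() {
			return fmt.Errorf("The container %s is already running.", container.ID)
		}
		// ... network, cgroup and process setup elided ...
		container.State.SetRunning(4242) // placeholder pid; the real code passes cmd.Process.Pid
		return nil
	}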
@@ -289,7 +289,7 @@ func TestRunDisconnect(t *testing.T) {
 	setTimeout(t, "Waiting for /bin/cat to exit timed out", 2*time.Second, func() {
 		container := globalRuntime.List()[0]
 		container.Wait()
-		if container.State.Running {
+		if container.State.IsRunning() {
 			t.Fatalf("/bin/cat is still running after closing stdin")
 		}
 	})
@@ -319,7 +319,7 @@ func TestRunDisconnectTty(t *testing.T) {
 		for {
 			// Client disconnect after run -i should keep stdin out in TTY mode
 			l := globalRuntime.List()
-			if len(l) == 1 && l[0].State.Running {
+			if len(l) == 1 && l[0].State.IsRunning() {
 				break
 			}
 			time.Sleep(10 * time.Millisecond)
@@ -345,7 +345,7 @@ func TestRunDisconnectTty(t *testing.T) {
 
 	// Give some time to monitor to do his thing
 	container.WaitTimeout(500 * time.Millisecond)
-	if !container.State.Running {
+	if !container.State.IsRunning() {
 		t.Fatalf("/bin/cat should still be running after closing stdin (tty mode)")
 	}
 }
@@ -454,7 +454,7 @@ func TestRunDetach(t *testing.T) {
 	})
 
 	time.Sleep(500 * time.Millisecond)
-	if !container.State.Running {
+	if !container.State.IsRunning() {
 		t.Fatal("The detached container should be still running")
 	}
 
@@ -534,7 +534,7 @@ func TestAttachDetach(t *testing.T) {
 	})
 
 	time.Sleep(500 * time.Millisecond)
-	if !container.State.Running {
+	if !container.State.IsRunning() {
 		t.Fatal("The detached container should be still running")
 	}
 
@@ -596,7 +596,7 @@ func TestAttachDetachTruncatedID(t *testing.T) {
 	})
 
 	time.Sleep(500 * time.Millisecond)
-	if !container.State.Running {
+	if !container.State.IsRunning() {
 		t.Fatal("The detached container should be still running")
 	}
 
@@ -629,7 +629,7 @@ func TestAttachDisconnect(t *testing.T) {
 	setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() {
 		for {
 			l := globalRuntime.List()
-			if len(l) == 1 && l[0].State.Running {
+			if len(l) == 1 && l[0].State.IsRunning() {
 				break
 			}
 			time.Sleep(10 * time.Millisecond)
@@ -665,7 +665,7 @@ func TestAttachDisconnect(t *testing.T) {
 	// We closed stdin, expect /bin/cat to still be running
 	// Wait a little bit to make sure container.monitor() did his thing
 	err := container.WaitTimeout(500 * time.Millisecond)
-	if err == nil || !container.State.Running {
+	if err == nil || !container.State.IsRunning() {
 		t.Fatalf("/bin/cat is not running after closing stdin")
 	}
 
@@ -224,13 +224,13 @@ func TestCommitAutoRun(t *testing.T) {
 	container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
 	defer runtime.Destroy(container1)
 
-	if container1.State.Running {
+	if container1.State.IsRunning() {
 		t.Errorf("Container shouldn't be running")
 	}
 	if err := container1.Run(); err != nil {
 		t.Fatal(err)
 	}
-	if container1.State.Running {
+	if container1.State.IsRunning() {
 		t.Errorf("Container shouldn't be running")
 	}
 
@@ -280,13 +280,13 @@ func TestCommitRun(t *testing.T) {
 	container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
 	defer runtime.Destroy(container1)
 
-	if container1.State.Running {
+	if container1.State.IsRunning() {
 		t.Errorf("Container shouldn't be running")
 	}
 	if err := container1.Run(); err != nil {
 		t.Fatal(err)
 	}
-	if container1.State.Running {
+	if container1.State.IsRunning() {
 		t.Errorf("Container shouldn't be running")
 	}
 
@@ -352,7 +352,7 @@ func TestStart(t *testing.T) {
 	// Give some time to the process to start
 	container.WaitTimeout(500 * time.Millisecond)
 
-	if !container.State.Running {
+	if !container.State.IsRunning() {
 		t.Errorf("Container should be running")
 	}
 	if err := container.Start(); err == nil {
@@ -370,13 +370,13 @@ func TestRun(t *testing.T) {
 	container, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
 	defer runtime.Destroy(container)
 
-	if container.State.Running {
+	if container.State.IsRunning() {
 		t.Errorf("Container shouldn't be running")
 	}
 	if err := container.Run(); err != nil {
 		t.Fatal(err)
 	}
-	if container.State.Running {
+	if container.State.IsRunning() {
 		t.Errorf("Container shouldn't be running")
 	}
 }
@@ -400,7 +400,7 @@ func TestOutput(t *testing.T) {
 		t.Fatal(err)
 	}
 	if string(output) != "foobar" {
-		t.Error(string(output))
+		t.Fatalf("%s != %s", string(output), "foobar")
 	}
 }
 
@@ -421,8 +421,8 @@ func TestContainerNetwork(t *testing.T) {
 	if err := container.Run(); err != nil {
 		t.Fatal(err)
 	}
-	if container.State.ExitCode != 0 {
-		t.Errorf("Unexpected ping 127.0.0.1 exit code %d (expected 0)", container.State.ExitCode)
+	if code := container.State.GetExitCode(); code != 0 {
+		t.Fatalf("Unexpected ping 127.0.0.1 exit code %d (expected 0)", code)
 	}
 }
 
@@ -446,7 +446,7 @@ func TestKillDifferentUser(t *testing.T) {
 	// there is a side effect I'm not seeing.
 	// defer container.stdin.Close()
 
-	if container.State.Running {
+	if container.State.IsRunning() {
 		t.Errorf("Container shouldn't be running")
 	}
 	if err := container.Start(); err != nil {
@@ -454,7 +454,7 @@ func TestKillDifferentUser(t *testing.T) {
 	}
 
 	setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
-		for !container.State.Running {
+		for !container.State.IsRunning() {
 			time.Sleep(10 * time.Millisecond)
 		}
 	})
@@ -471,11 +471,11 @@ func TestKillDifferentUser(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if container.State.Running {
+	if container.State.IsRunning() {
 		t.Errorf("Container shouldn't be running")
 	}
 	container.Wait()
-	if container.State.Running {
+	if container.State.IsRunning() {
 		t.Errorf("Container shouldn't be running")
 	}
 	// Try stopping twice
@@ -533,7 +533,7 @@ func TestKill(t *testing.T) {
 	}
 	defer runtime.Destroy(container)
 
-	if container.State.Running {
+	if container.State.IsRunning() {
 		t.Errorf("Container shouldn't be running")
 	}
 	if err := container.Start(); err != nil {
@@ -543,17 +543,17 @@ func TestKill(t *testing.T) {
 	// Give some time to lxc to spawn the process
 	container.WaitTimeout(500 * time.Millisecond)
 
-	if !container.State.Running {
+	if !container.State.IsRunning() {
 		t.Errorf("Container should be running")
 	}
 	if err := container.Kill(); err != nil {
 		t.Fatal(err)
 	}
-	if container.State.Running {
+	if container.State.IsRunning() {
 		t.Errorf("Container shouldn't be running")
 	}
 	container.Wait()
-	if container.State.Running {
+	if container.State.IsRunning() {
 		t.Errorf("Container shouldn't be running")
 	}
 	// Try stopping twice
@@ -577,8 +577,8 @@ func TestExitCode(t *testing.T) {
 	if err := trueContainer.Run(); err != nil {
 		t.Fatal(err)
 	}
-	if trueContainer.State.ExitCode != 0 {
-		t.Errorf("Unexpected exit code %d (expected 0)", trueContainer.State.ExitCode)
+	if code := trueContainer.State.GetExitCode(); code != 0 {
+		t.Fatalf("Unexpected exit code %d (expected 0)", code)
 	}
 
 	falseContainer, _, err := runtime.Create(&docker.Config{
@@ -592,8 +592,8 @@ func TestExitCode(t *testing.T) {
 	if err := falseContainer.Run(); err != nil {
 		t.Fatal(err)
 	}
-	if falseContainer.State.ExitCode != 1 {
-		t.Errorf("Unexpected exit code %d (expected 1)", falseContainer.State.ExitCode)
+	if code := falseContainer.State.GetExitCode(); code != 1 {
+		t.Fatalf("Unexpected exit code %d (expected 1)", code)
 	}
 }
 
@@ -741,7 +741,7 @@ func TestUser(t *testing.T) {
 	}
 	defer runtime.Destroy(container)
 	output, err = container.Output()
-	if err != nil || container.State.ExitCode != 0 {
+	if code := container.State.GetExitCode(); err != nil || code != 0 {
 		t.Fatal(err)
 	}
 	if !strings.Contains(string(output), "uid=0(root) gid=0(root)") {
@@ -757,12 +757,12 @@ func TestUser(t *testing.T) {
 		},
 		"",
 	)
-	if err != nil || container.State.ExitCode != 0 {
+	if code := container.State.GetExitCode(); err != nil || code != 0 {
 		t.Fatal(err)
 	}
 	defer runtime.Destroy(container)
 	output, err = container.Output()
-	if err != nil || container.State.ExitCode != 0 {
+	if code := container.State.GetExitCode(); err != nil || code != 0 {
 		t.Fatal(err)
 	}
 	if !strings.Contains(string(output), "uid=0(root) gid=0(root)") {
@@ -785,8 +785,8 @@ func TestUser(t *testing.T) {
 	output, err = container.Output()
 	if err != nil {
 		t.Fatal(err)
-	} else if container.State.ExitCode != 0 {
-		t.Fatalf("Container exit code is invalid: %d\nOutput:\n%s\n", container.State.ExitCode, output)
+	} else if code := container.State.GetExitCode(); code != 0 {
+		t.Fatalf("Container exit code is invalid: %d\nOutput:\n%s\n", code, output)
 	}
 	if !strings.Contains(string(output), "uid=1(daemon) gid=1(daemon)") {
 		t.Error(string(output))
@@ -806,7 +806,7 @@ func TestUser(t *testing.T) {
 	}
 	defer runtime.Destroy(container)
 	output, err = container.Output()
-	if err != nil || container.State.ExitCode != 0 {
+	if code := container.State.GetExitCode(); err != nil || code != 0 {
 		t.Fatal(err)
 	}
 	if !strings.Contains(string(output), "uid=1(daemon) gid=1(daemon)") {
@@ -827,7 +827,7 @@ func TestUser(t *testing.T) {
 	}
 	defer runtime.Destroy(container)
 	output, err = container.Output()
-	if container.State.ExitCode == 0 {
+	if container.State.GetExitCode() == 0 {
 		t.Fatal("Starting container with wrong uid should fail but it passed.")
 	}
 }
@@ -871,10 +871,10 @@ func TestMultipleContainers(t *testing.T) {
 	container2.WaitTimeout(250 * time.Millisecond)
 
 	// If we are here, both containers should be running
-	if !container1.State.Running {
+	if !container1.State.IsRunning() {
 		t.Fatal("Container not running")
 	}
-	if !container2.State.Running {
+	if !container2.State.IsRunning() {
 		t.Fatal("Container not running")
 	}
 
@@ -1176,13 +1176,13 @@ func TestCopyVolumeUidGid(t *testing.T) {
 	container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test.txt && chown daemon.daemon /hello"}, t)
 	defer r.Destroy(container1)
 
-	if container1.State.Running {
+	if container1.State.IsRunning() {
 		t.Errorf("Container shouldn't be running")
 	}
 	if err := container1.Run(); err != nil {
 		t.Fatal(err)
 	}
-	if container1.State.Running {
+	if container1.State.IsRunning() {
 		t.Errorf("Container shouldn't be running")
 	}
 
@@ -1210,13 +1210,13 @@ func TestCopyVolumeContent(t *testing.T) {
 	container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello/local && echo hello > /hello/local/world"}, t)
 	defer r.Destroy(container1)
 
-	if container1.State.Running {
+	if container1.State.IsRunning() {
 		t.Errorf("Container shouldn't be running")
 	}
 	if err := container1.Run(); err != nil {
 		t.Fatal(err)
 	}
-	if container1.State.Running {
+	if container1.State.IsRunning() {
 		t.Errorf("Container shouldn't be running")
 	}
 
@@ -1666,17 +1666,17 @@ func TestRestartGhost(t *testing.T) {
 		},
 		"",
 	)
-
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	if err := container.Kill(); err != nil {
 		t.Fatal(err)
 	}
 
-	container.State.Ghost = true
-	_, err = container.Output()
+	container.State.SetGhost(true)
+
+	_, err = container.Output()
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -419,7 +419,7 @@ func startEchoServerContainer(t *testing.T, proto string) (*docker.Runtime, *doc
 	}
 
 	setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
-		for !container.State.Running {
+		for !container.State.IsRunning() {
 			time.Sleep(10 * time.Millisecond)
 		}
 	})
@@ -533,7 +533,7 @@ func TestRestore(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if !container2.State.Running {
+	if !container2.State.IsRunning() {
 		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
 	}
 
@@ -543,7 +543,7 @@ func TestRestore(t *testing.T) {
 	if err := container2.WaitTimeout(2 * time.Second); err != nil {
 		t.Fatal(err)
 	}
-	container2.State.Running = true
+	container2.State.SetRunning(42)
 	container2.ToDisk()
 
 	if len(runtime1.List()) != 2 {
@@ -553,7 +553,7 @@ func TestRestore(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if !container2.State.Running {
+	if !container2.State.IsRunning() {
 		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
 	}
 
@@ -577,7 +577,7 @@ func TestRestore(t *testing.T) {
 	}
 	runningCount := 0
 	for _, c := range runtime2.List() {
-		if c.State.Running {
+		if c.State.IsRunning() {
 			t.Errorf("Running container found: %v (%v)", c.ID, c.Path)
 			runningCount++
 		}
@@ -592,7 +592,7 @@ func TestRestore(t *testing.T) {
 	if err := container3.Run(); err != nil {
 		t.Fatal(err)
 	}
-	container2.State.Running = false
+	container2.State.SetStopped(0)
 }
 
 func TestReloadContainerLinks(t *testing.T) {
@@ -638,11 +638,11 @@ func TestReloadContainerLinks(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if !container2.State.Running {
+	if !container2.State.IsRunning() {
 		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
 	}
 
-	if !container1.State.Running {
+	if !container1.State.IsRunning() {
 		t.Fatalf("Container %s should appear as running but isn't", container1.ID)
 	}
 
@@ -669,7 +669,7 @@ func TestReloadContainerLinks(t *testing.T) {
 	}
 	runningCount := 0
 	for _, c := range runtime2.List() {
-		if c.State.Running {
+		if c.State.IsRunning() {
 			runningCount++
 		}
 	}
@@ -111,7 +111,7 @@ func containerKill(eng *engine.Engine, id string, t utils.Fataler) {
 }
 
 func containerRunning(eng *engine.Engine, id string, t utils.Fataler) bool {
-	return getContainer(eng, id, t).State.Running
+	return getContainer(eng, id, t).State.IsRunning()
 }
 
 func containerAssertExists(eng *engine.Engine, id string, t utils.Fataler) {
links.go

@@ -21,7 +21,7 @@ func NewLink(parent, child *Container, name, bridgeInterface string) (*Link, err
 	if parent.ID == child.ID {
 		return nil, fmt.Errorf("Cannot link to self: %s == %s", parent.ID, child.ID)
 	}
-	if !child.State.Running {
+	if !child.State.IsRunning() {
 		return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, name)
 	}
 
runtime.go

@@ -100,8 +100,8 @@ func (runtime *Runtime) load(id string) (*Container, error) {
 	if container.ID != id {
 		return container, fmt.Errorf("Container %s is stored at %s", container.ID, id)
 	}
-	if container.State.Running {
-		container.State.Ghost = true
+	if container.State.IsRunning() {
+		container.State.SetGhost(true)
 	}
 	return container, nil
 }
@@ -136,7 +136,7 @@ func (runtime *Runtime) Register(container *Container) error {
 	// FIXME: if the container is supposed to be running but is not, auto restart it?
 	// if so, then we need to restart monitor and init a new lock
 	// If the container is supposed to be running, make sure of it
-	if container.State.Running {
+	if container.State.IsRunning() {
 		output, err := exec.Command("lxc-info", "-n", container.ID).CombinedOutput()
 		if err != nil {
 			return err
@@ -145,14 +145,14 @@ func (runtime *Runtime) Register(container *Container) error {
 		utils.Debugf("Container %s was supposed to be running be is not.", container.ID)
 		if runtime.config.AutoRestart {
 			utils.Debugf("Restarting")
-			container.State.Ghost = false
-			container.State.setStopped(0)
+			container.State.SetGhost(false)
+			container.State.SetStopped(0)
 			if err := container.Start(); err != nil {
 				return err
 			}
 		} else {
 			utils.Debugf("Marking as stopped")
-			container.State.setStopped(-127)
+			container.State.SetStopped(-127)
 			if err := container.ToDisk(); err != nil {
 				return err
 			}
@@ -694,7 +694,7 @@ func (srv *Server) Containers(all, size bool, n int, since, before string) []API
 	}, -1)
 
 	for _, container := range srv.runtime.List() {
-		if !container.State.Running && !all && n == -1 && since == "" && before == "" {
+		if !container.State.IsRunning() && !all && n == -1 && since == "" && before == "" {
 			continue
 		}
 		if before != "" && !foundBefore {
@@ -1339,7 +1339,7 @@ func (srv *Server) ContainerDestroy(name string, removeVolume, removeLink bool)
 	}
 
 	if container != nil {
-		if container.State.Running {
+		if container.State.IsRunning() {
 			return fmt.Errorf("Impossible to remove a running container, please stop it first")
 		}
 		volumes := make(map[string]struct{})
@@ -1516,7 +1516,7 @@ func (srv *Server) ImageDelete(name string, autoPrune bool) ([]APIRmi, error) {
 
 	// Prevent deletion if image is used by a running container
 	for _, container := range srv.runtime.List() {
-		if container.State.Running {
+		if container.State.IsRunning() {
 			parent, err := srv.runtime.repositories.LookupImage(container.Image)
 			if err != nil {
 				return nil, err
@@ -1738,7 +1738,7 @@ func (srv *Server) ContainerAttach(name string, logs, stream, stdin, stdout, std
 
 	//stream
 	if stream {
-		if container.State.Ghost {
+		if container.State.IsGhost() {
 			return fmt.Errorf("Impossible to attach to a ghost container")
 		}
 
state.go

@@ -8,7 +8,7 @@ import (
 )
 
 type State struct {
-	sync.Mutex
+	sync.RWMutex
 	Running  bool
 	Pid      int
 	ExitCode int
@@ -19,6 +19,9 @@ type State struct {
 
 // String returns a human-readable description of the state
 func (s *State) String() string {
+	s.RLock()
+	defer s.RUnlock()
+
 	if s.Running {
 		if s.Ghost {
 			return fmt.Sprintf("Ghost")
@@ -28,7 +31,38 @@ func (s *State) String() string {
 	return fmt.Sprintf("Exit %d", s.ExitCode)
 }
 
-func (s *State) setRunning(pid int) {
+func (s *State) IsRunning() bool {
+	s.RLock()
+	defer s.RUnlock()
+
+	return s.Running
+}
+
+func (s *State) IsGhost() bool {
+	s.RLock()
+	defer s.RUnlock()
+
+	return s.Ghost
+}
+
+func (s *State) GetExitCode() int {
+	s.RLock()
+	defer s.RUnlock()
+
+	return s.ExitCode
+}
+
+func (s *State) SetGhost(val bool) {
+	s.Lock()
+	defer s.Unlock()
+
+	s.Ghost = val
+}
+
+func (s *State) SetRunning(pid int) {
+	s.Lock()
+	defer s.Unlock()
+
 	s.Running = true
 	s.Ghost = false
 	s.ExitCode = 0
@@ -36,7 +70,10 @@ func (s *State) setRunning(pid int) {
 	s.StartedAt = time.Now()
 }
 
-func (s *State) setStopped(exitCode int) {
+func (s *State) SetStopped(exitCode int) {
+	s.Lock()
+	defer s.Unlock()
+
 	s.Running = false
 	s.Pid = 0
 	s.FinishedAt = time.Now()
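None of the following is part of the PR, but as a quick illustration of what the locked accessors buy, here is a toy program in which one goroutine reports an exit (playing the part of container.monitor()) while another polls the state (as the CLI and server code above does). It repeats the accessors from the sketch near the top so it can run on its own; with direct field access both sides would race, and go run -race would flag it, while the accessor version is clean:

	package main

	import (
		"fmt"
		"sync"
		"time"
	)

	// Toy stand-in mirroring the accessors added by this PR.
	type State struct {
		sync.RWMutex
		Running  bool
		ExitCode int
	}

	func (s *State) IsRunning() bool {
		s.RLock()
		defer s.RUnlock()
		return s.Running
	}

	func (s *State) GetExitCode() int {
		s.RLock()
		defer s.RUnlock()
		return s.ExitCode
	}

	func (s *State) SetRunning(pid int) {
		s.Lock()
		defer s.Unlock()
		s.Running = true
		s.ExitCode = 0
	}

	func (s *State) SetStopped(exitCode int) {
		s.Lock()
		defer s.Unlock()
		s.Running = false
		s.ExitCode = exitCode
	}

	func main() {
		s := &State{}
		s.SetRunning(1)

		// One goroutine reports the exit, as container.monitor() does...
		go func() {
			time.Sleep(10 * time.Millisecond)
			s.SetStopped(0)
		}()

		// ...while this one polls the state. Reading s.Running directly here
		// would be a data race; going through IsRunning is not.
		for s.IsRunning() {
			time.Sleep(time.Millisecond)
		}
		fmt.Println("exit code:", s.GetExitCode())
	}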