Mirror of https://github.com/moby/moby.git (synced 2022-11-09 12:21:53 -05:00)
Commit 5a1e4a1092: 2 changed files with 31 additions and 6 deletions
First changed file:

@@ -58,8 +58,8 @@ func newContainerMonitor(container *Container, policy runconfig.RestartPolicy) *
         container:     container,
         restartPolicy: policy,
         timeIncrement: defaultTimeIncrement,
-        stopChan:      make(chan struct{}, 1),
-        startSignal:   make(chan struct{}, 1),
+        stopChan:      make(chan struct{}),
+        startSignal:   make(chan struct{}),
     }
 }
 
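These two channels are only ever closed, never sent on, so the one-element buffer added nothing; closing an unbuffered channel already wakes every receiver at once. A minimal standalone sketch of that broadcast-by-close pattern (the names here are illustrative, not Docker's actual monitor API):

package main

import (
    "fmt"
    "sync"
)

func main() {
    // An unbuffered channel used purely as a signal: nothing is ever sent
    // on it, so no buffer is needed. Closing it releases every waiter.
    startSignal := make(chan struct{})

    var wg sync.WaitGroup
    for i := 0; i < 3; i++ {
        wg.Add(1)
        go func(id int) {
            defer wg.Done()
            <-startSignal // blocks until the channel is closed
            fmt.Printf("waiter %d released\n", id)
        }(i)
    }

    close(startSignal) // broadcast "started" to all waiters
    wg.Wait()
}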
@@ -103,8 +103,17 @@ func (m *containerMonitor) Start() error {
         exitStatus int
     )
 
+    // this variable indicates that we are under container.Lock
+    underLock := true
+
     // ensure that when the monitor finally exits we release the networking and unmount the rootfs
-    defer m.Close()
+    defer func() {
+        if !underLock {
+            m.container.Lock()
+            defer m.container.Unlock()
+        }
+        m.Close()
+    }()
 
     // reset the restart count
     m.container.RestartCount = -1
@@ -136,6 +145,9 @@ func (m *containerMonitor) Start() error {
             log.Errorf("Error running container: %s", err)
         }
 
+        // here container.Lock is already lost
+        underLock = false
+
         m.resetMonitor(err == nil && exitStatus == 0)
 
         if m.shouldRestart(exitStatus) {
@@ -244,10 +256,12 @@ func (m *containerMonitor) callback(command *execdriver.Command) {
 
     m.container.State.SetRunning(command.Pid())
 
-    if m.startSignal != nil {
     // signal that the process has started
+    // close channel only if not closed
+    select {
+    case <-m.startSignal:
+    default:
         close(m.startSignal)
-        m.startSignal = nil
     }
 
     if err := m.container.ToDisk(); err != nil {
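Replacing the nil check with a select is the usual close-only-once idiom: a receive on a closed channel returns immediately, so the first case fires once the channel has already been closed and the default branch performs the close otherwise. A tiny standalone sketch (closeOnce is a hypothetical helper, not part of the Docker code):

package main

import "fmt"

// closeOnce closes ch only if it has not been closed yet. A receive on a
// closed channel never blocks, so the first case handles the already-closed
// path and close() is never called twice.
func closeOnce(ch chan struct{}) {
    select {
    case <-ch:
        // already closed: nothing to do
    default:
        close(ch)
    }
}

func main() {
    started := make(chan struct{})
    closeOnce(started) // closes the channel
    closeOnce(started) // no-op; a second close() would otherwise panic
    fmt.Println("closed exactly once")
}

Note that this is only safe when a single goroutine can reach the close at a time, as in the monitor callback; fully concurrent callers would still want sync.Once or a mutex.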
Second changed file:

@@ -1,6 +1,7 @@
 package daemon
 
 import (
+    "encoding/json"
     "fmt"
     "sync"
     "time"
@@ -49,6 +50,16 @@ func (s *State) String() string {
     return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
 }
 
+type jState State
+
+// MarshalJSON for state is needed to avoid race conditions on inspect
+func (s *State) MarshalJSON() ([]byte, error) {
+    s.RLock()
+    b, err := json.Marshal(jState(*s))
+    s.RUnlock()
+    return b, err
+}
+
 func wait(waitChan <-chan struct{}, timeout time.Duration) error {
     if timeout < 0 {
         <-waitChan
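The jState alias is the standard trick for writing a custom MarshalJSON without risking a recursive call back into the same method: a defined type does not inherit the original type's methods, so jState has the same fields but no MarshalJSON of its own, and json.Marshal falls back to plain struct encoding for it. Holding the read lock for the duration of the marshal is what shields inspect from concurrent state changes. A minimal self-contained sketch of the same trick (field names are illustrative, not Docker's State):

package main

import (
    "encoding/json"
    "fmt"
    "sync"
)

// state mimics the locking plus alias-type trick: the embedded RWMutex
// guards the fields, and jsonState has no MarshalJSON method of its own,
// so json.Marshal uses default struct encoding for it.
type state struct {
    sync.RWMutex
    Running  bool
    ExitCode int
}

type jsonState state

func (s *state) MarshalJSON() ([]byte, error) {
    s.RLock()
    // The conversion copies the (never reused) mutex value, mirroring the
    // original code; go vet may flag the lock copy, but the copy's lock is
    // never used.
    b, err := json.Marshal(jsonState(*s))
    s.RUnlock()
    return b, err
}

func main() {
    s := &state{Running: true, ExitCode: 0}
    out, err := json.Marshal(s)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out)) // {"Running":true,"ExitCode":0}
}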