Merge pull request #4534 from creack/fix_race_kill

Fix race when killing the daemon
Michael Crosby 2014-03-07 18:11:51 -05:00
commit d8425d98e6
2 changed files with 32 additions and 14 deletions

container.go

@@ -784,17 +784,19 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
 		utils.Errorf("Error running container: %s", err)
 	}
 
-	container.State.SetStopped(exitCode)
-	// FIXME: there is a race condition here which causes this to fail during the unit tests.
-	// If another goroutine was waiting for Wait() to return before removing the container's root
-	// from the filesystem... At this point it may already have done so.
-	// This is because State.setStopped() has already been called, and has caused Wait()
-	// to return.
-	// FIXME: why are we serializing running state to disk in the first place?
-	//log.Printf("%s: Failed to dump configuration to the disk: %s", container.ID, err)
-	if err := container.ToDisk(); err != nil {
-		utils.Errorf("Error dumping container state to disk: %s\n", err)
+	if container.runtime.srv.IsRunning() {
+		container.State.SetStopped(exitCode)
+		// FIXME: there is a race condition here which causes this to fail during the unit tests.
+		// If another goroutine was waiting for Wait() to return before removing the container's root
+		// from the filesystem... At this point it may already have done so.
+		// This is because State.setStopped() has already been called, and has caused Wait()
+		// to return.
+		// FIXME: why are we serializing running state to disk in the first place?
+		//log.Printf("%s: Failed to dump configuration to the disk: %s", container.ID, err)
+		if err := container.ToDisk(); err != nil {
+			utils.Errorf("Error dumping container state to disk: %s\n", err)
+		}
 	}
 
 	// Cleanup

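The FIXME comment above describes the ordering hazard this guard works around: State.SetStopped() wakes anyone blocked in Wait(), and a waiter may delete the container's root directory before monitor() ever reaches ToDisk(). The standalone sketch below reproduces that ordering in modern Go, with a temp directory standing in for the container root and a channel standing in for Wait(); the names and file layout are illustrative, not Docker's code.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sync"
)

func main() {
	// Stand-in for the container's root directory on disk.
	root, err := os.MkdirTemp("", "container-root")
	if err != nil {
		panic(err)
	}

	stopped := make(chan struct{}) // stands in for SetStopped() waking Wait()
	var wg sync.WaitGroup
	wg.Add(1)

	// The waiter: as soon as "Wait()" returns, it removes the container's
	// root from the filesystem -- exactly the scenario the FIXME warns about.
	go func() {
		defer wg.Done()
		<-stopped
		os.RemoveAll(root)
	}()

	close(stopped) // SetStopped has been called; Wait() returns
	wg.Wait()      // let the waiter win the race, to make the failure deterministic

	// monitor() now attempts the equivalent of container.ToDisk() and loses:
	err = os.WriteFile(filepath.Join(root, "config.json"), []byte("{}"), 0o644)
	fmt.Println("ToDisk-equivalent write:", err) // fails: directory is gone
}

With the patch, the daemon's shutdown path flips the running flag first, so a monitor goroutine that observes IsRunning() == false skips the disk write instead of racing the cleanup.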
server.go

@@ -29,10 +29,6 @@ import (
 	"time"
 )
 
-func (srv *Server) Close() error {
-	return srv.runtime.Close()
-}
-
 // jobInitApi runs the remote api server `srv` as a daemon,
 // Only one api server can run at the same time - this is enforced by a pidfile.
 // The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup.
@@ -2330,6 +2326,7 @@ func NewServer(eng *engine.Engine, config *DaemonConfig) (*Server, error) {
 		pushingPool: make(map[string]chan struct{}),
 		events:      make([]utils.JSONMessage, 0, 64), //only keeps the 64 last events
 		listeners:   make(map[string]chan utils.JSONMessage),
+		running:     true,
 	}
 	runtime.srv = srv
 	return srv, nil
@@ -2379,6 +2376,24 @@ func (srv *Server) GetEvents() []utils.JSONMessage {
 	return srv.events
 }
 
+func (srv *Server) SetRunning(status bool) {
+	srv.Lock()
+	defer srv.Unlock()
+
+	srv.running = status
+}
+
+func (srv *Server) IsRunning() bool {
+	srv.RLock()
+	defer srv.RUnlock()
+	return srv.running
+}
+
+func (srv *Server) Close() error {
+	srv.SetRunning(false)
+	return srv.runtime.Close()
+}
+
 type Server struct {
 	sync.RWMutex
 	runtime *Runtime
@@ -2387,4 +2402,5 @@ type Server struct {
 	events    []utils.JSONMessage
 	listeners map[string]chan utils.JSONMessage
 	Eng       *engine.Engine
+	running   bool
 }
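The new SetRunning/IsRunning pair is a conventional RWMutex-guarded flag: Server embeds sync.RWMutex, so Lock/RLock become promoted methods, the single writer takes the exclusive lock, and any number of readers can check the flag concurrently. A minimal self-contained sketch of the same pattern follows; the server type here is illustrative, not Docker's.

package main

import (
	"fmt"
	"sync"
)

// server mirrors the shape of the patched Server: an embedded
// sync.RWMutex guarding a plain bool.
type server struct {
	sync.RWMutex
	running bool
}

func (s *server) SetRunning(status bool) {
	s.Lock() // writer lock: exclusive
	defer s.Unlock()
	s.running = status
}

func (s *server) IsRunning() bool {
	s.RLock() // reader lock: many concurrent readers allowed
	defer s.RUnlock()
	return s.running
}

func main() {
	s := &server{running: true}

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Concurrent readers never block each other under RLock,
			// so the hot path (every container monitor) stays cheap.
			_ = s.IsRunning()
		}()
	}

	s.SetRunning(false) // as Close() does before tearing down the runtime
	wg.Wait()
	fmt.Println("running:", s.IsRunning())
}

Moving Close() next to SetRunning/IsRunning also makes the invariant easy to audit: the flag is cleared before srv.runtime.Close() starts removing state, which is precisely what the IsRunning() check in container.go relies on.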