Move checkpointing to the Container object
Also hide ViewDB behind an interface.

Signed-off-by: Fabio Kung <fabio.kung@gmail.com>
parent 8e425ebc42
commit aacddda89d
15 changed files with 79 additions and 85 deletions
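The shape of the change, sketched as a before/after pair (names as they appear in the hunks below; the snippet is schematic, not a compilable excerpt):

    // before: call sites serialize a snapshot into the replica store themselves
    daemon.containersReplica.Save(c.Snapshot())
    c.ToDisk()

    // after: the container checkpoints itself; ViewDB hides the memdb-backed store
    c.CheckpointAndSaveToDisk(daemon.containersReplica)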
@@ -189,6 +189,21 @@ func (container *Container) ToDiskLocking() error {
 	return err
 }
 
+// CheckpointTo makes the Container's current state visible to queries.
+// Callers must hold a Container lock.
+func (container *Container) CheckpointTo(store ViewDB) error {
+	return store.Save(container.snapshot())
+}
+
+// CheckpointAndSaveToDisk is equivalent to calling CheckpointTo and ToDisk.
+// Callers must hold a Container lock.
+func (container *Container) CheckpointAndSaveToDisk(store ViewDB) error {
+	if err := container.CheckpointTo(store); err != nil {
+		return err
+	}
+	return container.ToDisk()
+}
+
 // readHostConfig reads the host configuration from disk for the container.
 func (container *Container) readHostConfig() error {
 	container.HostConfig = &containertypes.HostConfig{}
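The helpers assume the caller already holds the container lock, as the comments state. A schematic sketch of the intended pattern (mirroring what cleanupContainer does later in this diff; `db` stands for any ViewDB implementation):

    c.Lock()
    c.Dead = true             // mutate state under the lock...
    err := c.CheckpointTo(db) // ...then publish it to the view database
    c.Unlock()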
@@ -41,7 +41,7 @@ type Snapshot struct {
 }
 
 // Snapshot provides a read only view of a Container. Callers must hold a Lock on the container object.
-func (container *Container) Snapshot() *Snapshot {
+func (container *Container) snapshot() *Snapshot {
 	snapshot := &Snapshot{
 		ID:   container.ID,
 		Name: container.Name,
@@ -8,6 +8,19 @@ const (
 	memdbIDIndex = "id"
 )
 
+// ViewDB provides an in-memory transactional (ACID) container Store
+type ViewDB interface {
+	Snapshot() View
+	Save(snapshot *Snapshot) error
+	Delete(id string) error
+}
+
+// View can be used by readers to avoid locking
+type View interface {
+	All() ([]Snapshot, error)
+	Get(id string) (*Snapshot, error)
+}
+
 var schema = &memdb.DBSchema{
 	Tables: map[string]*memdb.TableSchema{
 		memdbTable: {
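For illustration, a reader can get consistent results without taking any per-container locks by going through View. A sketch against the interface above (listAll is a hypothetical helper, and the fmt import is assumed):

    // listAll prints every container visible in one consistent view.
    func listAll(db ViewDB) error {
        view := db.Snapshot() // point-in-time, read-only view
        all, err := view.All()
        if err != nil {
            return err
        }
        for _, s := range all {
            fmt.Println(s.ID, s.Name)
        }
        return nil
    }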
@@ -23,46 +36,44 @@ var schema = &memdb.DBSchema{
 	},
 }
 
-// MemDB provides an in-memory transactional (ACID) container Store
-type MemDB struct {
+type memDB struct {
 	store *memdb.MemDB
 }
 
-// NewMemDB provides the default implementation, with the default schema
-func NewMemDB() (*MemDB, error) {
+// NewViewDB provides the default implementation, with the default schema
+func NewViewDB() (ViewDB, error) {
 	store, err := memdb.NewMemDB(schema)
 	if err != nil {
 		return nil, err
 	}
-	return &MemDB{store: store}, nil
+	return &memDB{store: store}, nil
 }
 
 // Snapshot provides a consistent read-only View of the database
-func (db *MemDB) Snapshot() *View {
-	return &View{db.store.Txn(false)}
+func (db *memDB) Snapshot() View {
+	return &memdbView{db.store.Txn(false)}
 }
 
 // Save atomically updates the in-memory store
-func (db *MemDB) Save(snapshot *Snapshot) error {
+func (db *memDB) Save(snapshot *Snapshot) error {
 	txn := db.store.Txn(true)
 	defer txn.Commit()
 	return txn.Insert(memdbTable, snapshot)
 }
 
 // Delete removes an item by ID
-func (db *MemDB) Delete(id string) error {
+func (db *memDB) Delete(id string) error {
 	txn := db.store.Txn(true)
 	defer txn.Commit()
 	return txn.Delete(memdbTable, &Snapshot{ID: id})
 }
 
-// View can be used by readers to avoid locking
-type View struct {
+type memdbView struct {
 	txn *memdb.Txn
 }
 
 // All returns a all items in this snapshot
-func (v *View) All() ([]Snapshot, error) {
+func (v *memdbView) All() ([]Snapshot, error) {
 	var all []Snapshot
 	iter, err := v.txn.Get(memdbTable, memdbIDIndex)
 	if err != nil {

@@ -80,7 +91,7 @@ func (v *View) All() ([]Snapshot, error) {
 }
 
 //Get returns an item by id
-func (v *View) Get(id string) (*Snapshot, error) {
+func (v *memdbView) Get(id string) (*Snapshot, error) {
 	s, err := v.txn.First(memdbTable, memdbIDIndex, id)
 	if err != nil {
 		return nil, err
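The implementation delegates to github.com/hashicorp/go-memdb, whose transactions supply the ACID behavior the comments claim. A minimal self-contained sketch of the same primitives (schema, write transaction, read snapshot); the "containers" table name and Snapshot struct here are illustrative, not the ones from this commit:

    package main

    import (
        "fmt"

        memdb "github.com/hashicorp/go-memdb"
    )

    type Snapshot struct{ ID, Name string }

    func main() {
        schema := &memdb.DBSchema{
            Tables: map[string]*memdb.TableSchema{
                "containers": {
                    Name: "containers",
                    Indexes: map[string]*memdb.IndexSchema{
                        "id": {Name: "id", Unique: true, Indexer: &memdb.StringFieldIndex{Field: "ID"}},
                    },
                },
            },
        }
        db, err := memdb.NewMemDB(schema)
        if err != nil {
            panic(err)
        }

        txn := db.Txn(true) // write transaction, as in memDB.Save
        if err := txn.Insert("containers", &Snapshot{ID: "id1", Name: "one"}); err != nil {
            panic(err)
        }
        txn.Commit()

        read := db.Txn(false) // read-only transaction, as in memDB.Snapshot
        raw, _ := read.First("containers", "id", "id1")
        fmt.Println(raw.(*Snapshot).Name) // "one"
    }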
@@ -3,27 +3,26 @@ package container
 import "testing"
 
 func TestViewSave(t *testing.T) {
-	db, err := NewMemDB()
+	db, err := NewViewDB()
 	if err != nil {
 		t.Fatal(err)
 	}
-	snapshot := NewBaseContainer("id", "root").Snapshot()
-	if err := db.Save(snapshot); err != nil {
+	c := NewBaseContainer("id", "root")
+	if err := c.CheckpointTo(db); err != nil {
 		t.Fatal(err)
 	}
 }
 
 func TestViewAll(t *testing.T) {
 	var (
-		db, _ = NewMemDB()
-		one   = NewBaseContainer("id1", "root1").Snapshot()
-		two   = NewBaseContainer("id2", "root2").Snapshot()
+		db, _ = NewViewDB()
+		one   = NewBaseContainer("id1", "root1")
+		two   = NewBaseContainer("id2", "root2")
 	)
 	one.Pid = 10
 	two.Pid = 20
-	db.Save(one)
-	db.Save(two)
+	one.CheckpointTo(db)
+	two.CheckpointTo(db)
 	all, err := db.Snapshot().All()
 	if err != nil {
 		t.Fatal(err)

@@ -44,10 +43,10 @@ func TestViewAll(t *testing.T) {
 }
 
 func TestViewGet(t *testing.T) {
-	db, _ := NewMemDB()
+	db, _ := NewViewDB()
 	one := NewBaseContainer("id", "root")
 	one.ImageID = "some-image-123"
-	db.Save(one.Snapshot())
+	one.CheckpointTo(db)
 	s, err := db.Snapshot().Get("id")
 	if err != nil {
 		t.Fatal(err)
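One property the tests above do not exercise: go-memdb read transactions are point-in-time snapshots, so a View taken before a checkpoint should not observe it. A hypothetical test along those lines (TestViewIsolation is not part of this commit, and the stale-view behavior is an assumption about go-memdb semantics):

    func TestViewIsolation(t *testing.T) {
        db, _ := NewViewDB()
        view := db.Snapshot() // taken before the container is checkpointed
        c := NewBaseContainer("id", "root")
        if err := c.CheckpointTo(db); err != nil {
            t.Fatal(err)
        }
        if s, _ := view.Get("id"); s != nil {
            t.Fatal("stale view unexpectedly sees the new container")
        }
    }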
@@ -108,13 +108,13 @@ func (daemon *Daemon) Register(c *container.Container) error {
 	}
 
 	// once in the memory store it is visible to other goroutines
-	// grab a Lock until it has been replicated to avoid races
+	// grab a Lock until it has been checkpointed to avoid races
 	c.Lock()
 	defer c.Unlock()
 
 	daemon.containers.Add(c.ID, c)
 	daemon.idIndex.Add(c.ID)
-	return daemon.containersReplica.Save(c.Snapshot())
+	return c.CheckpointTo(daemon.containersReplica)
 }
 
 func (daemon *Daemon) newContainer(name string, platform string, config *containertypes.Config, hostConfig *containertypes.HostConfig, imgID image.ID, managed bool) (*container.Container, error) {

@@ -218,10 +218,7 @@ func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *
 
 	runconfig.SetDefaultNetModeIfBlank(hostConfig)
 	container.HostConfig = hostConfig
-	if err := daemon.containersReplica.Save(container.Snapshot()); err != nil {
-		return err
-	}
-	return container.ToDisk()
+	return container.CheckpointAndSaveToDisk(daemon.containersReplica)
 }
 
 // verifyContainerSettings performs validation of the hostconfig and config

@@ -45,14 +45,11 @@ func (daemon *Daemon) getDNSSearchSettings(container *container.Container) []str
 	return nil
 }
 
-func (daemon *Daemon) saveAndReplicate(container *container.Container) error {
+func (daemon *Daemon) checkpointAndSave(container *container.Container) error {
 	container.Lock()
 	defer container.Unlock()
-	if err := daemon.containersReplica.Save(container.Snapshot()); err != nil {
-		return fmt.Errorf("Error replicating container state: %v", err)
-	}
-	if err := container.ToDisk(); err != nil {
-		return fmt.Errorf("Error saving container to disk: %v", err)
+	if err := container.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+		return fmt.Errorf("Error saving container state: %v", err)
 	}
 	return nil
 }
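Note the locking split this hunk establishes: daemon.checkpointAndSave acquires the container lock itself, while the Container methods from earlier in the diff expect the caller to hold it. Schematically (fragments, not a compilable excerpt):

    // daemon helper: locks internally, call sites stay lock-free
    if err := daemon.checkpointAndSave(container); err != nil {
        return err
    }

    // Container method: the caller supplies the locking
    container.Lock()
    err := container.CheckpointAndSaveToDisk(daemon.containersReplica)
    container.Unlock()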
@@ -1018,10 +1015,8 @@ func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName
 			return err
 		}
 	}
-	if err := daemon.saveAndReplicate(container); err != nil {
-		return fmt.Errorf("Error saving container to disk: %v", err)
-	}
-	return nil
+	return daemon.checkpointAndSave(container)
 }
 
 // DisconnectFromNetwork disconnects container from network n.

@@ -1057,8 +1052,8 @@ func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, netw
 		return err
 	}
 
-	if err := daemon.saveAndReplicate(container); err != nil {
-		return fmt.Errorf("Error saving container to disk: %v", err)
+	if err := daemon.checkpointAndSave(container); err != nil {
+		return err
 	}
 
 	if n != nil {

@@ -83,7 +83,7 @@ type Daemon struct {
 	ID                    string
 	repository            string
 	containers            container.Store
-	containersReplica     *container.MemDB
+	containersReplica     container.ViewDB
 	execCommands          *exec.Store
 	downloadManager       *xfer.LayerDownloadManager
 	uploadManager         *xfer.LayerUploadManager

@@ -762,7 +762,7 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 	d.ID = trustKey.PublicKey().KeyID()
 	d.repository = daemonRepo
 	d.containers = container.NewMemoryStore()
-	if d.containersReplica, err = container.NewMemDB(); err != nil {
+	if d.containersReplica, err = container.NewViewDB(); err != nil {
 		return nil, err
 	}
 	d.execCommands = exec.NewStore()

@@ -105,7 +105,7 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemo
 	// Mark container dead. We don't want anybody to be restarting it.
 	container.Lock()
 	container.Dead = true
-	if err = daemon.containersReplica.Save(container.Snapshot()); err != nil {
+	if err = container.CheckpointTo(daemon.containersReplica); err != nil {
 		container.Unlock()
 		return err
 	}
@@ -168,9 +168,9 @@ func handleProbeResult(d *Daemon, c *container.Container, result *types.Healthch
 	}
 
 	// replicate Health status changes
-	if err := d.containersReplica.Save(c.Snapshot()); err != nil {
+	if err := c.CheckpointTo(d.containersReplica); err != nil {
 		// queries will be inconsistent until the next probe runs or other state mutations
-		// trigger a replication
+		// checkpoint the container
 		logrus.Errorf("Error replicating health state for container %s: %v", c.ID, err)
 	}
 

@@ -29,7 +29,7 @@ func TestNoneHealthcheck(t *testing.T) {
 		},
 		State: &container.State{},
 	}
-	store, err := container.NewMemDB()
+	store, err := container.NewViewDB()
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -69,7 +69,7 @@ func TestHealthStates(t *testing.T) {
 		},
 	}
 
-	store, err := container.NewMemDB()
+	store, err := container.NewViewDB()
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -114,7 +114,7 @@ func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.C
 	return daemon.reduceContainers(config, daemon.transformContainer)
 }
 
-func (daemon *Daemon) filterByNameIDMatches(view *container.View, ctx *listContext) ([]container.Snapshot, error) {
+func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContext) ([]container.Snapshot, error) {
 	idSearch := false
 	names := ctx.filters.Get("name")
 	ids := ctx.filters.Get("id")

@@ -240,7 +240,7 @@ func (daemon *Daemon) reducePsContainer(container *container.Snapshot, ctx *list
 }
 
 // foldFilter generates the container filter based on the user's filtering options.
-func (daemon *Daemon) foldFilter(view *container.View, config *types.ContainerListOptions) (*listContext, error) {
+func (daemon *Daemon) foldFilter(view container.View, config *types.ContainerListOptions) (*listContext, error) {
 	psFilters := config.Filters
 
 	if err := psFilters.Validate(acceptedPsFilterTags); err != nil {
@@ -90,10 +90,7 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
 		daemon.setStateCounter(c)
 
 		defer c.Unlock()
-		if err := daemon.containersReplica.Save(c.Snapshot()); err != nil {
-			return err
-		}
-		if err := c.ToDisk(); err != nil {
+		if err := c.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
 			return err
 		}
 		return daemon.postRunProcessing(c, e)

@@ -122,11 +119,7 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
 		c.HasBeenStartedBefore = true
 		daemon.setStateCounter(c)
 
-		if err := daemon.containersReplica.Save(c.Snapshot()); err != nil {
-			c.Reset(false)
-			return err
-		}
-		if err := c.ToDisk(); err != nil {
+		if err := c.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
 			c.Reset(false)
 			return err
 		}

@@ -137,10 +130,7 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
 		// Container is already locked in this case
 		c.Paused = true
 		daemon.setStateCounter(c)
-		if err := daemon.containersReplica.Save(c.Snapshot()); err != nil {
-			return err
-		}
-		if err := c.ToDisk(); err != nil {
+		if err := c.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
 			return err
 		}
 		daemon.updateHealthMonitor(c)

@@ -149,10 +139,7 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
 		// Container is already locked in this case
 		c.Paused = false
 		daemon.setStateCounter(c)
-		if err := daemon.containersReplica.Save(c.Snapshot()); err != nil {
-			return err
-		}
-		if err := c.ToDisk(); err != nil {
+		if err := c.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
 			return err
 		}
 		daemon.updateHealthMonitor(c)
@@ -82,10 +82,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
 		daemon.nameIndex.Release(oldName + k)
 	}
 	daemon.releaseName(oldName)
-	if err = daemon.containersReplica.Save(container.Snapshot()); err != nil {
-		return err
-	}
-	if err = container.ToDisk(); err != nil {
+	if err = container.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
 		return err
 	}
 

@@ -102,10 +99,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
 		if err != nil {
 			container.Name = oldName
 			container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint
-			if e := daemon.containersReplica.Save(container.Snapshot()); err != nil {
-				logrus.Errorf("%s: Failed in replicating state on rename failure: %v", container.ID, e)
-			}
-			if e := container.ToDisk(); e != nil {
+			if e := container.CheckpointAndSaveToDisk(daemon.containersReplica); e != nil {
 				logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e)
 			}
 		}

@@ -117,11 +117,8 @@ func (daemon *Daemon) containerStart(container *container.Container, checkpoint
 		if container.ExitCode() == 0 {
 			container.SetExitCode(128)
 		}
-		if err := daemon.containersReplica.Save(container.Snapshot()); err != nil {
-			logrus.Errorf("%s: failed replicating state on start failure: %v", container.ID, err)
-		}
-		if err := container.ToDisk(); err != nil {
-			logrus.Errorf("%s: failed writing to disk on start failure: %v", container.ID, err)
+		if err := container.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+			logrus.Errorf("%s: failed saving state on start failure: %v", container.ID, err)
 		}
 		container.Reset(false)
 

@@ -38,8 +38,7 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro
 		if restoreConfig {
 			container.Lock()
 			container.HostConfig = &backupHostConfig
-			daemon.containersReplica.Save(container.Snapshot())
-			container.ToDisk()
+			container.CheckpointAndSaveToDisk(daemon.containersReplica)
 			container.Unlock()
 		}
 	}()

@@ -54,7 +53,7 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro
 		container.Unlock()
 		return errCannotUpdate(container.ID, err)
 	}
-	if err := daemon.containersReplica.Save(container.Snapshot()); err != nil {
+	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
 		restoreConfig = true
 		container.Unlock()
 		return errCannotUpdate(container.ID, err)