save deep copies of Container in the replica store

Reuse existing structures and rely on JSON serialization to deep copy
Container objects.

Also consolidate all "save" operations onto container.CheckpointTo,
which now both saves serialized JSON to disk and replicates state to
the ACID in-memory store.

Signed-off-by: Fabio Kung <fabio.kung@gmail.com>
Fabio Kung 2017-03-27 10:18:53 -07:00
parent f668af4475
commit edad52707c
18 changed files with 341 additions and 276 deletions
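
Before the per-file diffs, the deep-copy mechanism is easiest to see in miniature: the JSON that toDisk writes doubles as the copy medium, so the replica store never shares pointers with the live Container. Below is a small, self-contained sketch of that round trip; the Live type and checkpoint helper are illustrative stand-ins, not code from this commit.

package main

import (
    "encoding/json"
    "fmt"
)

// Live stands in for the mutable Container object.
type Live struct {
    ID     string
    Labels map[string]string // reference-typed field that must not be shared
}

// checkpoint serializes the live object and rebuilds a fresh copy from the
// bytes, mirroring the toDisk + FromDisk round trip in this commit.
func checkpoint(c *Live) (*Live, error) {
    buf, err := json.Marshal(c) // "save to disk"
    if err != nil {
        return nil, err
    }
    replica := &Live{}
    if err := json.Unmarshal(buf, replica); err != nil { // "read back from disk"
        return nil, err
    }
    return replica, nil
}

func main() {
    live := &Live{ID: "c1", Labels: map[string]string{"env": "dev"}}
    replica, err := checkpoint(live)
    if err != nil {
        panic(err)
    }
    live.Labels["env"] = "prod"        // mutate after the checkpoint
    fmt.Println(replica.Labels["env"]) // still "dev": the copy is deep
}

Mutating the live object after the checkpoint leaves the replica unchanged, which is the property that lets the in-memory store serve queries without taking container locks.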

View File

@@ -159,7 +159,7 @@ func (container *Container) FromDisk() error {
 }
 
 // ToDisk saves the container configuration on disk.
-func (container *Container) ToDisk() error {
+func (container *Container) toDisk() error {
     pth, err := container.ConfigPath()
     if err != nil {
         return err
@@ -181,27 +181,13 @@ func (container *Container) ToDisk() error {
     return container.WriteHostConfig()
 }
 
-// ToDiskLocking saves the container configuration on disk in a thread safe way.
-func (container *Container) ToDiskLocking() error {
-    container.Lock()
-    err := container.ToDisk()
-    container.Unlock()
-    return err
-}
-
-// CheckpointTo makes the Container's current state visible to queries.
+// CheckpointTo makes the Container's current state visible to queries, and persists state.
 // Callers must hold a Container lock.
 func (container *Container) CheckpointTo(store ViewDB) error {
-    return store.Save(container.snapshot())
-}
-
-// CheckpointAndSaveToDisk is equivalent to calling CheckpointTo and ToDisk.
-// Callers must hold a Container lock.
-func (container *Container) CheckpointAndSaveToDisk(store ViewDB) error {
-    if err := container.CheckpointTo(store); err != nil {
+    if err := container.toDisk(); err != nil {
         return err
     }
-    return container.ToDisk()
+    return store.Save(container)
 }
 
 // readHostConfig reads the host configuration from disk for the container.
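
Note the ordering inside the new CheckpointTo: the disk write happens first and the replica update second, both under the caller's lock, so the store can only observe state that has already been persisted. A toy sketch of that caller contract, assuming simplified stand-ins (store, container, and checkpointTo here are not the real daemon types):

package main

import (
    "encoding/json"
    "fmt"
    "sync"
)

// store stands in for the replica ViewDB; it only ever holds serialized state.
type store struct{ latest []byte }

type container struct {
    sync.Mutex
    Name string
}

// checkpointTo mirrors the consolidated CheckpointTo: persist first, then
// replicate. Callers must hold the lock.
func (c *container) checkpointTo(s *store) error {
    buf, err := json.Marshal(c) // stands in for toDisk()
    if err != nil {
        return err
    }
    s.latest = buf // stands in for store.Save(container)
    return nil
}

func main() {
    s := &store{}
    c := &container{Name: "a"}

    c.Lock()
    c.Name = "b" // any state mutation
    err := c.checkpointTo(s)
    c.Unlock()
    if err != nil {
        panic(err)
    }
    fmt.Println(string(s.latest)) // {"Name":"b"}
}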

View File

@@ -1,153 +0,0 @@
-package container
-
-import (
-    "fmt"
-    "strings"
-    "time"
-
-    "github.com/Sirupsen/logrus"
-    "github.com/docker/docker/api/types"
-    "github.com/docker/docker/api/types/network"
-    "github.com/docker/go-connections/nat"
-)
-
-// Snapshot is a read only view for Containers. It holds all information necessary to serve container queries in a
-// versioned ACID in-memory store. Pointers are avoided here to make sure all values are copied into the store.
-type Snapshot struct {
-    ID           string `json:"Id"`
-    Name         string
-    Pid          int
-    Managed      bool
-    Image        string
-    ImageID      string
-    Command      string
-    Ports        []types.Port
-    ExposedPorts nat.PortSet
-    PublishPorts nat.PortSet
-    Labels       map[string]string
-    State        string
-    Status       string
-    Health       string
-    HostConfig   struct {
-        NetworkMode string
-        Isolation   string
-    }
-    NetworkSettings types.SummaryNetworkSettings
-    Mounts          []types.MountPoint
-    Created         time.Time
-    StartedAt       time.Time
-    Running         bool
-    Paused          bool
-    ExitCode        int
-}
-
-// Snapshot provides a read only view of a Container. Callers must hold a Lock on the container object.
-func (container *Container) snapshot() *Snapshot {
-    snapshot := &Snapshot{
-        ID:           container.ID,
-        Name:         container.Name,
-        Pid:          container.Pid,
-        Managed:      container.Managed,
-        ImageID:      container.ImageID.String(),
-        Ports:        []types.Port{},
-        ExposedPorts: make(nat.PortSet),
-        PublishPorts: make(nat.PortSet),
-        State:        container.State.StateString(),
-        Status:       container.State.String(),
-        Health:       container.State.HealthString(),
-        Mounts:       container.GetMountPoints(),
-        Created:      container.Created,
-        StartedAt:    container.StartedAt,
-        Running:      container.Running,
-        Paused:       container.Paused,
-        ExitCode:     container.ExitCode(),
-    }
-
-    if container.HostConfig != nil {
-        snapshot.HostConfig.Isolation = string(container.HostConfig.Isolation)
-        snapshot.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode)
-        for publish := range container.HostConfig.PortBindings {
-            snapshot.PublishPorts[publish] = struct{}{}
-        }
-    }
-
-    if container.Config != nil {
-        snapshot.Image = container.Config.Image
-        snapshot.Labels = container.Config.Labels
-        for exposed := range container.Config.ExposedPorts {
-            snapshot.ExposedPorts[exposed] = struct{}{}
-        }
-    }
-
-    if len(container.Args) > 0 {
-        args := []string{}
-        for _, arg := range container.Args {
-            if strings.Contains(arg, " ") {
-                args = append(args, fmt.Sprintf("'%s'", arg))
-            } else {
-                args = append(args, arg)
-            }
-        }
-        argsAsString := strings.Join(args, " ")
-        snapshot.Command = fmt.Sprintf("%s %s", container.Path, argsAsString)
-    } else {
-        snapshot.Command = container.Path
-    }
-
-    if container.NetworkSettings != nil {
-        networks := make(map[string]*network.EndpointSettings)
-        for name, netw := range container.NetworkSettings.Networks {
-            if netw == nil || netw.EndpointSettings == nil {
-                continue
-            }
-            networks[name] = &network.EndpointSettings{
-                EndpointID:          netw.EndpointID,
-                Gateway:             netw.Gateway,
-                IPAddress:           netw.IPAddress,
-                IPPrefixLen:         netw.IPPrefixLen,
-                IPv6Gateway:         netw.IPv6Gateway,
-                GlobalIPv6Address:   netw.GlobalIPv6Address,
-                GlobalIPv6PrefixLen: netw.GlobalIPv6PrefixLen,
-                MacAddress:          netw.MacAddress,
-                NetworkID:           netw.NetworkID,
-            }
-            if netw.IPAMConfig != nil {
-                networks[name].IPAMConfig = &network.EndpointIPAMConfig{
-                    IPv4Address: netw.IPAMConfig.IPv4Address,
-                    IPv6Address: netw.IPAMConfig.IPv6Address,
-                }
-            }
-        }
-        snapshot.NetworkSettings = types.SummaryNetworkSettings{Networks: networks}
-        for port, bindings := range container.NetworkSettings.Ports {
-            p, err := nat.ParsePort(port.Port())
-            if err != nil {
-                logrus.Warnf("invalid port map %+v", err)
-                continue
-            }
-            if len(bindings) == 0 {
-                snapshot.Ports = append(snapshot.Ports, types.Port{
-                    PrivatePort: uint16(p),
-                    Type:        port.Proto(),
-                })
-                continue
-            }
-            for _, binding := range bindings {
-                h, err := nat.ParsePort(binding.HostPort)
-                if err != nil {
-                    logrus.Warnf("invalid host port map %+v", err)
-                    continue
-                }
-                snapshot.Ports = append(snapshot.Ports, types.Port{
-                    PrivatePort: uint16(p),
-                    PublicPort:  uint16(h),
-                    Type:        port.Proto(),
-                    IP:          binding.HostIP,
-                })
-            }
-        }
-    }
-
-    return snapshot
-}

View File

@@ -1,18 +1,51 @@
 package container
 
-import "github.com/hashicorp/go-memdb"
+import (
+    "fmt"
+    "strings"
+    "time"
+
+    "github.com/Sirupsen/logrus"
+    "github.com/docker/docker/api/types"
+    "github.com/docker/docker/api/types/network"
+    "github.com/docker/docker/pkg/registrar"
+    "github.com/docker/go-connections/nat"
+    "github.com/hashicorp/go-memdb"
+)
 
 const (
     memdbTable   = "containers"
-    memdbIDField = "ID"
     memdbIDIndex = "id"
 )
 
+// Snapshot is a read only view for Containers. It holds all information necessary to serve container queries in a
+// versioned ACID in-memory store.
+type Snapshot struct {
+    types.Container
+
+    // additional info queries need to filter on
+    // preserve nanosec resolution for queries
+    CreatedAt    time.Time
+    StartedAt    time.Time
+    Name         string
+    Pid          int
+    ExitCode     int
+    Running      bool
+    Paused       bool
+    Managed      bool
+    ExposedPorts nat.PortSet
+    PortBindings nat.PortSet
+    Health       string
+    HostConfig   struct {
+        Isolation string
+    }
+}
+
 // ViewDB provides an in-memory transactional (ACID) container Store
 type ViewDB interface {
-    Snapshot() View
-    Save(snapshot *Snapshot) error
-    Delete(id string) error
+    Snapshot(nameIndex *registrar.Registrar) View
+    Save(*Container) error
+    Delete(*Container) error
 }
 
 // View can be used by readers to avoid locking
@@ -29,7 +62,7 @@ var schema = &memdb.DBSchema{
             memdbIDIndex: {
                 Name:    memdbIDIndex,
                 Unique:  true,
-                Indexer: &memdb.StringFieldIndex{Field: memdbIDField},
+                Indexer: &containerByIDIndexer{},
             },
         },
     },
@@ -50,29 +83,38 @@ func NewViewDB() (ViewDB, error) {
 }
 
 // Snapshot provides a consistent read-only View of the database
-func (db *memDB) Snapshot() View {
-    return &memdbView{db.store.Txn(false)}
+func (db *memDB) Snapshot(index *registrar.Registrar) View {
+    return &memdbView{
+        txn:       db.store.Txn(false),
+        nameIndex: index.GetAll(),
+    }
 }
 
-// Save atomically updates the in-memory store
-func (db *memDB) Save(snapshot *Snapshot) error {
+// Save atomically updates the in-memory store from the current on-disk state of a Container.
+func (db *memDB) Save(c *Container) error {
     txn := db.store.Txn(true)
     defer txn.Commit()
-    return txn.Insert(memdbTable, snapshot)
+    deepCopy := NewBaseContainer(c.ID, c.Root)
+    err := deepCopy.FromDisk() // TODO: deal with reserveLabel
+    if err != nil {
+        return err
+    }
+    return txn.Insert(memdbTable, deepCopy)
 }
 
 // Delete removes an item by ID
-func (db *memDB) Delete(id string) error {
+func (db *memDB) Delete(c *Container) error {
     txn := db.store.Txn(true)
     defer txn.Commit()
-    return txn.Delete(memdbTable, &Snapshot{ID: id})
+    return txn.Delete(memdbTable, NewBaseContainer(c.ID, c.Root))
 }
 
 type memdbView struct {
     txn *memdb.Txn
+    nameIndex map[string][]string
 }
 
-// All returns all items in this snapshot
+// All returns all items in this snapshot. Returned objects must never be modified.
 func (v *memdbView) All() ([]Snapshot, error) {
     var all []Snapshot
     iter, err := v.txn.Get(memdbTable, memdbIDIndex)
@@ -84,18 +126,167 @@ func (v *memdbView) All() ([]Snapshot, error) {
         if item == nil {
             break
         }
-        snapshot := *(item.(*Snapshot)) // force a copy
-        all = append(all, snapshot)
+        snapshot := v.transform(item.(*Container))
+        all = append(all, *snapshot)
     }
     return all, nil
 }
 
-// Get returns an item by id
+// Get returns an item by id. Returned objects must never be modified.
 func (v *memdbView) Get(id string) (*Snapshot, error) {
     s, err := v.txn.First(memdbTable, memdbIDIndex, id)
     if err != nil {
         return nil, err
     }
-    snapshot := *(s.(*Snapshot)) // force a copy
-    return &snapshot, nil
+    return v.transform(s.(*Container)), nil
 }
+
+// transform maps a (deep) copied Container object to what queries need.
+// A lock on the Container is not held because these are immutable deep copies.
+func (v *memdbView) transform(container *Container) *Snapshot {
+    snapshot := &Snapshot{
+        Container: types.Container{
+            ID:      container.ID,
+            Names:   v.nameIndex[container.ID],
+            ImageID: container.ImageID.String(),
+            Ports:   []types.Port{},
+            Mounts:  container.GetMountPoints(),
+            State:   container.State.StateString(),
+            Status:  container.State.String(),
+            Created: container.Created.Unix(),
+        },
+        CreatedAt:    container.Created,
+        StartedAt:    container.StartedAt,
+        Name:         container.Name,
+        Pid:          container.Pid,
+        Managed:      container.Managed,
+        ExposedPorts: make(nat.PortSet),
+        PortBindings: make(nat.PortSet),
+        Health:       container.HealthString(),
+        Running:      container.Running,
+        Paused:       container.Paused,
+        ExitCode:     container.ExitCode(),
+    }
+
+    if snapshot.Names == nil {
+        // Dead containers will often have no name, so make sure the response isn't null
+        snapshot.Names = []string{}
+    }
+
+    if container.HostConfig != nil {
+        snapshot.Container.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode)
+        snapshot.HostConfig.Isolation = string(container.HostConfig.Isolation)
+        for binding := range container.HostConfig.PortBindings {
+            snapshot.PortBindings[binding] = struct{}{}
+        }
+    }
+
+    if container.Config != nil {
+        snapshot.Image = container.Config.Image
+        snapshot.Labels = container.Config.Labels
+        for exposed := range container.Config.ExposedPorts {
+            snapshot.ExposedPorts[exposed] = struct{}{}
+        }
+    }
+
+    if len(container.Args) > 0 {
+        args := []string{}
+        for _, arg := range container.Args {
+            if strings.Contains(arg, " ") {
+                args = append(args, fmt.Sprintf("'%s'", arg))
+            } else {
+                args = append(args, arg)
+            }
+        }
+        argsAsString := strings.Join(args, " ")
+        snapshot.Command = fmt.Sprintf("%s %s", container.Path, argsAsString)
+    } else {
+        snapshot.Command = container.Path
+    }
+
+    snapshot.Ports = []types.Port{}
+    networks := make(map[string]*network.EndpointSettings)
+    if container.NetworkSettings != nil {
+        for name, netw := range container.NetworkSettings.Networks {
+            if netw == nil || netw.EndpointSettings == nil {
+                continue
+            }
+            networks[name] = &network.EndpointSettings{
+                EndpointID:          netw.EndpointID,
+                Gateway:             netw.Gateway,
+                IPAddress:           netw.IPAddress,
+                IPPrefixLen:         netw.IPPrefixLen,
+                IPv6Gateway:         netw.IPv6Gateway,
+                GlobalIPv6Address:   netw.GlobalIPv6Address,
+                GlobalIPv6PrefixLen: netw.GlobalIPv6PrefixLen,
+                MacAddress:          netw.MacAddress,
+                NetworkID:           netw.NetworkID,
+            }
+            if netw.IPAMConfig != nil {
+                networks[name].IPAMConfig = &network.EndpointIPAMConfig{
+                    IPv4Address: netw.IPAMConfig.IPv4Address,
+                    IPv6Address: netw.IPAMConfig.IPv6Address,
+                }
+            }
+        }
+        for port, bindings := range container.NetworkSettings.Ports {
+            p, err := nat.ParsePort(port.Port())
+            if err != nil {
+                logrus.Warnf("invalid port map %+v", err)
+                continue
+            }
+            if len(bindings) == 0 {
+                snapshot.Ports = append(snapshot.Ports, types.Port{
+                    PrivatePort: uint16(p),
+                    Type:        port.Proto(),
+                })
+                continue
+            }
+            for _, binding := range bindings {
+                h, err := nat.ParsePort(binding.HostPort)
+                if err != nil {
+                    logrus.Warnf("invalid host port map %+v", err)
+                    continue
+                }
+                snapshot.Ports = append(snapshot.Ports, types.Port{
+                    PrivatePort: uint16(p),
+                    PublicPort:  uint16(h),
+                    Type:        port.Proto(),
+                    IP:          binding.HostIP,
+                })
+            }
+        }
+    }
+    snapshot.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks}
+
+    return snapshot
+}
+
+// containerByIDIndexer is used to extract the ID field from Container types.
+// memdb.StringFieldIndex can not be used since ID is a field from an embedded struct.
+type containerByIDIndexer struct{}
+
+// FromObject implements the memdb.SingleIndexer interface for Container objects
+func (e *containerByIDIndexer) FromObject(obj interface{}) (bool, []byte, error) {
+    c, ok := obj.(*Container)
+    if !ok {
+        return false, nil, fmt.Errorf("%T is not a Container", obj)
+    }
+    // Add the null character as a terminator
+    v := c.ID + "\x00"
+    return true, []byte(v), nil
+}
+
+// FromArgs implements the memdb.Indexer interface
+func (e *containerByIDIndexer) FromArgs(args ...interface{}) ([]byte, error) {
+    if len(args) != 1 {
+        return nil, fmt.Errorf("must provide only a single argument")
+    }
+    arg, ok := args[0].(string)
+    if !ok {
+        return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+    }
+    // Add the null character as a terminator
+    arg += "\x00"
+    return []byte(arg), nil
+}
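
The custom indexer is needed because, as the comment above says, memdb.StringFieldIndex cannot be used once ID lives on an embedded struct, and go-memdb expects string keys to carry a null terminator. A self-contained sketch of the same indexer pattern against a toy table follows; the obj type and "objs" table are invented for illustration.

package main

import (
    "fmt"

    memdb "github.com/hashicorp/go-memdb"
)

// obj is a toy record; byIDIndexer plays the role of containerByIDIndexer.
type obj struct{ ID string }

type byIDIndexer struct{}

func (byIDIndexer) FromObject(raw interface{}) (bool, []byte, error) {
    o, ok := raw.(*obj)
    if !ok {
        return false, nil, fmt.Errorf("%T is not an obj", raw)
    }
    return true, []byte(o.ID + "\x00"), nil // null-terminated, as go-memdb expects
}

func (byIDIndexer) FromArgs(args ...interface{}) ([]byte, error) {
    if len(args) != 1 {
        return nil, fmt.Errorf("must provide only a single argument")
    }
    s, ok := args[0].(string)
    if !ok {
        return nil, fmt.Errorf("argument must be a string: %#v", args[0])
    }
    return []byte(s + "\x00"), nil
}

func main() {
    schema := &memdb.DBSchema{
        Tables: map[string]*memdb.TableSchema{
            "objs": {
                Name: "objs",
                Indexes: map[string]*memdb.IndexSchema{
                    "id": {Name: "id", Unique: true, Indexer: byIDIndexer{}},
                },
            },
        },
    }
    db, err := memdb.NewMemDB(schema)
    if err != nil {
        panic(err)
    }

    txn := db.Txn(true) // write transaction
    if err := txn.Insert("objs", &obj{ID: "c1"}); err != nil {
        panic(err)
    }
    txn.Commit()

    read := db.Txn(false) // read-only snapshot, usable without locks
    raw, err := read.First("objs", "id", "c1")
    if err != nil || raw == nil {
        panic("lookup failed")
    }
    fmt.Println(raw.(*obj).ID) // c1
}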

View File

@@ -1,29 +1,73 @@
 package container
 
-import "testing"
+import (
+    "io/ioutil"
+    "os"
+    "path/filepath"
+    "testing"
+
+    containertypes "github.com/docker/docker/api/types/container"
+    "github.com/docker/docker/pkg/registrar"
+    "github.com/pborman/uuid"
+)
+
+var root string
+
+func TestMain(m *testing.M) {
+    var err error
+    root, err = ioutil.TempDir("", "docker-container-test-")
+    if err != nil {
+        panic(err)
+    }
+    defer os.RemoveAll(root)
+    os.Exit(m.Run())
+}
+
+func newContainer(t *testing.T) *Container {
+    var (
+        id    = uuid.New()
+        cRoot = filepath.Join(root, id)
+    )
+    if err := os.MkdirAll(cRoot, 0755); err != nil {
+        t.Fatal(err)
+    }
+    c := NewBaseContainer(id, cRoot)
+    c.HostConfig = &containertypes.HostConfig{}
+    return c
+}
 
-func TestViewSave(t *testing.T) {
+func TestViewSaveDelete(t *testing.T) {
     db, err := NewViewDB()
     if err != nil {
         t.Fatal(err)
     }
-    c := NewBaseContainer("id", "root")
+    c := newContainer(t)
     if err := c.CheckpointTo(db); err != nil {
         t.Fatal(err)
     }
+    if err := db.Delete(c); err != nil {
+        t.Fatal(err)
+    }
 }
 
 func TestViewAll(t *testing.T) {
     var (
         db, _ = NewViewDB()
-        one   = NewBaseContainer("id1", "root1")
-        two   = NewBaseContainer("id2", "root2")
+        names = registrar.NewRegistrar()
+        one   = newContainer(t)
+        two   = newContainer(t)
     )
     one.Pid = 10
+    if err := one.CheckpointTo(db); err != nil {
+        t.Fatal(err)
+    }
     two.Pid = 20
-    one.CheckpointTo(db)
-    two.CheckpointTo(db)
-    all, err := db.Snapshot().All()
+    if err := two.CheckpointTo(db); err != nil {
+        t.Fatal(err)
+    }
+    all, err := db.Snapshot(names).All()
     if err != nil {
         t.Fatal(err)
     }
@@ -34,24 +78,29 @@ func TestViewAll(t *testing.T) {
     for i := range all {
         byID[all[i].ID] = all[i]
     }
-    if s, ok := byID["id1"]; !ok || s.Pid != 10 {
-        t.Fatalf("expected something different for id1: %v", s)
+    if s, ok := byID[one.ID]; !ok || s.Pid != 10 {
+        t.Fatalf("expected something different for id=%s: %v", one.ID, s)
     }
-    if s, ok := byID["id2"]; !ok || s.Pid != 20 {
-        t.Fatalf("expected something different for id1: %v", s)
+    if s, ok := byID[two.ID]; !ok || s.Pid != 20 {
+        t.Fatalf("expected something different for id=%s: %v", two.ID, s)
    }
 }
 
 func TestViewGet(t *testing.T) {
-    db, _ := NewViewDB()
-    one := NewBaseContainer("id", "root")
+    var (
+        db, _ = NewViewDB()
+        names = registrar.NewRegistrar()
+        one   = newContainer(t)
+    )
     one.ImageID = "some-image-123"
-    one.CheckpointTo(db)
-    s, err := db.Snapshot().Get("id")
+    if err := one.CheckpointTo(db); err != nil {
+        t.Fatal(err)
+    }
+    s, err := db.Snapshot(names).Get(one.ID)
     if err != nil {
         t.Fatal(err)
     }
     if s == nil || s.ImageID != "some-image-123" {
-        t.Fatalf("expected something different. Got: %v", s)
+        t.Fatalf("expected ImageID=some-image-123. Got: %v", s)
     }
 }

View File

@@ -218,7 +218,7 @@ func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *
     runconfig.SetDefaultNetModeIfBlank(hostConfig)
     container.HostConfig = hostConfig
-    return container.CheckpointAndSaveToDisk(daemon.containersReplica)
+    return container.CheckpointTo(daemon.containersReplica)
 }
 
 // verifyContainerSettings performs validation of the hostconfig and config

View File

@@ -45,10 +45,11 @@ func (daemon *Daemon) getDNSSearchSettings(container *container.Container) []str
     return nil
 }
 
+// checkpointAndSave grabs a container lock to safely call container.CheckpointTo
 func (daemon *Daemon) checkpointAndSave(container *container.Container) error {
     container.Lock()
     defer container.Unlock()
-    if err := container.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+    if err := container.CheckpointTo(daemon.containersReplica); err != nil {
         return fmt.Errorf("Error saving container state: %v", err)
     }
     return nil

View File

@@ -167,11 +167,6 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (
     runconfig.SetDefaultNetModeIfBlank(container.HostConfig)
 
     daemon.updateContainerNetworkSettings(container, endpointsConfigs)
-
-    if err := container.ToDisk(); err != nil {
-        logrus.Errorf("Error saving new container to disk: %v", err)
-        return nil, err
-    }
     if err := daemon.Register(container); err != nil {
         return nil, err
     }

View File

@@ -217,7 +217,7 @@ func (daemon *Daemon) restore() error {
             go func(c *container.Container) {
                 defer wg.Done()
                 daemon.backportMountSpec(c)
-                if err := c.ToDiskLocking(); err != nil {
+                if err := daemon.checkpointAndSave(c); err != nil {
                     logrus.WithError(err).WithField("container", c.ID).Error("error saving backported mountspec to disk")
                 }
@@ -289,7 +289,9 @@ func (daemon *Daemon) restore() error {
                 logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
                 c.RemovalInProgress = false
                 c.Dead = true
-                c.ToDisk()
+                if err := c.CheckpointTo(daemon.containersReplica); err != nil {
+                    logrus.Errorf("Failed to update container %s state: %v", c.ID, err)
+                }
             }
             c.Unlock()
         }(c)

View File

@@ -274,6 +274,10 @@ func TestMigratePre17Volumes(t *testing.T) {
     }
     `)
 
+    viewDB, err := container.NewViewDB()
+    if err != nil {
+        t.Fatal(err)
+    }
     volStore, err := store.New(volumeRoot)
     if err != nil {
         t.Fatal(err)
@@ -284,7 +288,12 @@ func TestMigratePre17Volumes(t *testing.T) {
     }
     volumedrivers.Register(drv, volume.DefaultDriverName)
 
-    daemon := &Daemon{root: rootDir, repository: containerRoot, volumes: volStore}
+    daemon := &Daemon{
+        root:              rootDir,
+        repository:        containerRoot,
+        containersReplica: viewDB,
+        volumes:           volStore,
+    }
 
     err = ioutil.WriteFile(filepath.Join(containerRoot, cid, "config.v2.json"), config, 600)
     if err != nil {
         t.Fatal(err)

View File

@@ -105,15 +105,11 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemo
     // Mark container dead. We don't want anybody to be restarting it.
     container.Lock()
     container.Dead = true
-    if err = container.CheckpointTo(daemon.containersReplica); err != nil {
-        container.Unlock()
-        return err
-    }
 
     // Save container state to disk. So that if error happens before
     // container meta file got removed from disk, then a restart of
     // docker should not make a dead container alive.
-    if err := container.ToDisk(); err != nil && !os.IsNotExist(err) {
+    if err := container.CheckpointTo(daemon.containersReplica); err != nil && !os.IsNotExist(err) {
         logrus.Errorf("Error saving dying container to disk: %v", err)
     }
     container.Unlock()
@@ -137,7 +133,7 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemo
     selinuxFreeLxcContexts(container.ProcessLabel)
     daemon.idIndex.Delete(container.ID)
     daemon.containers.Delete(container.ID)
-    daemon.containersReplica.Delete(container.ID)
+    daemon.containersReplica.Delete(container)
     if e := daemon.removeMountPoints(container, removeVolume); e != nil {
         logrus.Error(e)
     }

View File

@@ -100,18 +100,18 @@ type listContext struct {
     *types.ContainerListOptions
 }
 
-// byContainerCreated is a temporary type used to sort a list of containers by creation time.
-type byContainerCreated []container.Snapshot
+// byCreatedDescending is a temporary type used to sort a list of containers by creation time.
+type byCreatedDescending []container.Snapshot
 
-func (r byContainerCreated) Len() int      { return len(r) }
-func (r byContainerCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
-func (r byContainerCreated) Less(i, j int) bool {
-    return r[i].Created.UnixNano() < r[j].Created.UnixNano()
+func (r byCreatedDescending) Len() int      { return len(r) }
+func (r byCreatedDescending) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
+func (r byCreatedDescending) Less(i, j int) bool {
+    return r[j].CreatedAt.UnixNano() < r[i].CreatedAt.UnixNano()
 }
 
 // Containers returns the list of containers to show given the user's filtering.
 func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.Container, error) {
-    return daemon.reduceContainers(config, daemon.transformContainer)
+    return daemon.reduceContainers(config, daemon.refreshImage)
 }
 
 func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContext) ([]container.Snapshot, error) {
@@ -123,7 +123,7 @@ func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContex
         // standard behavior of walking the entire container
         // list from the daemon's in-memory store
         all, err := view.All()
-        sort.Sort(sort.Reverse(byContainerCreated(all)))
+        sort.Sort(byCreatedDescending(all))
         return all, err
     }
@@ -172,7 +172,7 @@ func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContex
 
     // Restore sort-order after filtering
     // Created gives us nanosec resolution for sorting
-    sort.Sort(sort.Reverse(byContainerCreated(cntrs)))
+    sort.Sort(byCreatedDescending(cntrs))
 
     return cntrs, nil
 }
@@ -180,7 +180,7 @@ func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContex
 // reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer.
 func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) {
     var (
-        view       = daemon.containersReplica.Snapshot()
+        view       = daemon.containersReplica.Snapshot(daemon.nameIndex)
         containers = []*types.Container{}
     )
@@ -503,9 +503,15 @@ func includeContainerInList(container *container.Snapshot, ctx *listContext) ite
         }
     }
 
-    networkExist := fmt.Errorf("container part of network")
+    var (
+        networkExist = errors.New("container part of network")
+        noNetworks   = errors.New("container is not part of any networks")
+    )
     if ctx.filters.Include("network") {
         err := ctx.filters.WalkValues("network", func(value string) error {
+            if container.NetworkSettings == nil {
+                return noNetworks
+            }
             if _, ok := container.NetworkSettings.Networks[value]; ok {
                 return networkExist
             }
@@ -527,7 +533,7 @@ func includeContainerInList(container *container.Snapshot, ctx *listContext) ite
     if len(ctx.publish) > 0 {
         shouldSkip := true
         for port := range ctx.publish {
-            if _, ok := container.PublishPorts[port]; ok {
+            if _, ok := container.PortBindings[port]; ok {
                 shouldSkip = false
                 break
             }
@@ -553,40 +559,22 @@ func includeContainerInList(container *container.Snapshot, ctx *listContext) ite
     return includeContainer
 }
 
-// transformContainer generates the container type expected by the docker ps command.
-func (daemon *Daemon) transformContainer(container *container.Snapshot, ctx *listContext) (*types.Container, error) {
-    newC := &types.Container{
-        ID:              container.ID,
-        Names:           ctx.names[container.ID],
-        ImageID:         container.ImageID,
-        Command:         container.Command,
-        Created:         container.Created.Unix(),
-        State:           container.State,
-        Status:          container.Status,
-        NetworkSettings: &container.NetworkSettings,
-        Ports:           container.Ports,
-        Labels:          container.Labels,
-        Mounts:          container.Mounts,
-    }
-    if newC.Names == nil {
-        // Dead containers will often have no name, so make sure the response isn't null
-        newC.Names = []string{}
-    }
-    newC.HostConfig.NetworkMode = container.HostConfig.NetworkMode
-
-    image := container.Image // if possible keep the original ref
-    if image != container.ImageID {
+// refreshImage checks if the Image ref still points to the correct ID, and updates the ref to the actual ID when it doesn't
+func (daemon *Daemon) refreshImage(s *container.Snapshot, ctx *listContext) (*types.Container, error) {
+    c := s.Container
+    image := s.Image // keep the original ref if still valid (hasn't changed)
+    if image != s.ImageID {
         id, _, err := daemon.GetImageIDAndPlatform(image)
         if _, isDNE := err.(ErrImageDoesNotExist); err != nil && !isDNE {
             return nil, err
         }
-        if err != nil || id.String() != container.ImageID {
-            image = container.ImageID
+        if err != nil || id.String() != s.ImageID {
+            // ref changed, we need to use original ID
+            image = s.ImageID
         }
     }
-    newC.Image = image
-
-    return newC, nil
+    c.Image = image
+    return &c, nil
 }
 
 // Volumes lists known volumes, using the filter to restrict the range
View File

@@ -90,7 +90,7 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
         daemon.setStateCounter(c)
 
         defer c.Unlock()
-        if err := c.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+        if err := c.CheckpointTo(daemon.containersReplica); err != nil {
             return err
         }
         return daemon.postRunProcessing(c, e)
@@ -119,7 +119,7 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
         c.HasBeenStartedBefore = true
         daemon.setStateCounter(c)
 
-        if err := c.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+        if err := c.CheckpointTo(daemon.containersReplica); err != nil {
             c.Reset(false)
             return err
         }
@@ -130,7 +130,7 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
         // Container is already locked in this case
         c.Paused = true
         daemon.setStateCounter(c)
-        if err := c.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+        if err := c.CheckpointTo(daemon.containersReplica); err != nil {
             return err
         }
         daemon.updateHealthMonitor(c)
@@ -139,7 +139,7 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
         // Container is already locked in this case
         c.Paused = false
         daemon.setStateCounter(c)
-        if err := c.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+        if err := c.CheckpointTo(daemon.containersReplica); err != nil {
             return err
         }
         daemon.updateHealthMonitor(c)

View File

@@ -82,7 +82,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
         daemon.nameIndex.Release(oldName + k)
     }
     daemon.releaseName(oldName)
-    if err = container.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+    if err = container.CheckpointTo(daemon.containersReplica); err != nil {
         return err
     }
@@ -99,7 +99,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
         if err != nil {
             container.Name = oldName
             container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint
-            if e := container.CheckpointAndSaveToDisk(daemon.containersReplica); e != nil {
+            if e := container.CheckpointTo(daemon.containersReplica); e != nil {
                 logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e)
             }
         }

View File

@@ -52,7 +52,7 @@ func (daemon *Daemon) containerRestart(container *container.Container, seconds i
             container.HostConfig.AutoRemove = autoRemove
             // containerStop will write HostConfig to disk, we shall restore AutoRemove
             // in disk too
-            if toDiskErr := container.ToDiskLocking(); toDiskErr != nil {
+            if toDiskErr := daemon.checkpointAndSave(container); toDiskErr != nil {
                 logrus.Errorf("Write container to disk error: %v", toDiskErr)
             }

View File

@@ -58,7 +58,7 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos
             // if user has change the network mode on starting, clean up the
             // old networks. It is a deprecated feature and has been removed in Docker 1.12
             container.NetworkSettings.Networks = nil
-            if err := container.ToDisk(); err != nil {
+            if err := container.CheckpointTo(daemon.containersReplica); err != nil {
                 return err
             }
         }
@@ -117,7 +117,7 @@ func (daemon *Daemon) containerStart(container *container.Container, checkpoint
             if container.ExitCode() == 0 {
                 container.SetExitCode(128)
             }
-            if err := container.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+            if err := container.CheckpointTo(daemon.containersReplica); err != nil {
                 logrus.Errorf("%s: failed saving state on start failure: %v", container.ID, err)
             }
             container.Reset(false)

View File

@@ -9,13 +9,14 @@ import (
     "github.com/docker/docker/libcontainerd"
 )
 
+// getLibcontainerdCreateOptions callers must hold a lock on the container
 func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) {
     createOptions := []libcontainerd.CreateOption{}
 
     // Ensure a runtime has been assigned to this container
     if container.HostConfig.Runtime == "" {
         container.HostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName()
-        container.ToDisk()
+        container.CheckpointTo(daemon.containersReplica)
     }
 
     rt := daemon.configStore.GetRuntime(container.HostConfig.Runtime)

View File

@@ -38,7 +38,7 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro
         if restoreConfig {
             container.Lock()
             container.HostConfig = &backupHostConfig
-            container.CheckpointAndSaveToDisk(daemon.containersReplica)
+            container.CheckpointTo(daemon.containersReplica)
             container.Unlock()
         }
     }()

View File

@@ -180,7 +180,7 @@ func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error {
                 container.MountPoints[destination] = &m
             }
         }
-        return container.ToDisk()
+        return container.CheckpointTo(daemon.containersReplica)
     }
     return nil
 }