Mirror of https://github.com/moby/moby.git (synced 2022-11-09 12:21:53 -05:00)

Extract container store from the daemon.

- Generalize it into an interface.
- Stop abusing List for everything.

Signed-off-by: David Calavera <david.calavera@gmail.com>
David Calavera 2016-01-15 18:55:46 -05:00
parent 402ba93f68
commit 3c82fad441
10 changed files with 289 additions and 120 deletions
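
The gist of the change, before the per-file hunks: the daemon's private contStore (a map guarded by a mutex) moves into the container package behind a Store interface, and call sites stop ranging over daemon.List() just to find or count containers. A minimal, self-contained sketch of the new surface follows; it assumes the docker/docker tree at this commit is importable and uses only identifiers that appear in the diff (NewMemoryStore, NewBaseContainer, Add, Get, First, Size).

package main

import (
    "fmt"

    "github.com/docker/docker/container"
)

func main() {
    // The daemon now holds a container.Store instead of its own map+mutex.
    store := container.NewMemoryStore()
    store.Add("c1", container.NewBaseContainer("c1", "root"))
    store.Add("c2", container.NewBaseContainer("c2", "root"))

    // Direct lookup by the identifier the container was stored with.
    fmt.Println(store.Get("c1").ID)

    // First replaces the old "range over daemon.List() until a match" pattern.
    match := store.First(func(c *container.Container) bool {
        return c.ID == "c2"
    })
    fmt.Println(match.ID, store.Size())
}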


@@ -1,34 +1,35 @@
-package daemon
+package container

-import (
-    "sort"
-
-    "github.com/docker/docker/container"
-)
+import "sort"

 // History is a convenience type for storing a list of containers,
-// ordered by creation date.
-type History []*container.Container
+// sorted by creation date in descendant order.
+type History []*Container

+// Len returns the number of containers in the history.
 func (history *History) Len() int {
     return len(*history)
 }

+// Less compares two containers and returns true if the second one
+// was created before the first one.
 func (history *History) Less(i, j int) bool {
     containers := *history
     return containers[j].Created.Before(containers[i].Created)
 }

+// Swap switches containers i and j positions in the history.
 func (history *History) Swap(i, j int) {
     containers := *history
     containers[i], containers[j] = containers[j], containers[i]
 }

 // Add the given container to history.
-func (history *History) Add(container *container.Container) {
+func (history *History) Add(container *Container) {
     *history = append(*history, container)
 }

+// sort orders the history by creation date in descendant order.
 func (history *History) sort() {
     sort.Sort(history)
 }

container/memory_store.go (new file, 91 lines added)

@@ -0,0 +1,91 @@
package container
import "sync"
// memoryStore implements a Store in memory.
type memoryStore struct {
s map[string]*Container
sync.Mutex
}
// NewMemoryStore initializes a new memory store.
func NewMemoryStore() Store {
return &memoryStore{
s: make(map[string]*Container),
}
}
// Add appends a new container to the memory store.
// It overwrites the entry if the id already existed.
func (c *memoryStore) Add(id string, cont *Container) {
c.Lock()
c.s[id] = cont
c.Unlock()
}
// Get returns a container from the store by id.
func (c *memoryStore) Get(id string) *Container {
c.Lock()
res := c.s[id]
c.Unlock()
return res
}
// Delete removes a container from the store by id.
func (c *memoryStore) Delete(id string) {
c.Lock()
delete(c.s, id)
c.Unlock()
}
// List returns a sorted list of containers from the store.
// The containers are ordered by creation date.
func (c *memoryStore) List() []*Container {
containers := new(History)
c.Lock()
for _, cont := range c.s {
containers.Add(cont)
}
c.Unlock()
containers.sort()
return *containers
}
// Size returns the number of containers in the store.
func (c *memoryStore) Size() int {
c.Lock()
defer c.Unlock()
return len(c.s)
}
// First returns the first container found in the store by a given filter.
func (c *memoryStore) First(filter StoreFilter) *Container {
c.Lock()
defer c.Unlock()
for _, cont := range c.s {
if filter(cont) {
return cont
}
}
return nil
}
// ApplyAll calls the reducer function with every container in the store.
// This operation is asynchronous in the memory store.
func (c *memoryStore) ApplyAll(apply StoreReducer) {
c.Lock()
defer c.Unlock()
wg := new(sync.WaitGroup)
for _, cont := range c.s {
wg.Add(1)
go func(container *Container) {
apply(container)
wg.Done()
}(cont)
}
wg.Wait()
}
var _ Store = &memoryStore{}
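
A note on ApplyAll before the tests: as its comment above says, the memory store runs the reducer asynchronously, one goroutine per container, so anything the callback writes to shared state must be synchronized — which is why the daemon/info.go hunk further down switches its counters to atomic.AddInt32. A small sketch of the safe pattern (the counter here is illustrative, not code from the commit):

package main

import (
    "fmt"
    "sync/atomic"

    "github.com/docker/docker/container"
)

func main() {
    store := container.NewMemoryStore()
    store.Add("c1", container.NewBaseContainer("c1", "root"))
    store.Add("c2", container.NewBaseContainer("c2", "root"))

    // The reducer runs concurrently, so the shared counter is updated atomically.
    var stopped int32
    store.ApplyAll(func(c *container.Container) {
        if !c.IsRunning() {
            atomic.AddInt32(&stopped, 1)
        }
    })
    fmt.Println(int(stopped)) // 2: freshly created containers are not running
}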


@@ -0,0 +1,106 @@
package container
import (
"testing"
"time"
)
func TestNewMemoryStore(t *testing.T) {
s := NewMemoryStore()
m, ok := s.(*memoryStore)
if !ok {
t.Fatalf("store is not a memory store %v", s)
}
if m.s == nil {
t.Fatal("expected store map to not be nil")
}
}
func TestAddContainers(t *testing.T) {
s := NewMemoryStore()
s.Add("id", NewBaseContainer("id", "root"))
if s.Size() != 1 {
t.Fatalf("expected store size 1, got %v", s.Size())
}
}
func TestGetContainer(t *testing.T) {
s := NewMemoryStore()
s.Add("id", NewBaseContainer("id", "root"))
c := s.Get("id")
if c == nil {
t.Fatal("expected container to not be nil")
}
}
func TestDeleteContainer(t *testing.T) {
s := NewMemoryStore()
s.Add("id", NewBaseContainer("id", "root"))
s.Delete("id")
if c := s.Get("id"); c != nil {
t.Fatalf("expected container to be nil after removal, got %v", c)
}
if s.Size() != 0 {
t.Fatalf("expected store size to be 0, got %v", s.Size())
}
}
func TestListContainers(t *testing.T) {
s := NewMemoryStore()
cont := NewBaseContainer("id", "root")
cont.Created = time.Now()
cont2 := NewBaseContainer("id2", "root")
cont2.Created = time.Now().Add(24 * time.Hour)
s.Add("id", cont)
s.Add("id2", cont2)
list := s.List()
if len(list) != 2 {
t.Fatalf("expected list size 2, got %v", len(list))
}
if list[0].ID != "id2" {
t.Fatalf("expected older container to be first, got %v", list[0].ID)
}
}
func TestFirstContainer(t *testing.T) {
s := NewMemoryStore()
s.Add("id", NewBaseContainer("id", "root"))
s.Add("id2", NewBaseContainer("id2", "root"))
first := s.First(func(cont *Container) bool {
return cont.ID == "id2"
})
if first == nil {
t.Fatal("expected container to not be nil")
}
if first.ID != "id2" {
t.Fatalf("expected id2, got %v", first)
}
}
func TestApplyAllContainer(t *testing.T) {
s := NewMemoryStore()
s.Add("id", NewBaseContainer("id", "root"))
s.Add("id2", NewBaseContainer("id2", "root"))
s.ApplyAll(func(cont *Container) {
if cont.ID == "id2" {
cont.ID = "newID"
}
})
cont := s.Get("id2")
if cont == nil {
t.Fatal("expected container to not be nil")
}
if cont.ID != "newID" {
t.Fatalf("expected newID, got %v", cont)
}
}

container/store.go (new file, 28 lines added)

@@ -0,0 +1,28 @@
package container
// StoreFilter defines a function to filter
// containers in the store.
type StoreFilter func(*Container) bool
// StoreReducer defines a function to
// manipulate containers in the store.
type StoreReducer func(*Container)
// Store defines an interface that
// any container store must implement.
type Store interface {
// Add appends a new container to the store.
Add(string, *Container)
// Get returns a container from the store by the identifier it was stored with.
Get(string) *Container
// Delete removes a container from the store by the identifier it was stored with.
Delete(string)
// List returns a list of containers from the store.
List() []*Container
// Size returns the number of containers in the store.
Size() int
// First returns the first container found in the store by a given filter.
First(StoreFilter) *Container
// ApplyAll calls the reducer function with every container in the store.
ApplyAll(StoreReducer)
}
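
Since the daemon now depends only on this interface, the map-backed memory store is just one possible implementation. As an illustration (hypothetical, not part of the commit), a trivial slice-backed fake that satisfies Store could be dropped into tests:

package container

// fakeStore is a hypothetical slice-backed Store used only to illustrate
// that any type implementing these seven methods can back the daemon's
// containers field. It is not part of this commit.
type fakeStore struct {
    containers []*Container
}

func (f *fakeStore) Add(id string, c *Container) { f.containers = append(f.containers, c) }

func (f *fakeStore) Get(id string) *Container {
    return f.First(func(c *Container) bool { return c.ID == id })
}

// Delete is a no-op in this fake.
func (f *fakeStore) Delete(id string) {}

func (f *fakeStore) List() []*Container { return f.containers }

func (f *fakeStore) Size() int { return len(f.containers) }

func (f *fakeStore) First(filter StoreFilter) *Container {
    for _, c := range f.containers {
        if filter(c) {
            return c
        }
    }
    return nil
}

func (f *fakeStore) ApplyAll(apply StoreReducer) {
    for _, c := range f.containers {
        apply(c)
    }
}

var _ Store = &fakeStore{}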


@@ -99,46 +99,11 @@ func (e ErrImageDoesNotExist) Error() string {
     return fmt.Sprintf("no such id: %s", e.RefOrID)
 }

-type contStore struct {
-    s map[string]*container.Container
-    sync.Mutex
-}
-
-func (c *contStore) Add(id string, cont *container.Container) {
-    c.Lock()
-    c.s[id] = cont
-    c.Unlock()
-}
-
-func (c *contStore) Get(id string) *container.Container {
-    c.Lock()
-    res := c.s[id]
-    c.Unlock()
-    return res
-}
-
-func (c *contStore) Delete(id string) {
-    c.Lock()
-    delete(c.s, id)
-    c.Unlock()
-}
-
-func (c *contStore) List() []*container.Container {
-    containers := new(History)
-    c.Lock()
-    for _, cont := range c.s {
-        containers.Add(cont)
-    }
-    c.Unlock()
-    containers.sort()
-    return *containers
-}
-
 // Daemon holds information about the Docker daemon.
 type Daemon struct {
     ID              string
     repository      string
-    containers      *contStore
+    containers      container.Store
     execCommands    *exec.Store
     referenceStore  reference.Store
     downloadManager *xfer.LayerDownloadManager
@@ -794,7 +759,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
     d.ID = trustKey.PublicKey().KeyID()
     d.repository = daemonRepo
-    d.containers = &contStore{s: make(map[string]*container.Container)}
+    d.containers = container.NewMemoryStore()
     d.execCommands = exec.NewStore()
     d.referenceStore = referenceStore
     d.distributionMetadataStore = distributionMetadataStore
@@ -873,24 +838,18 @@ func (daemon *Daemon) shutdownContainer(c *container.Container) error {
 func (daemon *Daemon) Shutdown() error {
     daemon.shutdown = true
     if daemon.containers != nil {
-        group := sync.WaitGroup{}
         logrus.Debug("starting clean shutdown of all containers...")
-        for _, cont := range daemon.List() {
-            if !cont.IsRunning() {
-                continue
-            }
-            logrus.Debugf("stopping %s", cont.ID)
-            group.Add(1)
-            go func(c *container.Container) {
-                defer group.Done()
-                if err := daemon.shutdownContainer(c); err != nil {
-                    logrus.Errorf("Stop container error: %v", err)
-                    return
-                }
-                logrus.Debugf("container stopped %s", c.ID)
-            }(cont)
-        }
-        group.Wait()
+        daemon.containers.ApplyAll(func(c *container.Container) {
+            if !c.IsRunning() {
+                return
+            }
+            logrus.Debugf("stopping %s", c.ID)
+            if err := daemon.shutdownContainer(c); err != nil {
+                logrus.Errorf("Stop container error: %v", err)
+                return
+            }
+            logrus.Debugf("container stopped %s", c.ID)
+        })
     }

     // trigger libnetwork Stop only if it's initialized


@@ -61,15 +61,12 @@ func TestGetContainer(t *testing.T) {
         },
     }

-    store := &contStore{
-        s: map[string]*container.Container{
-            c1.ID: c1,
-            c2.ID: c2,
-            c3.ID: c3,
-            c4.ID: c4,
-            c5.ID: c5,
-        },
-    }
+    store := container.NewMemoryStore()
+    store.Add(c1.ID, c1)
+    store.Add(c2.ID, c2)
+    store.Add(c3.ID, c3)
+    store.Add(c4.ID, c4)
+    store.Add(c5.ID, c5)

     index := truncindex.NewTruncIndex([]string{})
     index.Add(c1.ID)


@@ -20,7 +20,7 @@ func TestContainerDoubleDelete(t *testing.T) {
         repository: tmp,
         root:       tmp,
     }
-    daemon.containers = &contStore{s: make(map[string]*container.Container)}
+    daemon.containers = container.NewMemoryStore()

     container := &container.Container{
         CommonContainer: container.CommonContainer{


@@ -179,13 +179,9 @@ func isImageIDPrefix(imageID, possiblePrefix string) bool {
 // getContainerUsingImage returns a container that was created using the given
 // imageID. Returns nil if there is no such container.
 func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Container {
-    for _, container := range daemon.List() {
-        if container.ImageID == imageID {
-            return container
-        }
-    }
-
-    return nil
+    return daemon.containers.First(func(c *container.Container) bool {
+        return c.ImageID == imageID
+    })
 }

 // removeImageRef attempts to parse and remove the given image reference from
@@ -328,13 +324,10 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType
     if mask&conflictRunningContainer != 0 {
         // Check if any running container is using the image.
-        for _, container := range daemon.List() {
-            if !container.IsRunning() {
-                // Skip this until we check for soft conflicts later.
-                continue
-            }
-
-            if container.ImageID == imgID {
-                return &imageDeleteConflict{
-                    imgID: imgID,
-                    hard:  true,
+        running := func(c *container.Container) bool {
+            return c.IsRunning() && c.ImageID == imgID
+        }
+        if container := daemon.containers.First(running); container != nil {
+            return &imageDeleteConflict{
+                imgID: imgID,
+                hard:  true,
@@ -343,7 +336,6 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType
             }
         }
     }
-    }

     // Check if any repository tags/digest reference this image.
     if mask&conflictActiveReference != 0 && len(daemon.referenceStore.References(imgID)) > 0 {
@@ -355,13 +347,10 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType
     if mask&conflictStoppedContainer != 0 {
         // Check if any stopped containers reference this image.
-        for _, container := range daemon.List() {
-            if container.IsRunning() {
-                // Skip this as it was checked above in hard conflict conditions.
-                continue
-            }
-
-            if container.ImageID == imgID {
-                return &imageDeleteConflict{
-                    imgID: imgID,
-                    used:  true,
+        stopped := func(c *container.Container) bool {
+            return !c.IsRunning() && c.ImageID == imgID
+        }
+        if container := daemon.containers.First(stopped); container != nil {
+            return &imageDeleteConflict{
+                imgID: imgID,
+                used:  true,
@@ -369,7 +358,6 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType
             }
         }
     }
-    }

     return nil
 }


@@ -4,9 +4,11 @@ import (
     "os"
     "runtime"
     "strings"
+    "sync/atomic"
     "time"

     "github.com/Sirupsen/logrus"
+    "github.com/docker/docker/container"
     "github.com/docker/docker/dockerversion"
     "github.com/docker/docker/pkg/fileutils"
     "github.com/docker/docker/pkg/parsers/kernel"
@@ -54,24 +56,24 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
     initPath := utils.DockerInitPath("")
     sysInfo := sysinfo.New(true)

-    var cRunning, cPaused, cStopped int
-    for _, c := range daemon.List() {
+    var cRunning, cPaused, cStopped int32
+    daemon.containers.ApplyAll(func(c *container.Container) {
         switch c.StateString() {
         case "paused":
-            cPaused++
+            atomic.AddInt32(&cPaused, 1)
         case "running":
-            cRunning++
+            atomic.AddInt32(&cRunning, 1)
         default:
-            cStopped++
+            atomic.AddInt32(&cStopped, 1)
         }
-    }
+    })

     v := &types.Info{
         ID:                daemon.ID,
-        Containers:        len(daemon.List()),
-        ContainersRunning: cRunning,
-        ContainersPaused:  cPaused,
-        ContainersStopped: cStopped,
+        Containers:        int(cRunning + cPaused + cStopped),
+        ContainersRunning: int(cRunning),
+        ContainersPaused:  int(cPaused),
+        ContainersStopped: int(cStopped),
         Images:            len(daemon.imageStore.Map()),
         Driver:            daemon.GraphDriverName(),
         DriverStatus:      daemon.layerStore.DriverStatus(),


@@ -39,12 +39,9 @@ func TestMigrateLegacySqliteLinks(t *testing.T) {
         },
     }

-    store := &contStore{
-        s: map[string]*container.Container{
-            c1.ID: c1,
-            c2.ID: c2,
-        },
-    }
+    store := container.NewMemoryStore()
+    store.Add(c1.ID, c1)
+    store.Add(c2.ID, c2)

     d := &Daemon{root: tmpDir, containers: store}

     db, err := graphdb.NewSqliteConn(filepath.Join(d.root, "linkgraph.db"))