Mirror of https://github.com/moby/moby.git (synced 2022-11-09 12:21:53 -05:00)
Merge pull request #20275 from cpuguy83/finer_graph_locks
Finer graph locks
Commit 2f797bb1d9
3 changed files with 143 additions and 39 deletions
@@ -66,6 +66,7 @@ func init() {
 type data struct {
     referenceCount int
     path           string
+    sync.Mutex
 }
 
 // Driver contains information about the filesystem mounted.
@@ -76,7 +77,7 @@ type Driver struct {
     root    string
     uidMaps []idtools.IDMap
     gidMaps []idtools.IDMap
-    sync.Mutex // Protects concurrent modification to active
+    globalLock sync.Mutex // Protects concurrent modification to active
     active map[string]*data
 }
 
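The two hunks above carry the core idea of the change: the driver-wide mutex now only guards the active map (hence the rename to globalLock), while each per-id data entry embeds its own sync.Mutex, so slow work on one layer no longer blocks every other layer. A minimal standalone sketch of that pattern, using hypothetical names rather than the driver's actual API:

package main

import "sync"

type entry struct {
    refCount int
    sync.Mutex // serializes work on this one key
}

type registry struct {
    mu      sync.Mutex // protects the map itself, held only briefly
    entries map[string]*entry
}

// get returns the entry for key, creating it on first use.
// Only the map lookup happens under the global lock.
func (r *registry) get(key string) *entry {
    r.mu.Lock()
    defer r.mu.Unlock()
    e, ok := r.entries[key]
    if !ok {
        e = &entry{}
        r.entries[key] = e
    }
    return e
}

func main() {
    r := &registry{entries: map[string]*entry{}}
    e := r.get("layer-1")
    e.Lock() // long work on "layer-1" now blocks only other users of "layer-1"
    e.refCount++
    e.Unlock()
}

The important property is that the global lock is only ever held for map bookkeeping, never across a mount or unmount.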
@@ -202,7 +203,20 @@ func (a *Driver) Exists(id string) bool {
 // Create three folders for each id
 // mnt, layers, and diff
 func (a *Driver) Create(id, parent, mountLabel string) error {
-    if err := a.createDirsFor(id); err != nil {
+    m := a.getActive(id)
+    m.Lock()
+
+    var err error
+    defer func() {
+        a.globalLock.Lock()
+        if err != nil {
+            delete(a.active, id)
+        }
+        a.globalLock.Unlock()
+        m.Unlock()
+    }()
+
+    if err = a.createDirsFor(id); err != nil {
         return err
     }
     // Write the layers metadata
@@ -213,21 +227,22 @@ func (a *Driver) Create(id, parent, mountLabel string) error {
     defer f.Close()
 
     if parent != "" {
-        ids, err := getParentIds(a.rootPath(), parent)
+        var ids []string
+        ids, err = getParentIds(a.rootPath(), parent)
         if err != nil {
             return err
         }
 
-        if _, err := fmt.Fprintln(f, parent); err != nil {
+        if _, err = fmt.Fprintln(f, parent); err != nil {
            return err
         }
         for _, i := range ids {
-            if _, err := fmt.Fprintln(f, i); err != nil {
+            if _, err = fmt.Fprintln(f, i); err != nil {
                 return err
             }
         }
     }
     a.active[id] = &data{}
 
     return nil
 }
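Create now takes the per-id lock first and declares a single err up front, which is why the later assignments switch from := to =: the deferred function inspects that shared err and, on failure, removes the half-registered entry from active under globalLock before releasing the per-id lock. Continuing the hypothetical registry sketch above, the cleanup idiom looks roughly like this:

// createTracked registers key, runs the supplied work, and removes the
// registration again if anything failed. The single named err is shared
// with the deferred cleanup, which is why later assignments use "="
// rather than ":=".
func (r *registry) createTracked(key string, work func() error) error {
    e := r.get(key)
    e.Lock()

    var err error
    defer func() {
        r.mu.Lock()
        if err != nil {
            delete(r.entries, key) // undo the registration on failure
        }
        r.mu.Unlock()
        e.Unlock()
    }()

    err = work()
    return err
}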
@@ -251,11 +266,10 @@ func (a *Driver) createDirsFor(id string) error {
 
 // Remove will unmount and remove the given id.
 func (a *Driver) Remove(id string) error {
-    // Protect the a.active from concurrent access
-    a.Lock()
-    defer a.Unlock()
+    m := a.getActive(id)
+    m.Lock()
+    defer m.Unlock()
 
-    m := a.active[id]
     if m != nil {
         if m.referenceCount > 0 {
             return nil
@@ -285,37 +299,54 @@ func (a *Driver) Remove(id string) error {
     if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
         return err
     }
     if m != nil {
+        a.globalLock.Lock()
         delete(a.active, id)
+        a.globalLock.Unlock()
     }
     return nil
 }
 
 // Get returns the rootfs path for the id.
 // This will mount the dir at it's given path
 func (a *Driver) Get(id, mountLabel string) (string, error) {
-    ids, err := getParentIds(a.rootPath(), id)
-    if err != nil {
-        if !os.IsNotExist(err) {
-            return "", err
-        }
-        ids = []string{}
-    }
-    // Protect the a.active from concurrent access
-    a.Lock()
-    defer a.Unlock()
-    m := a.active[id]
-    if m == nil {
-        m = &data{}
-        a.active[id] = m
-    }
+    m := a.getActive(id)
+    m.Lock()
+    defer m.Unlock()
+
+    parents, err := a.getParentLayerPaths(id)
+    if err != nil && !os.IsNotExist(err) {
+        return "", err
+    }
+
+    var parentLocks []*data
+    a.globalLock.Lock()
+    for _, p := range parents {
+        parentM, exists := a.active[p]
+        if !exists {
+            parentM = &data{}
+            a.active[p] = parentM
+        }
+        parentLocks = append(parentLocks, parentM)
+    }
+    a.globalLock.Unlock()
+
+    for _, l := range parentLocks {
+        l.Lock()
+    }
+    defer func() {
+        for _, l := range parentLocks {
+            l.Unlock()
+        }
+    }()
+
     // If a dir does not have a parent ( no layers )do not try to mount
     // just return the diff path to the data
     m.path = path.Join(a.rootPath(), "diff", id)
-    if len(ids) > 0 {
+    if len(parents) > 0 {
         m.path = path.Join(a.rootPath(), "mnt", id)
         if m.referenceCount == 0 {
-            if err := a.mount(id, m, mountLabel); err != nil {
+            if err := a.mount(id, m, mountLabel, parents); err != nil {
                 return "", err
             }
         }
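Get no longer reads the parent chain under the driver-wide lock. It resolves the parent layer paths, registers a data entry for each parent in one short globalLock section, then takes each parent's own mutex and releases them all in a deferred loop, so the parents cannot be unmounted out from under the child while it is being mounted. In the same hypothetical registry terms, the shape is approximately:

// withDependencies locks the entry for key plus the entries for all of
// its dependencies, runs fn, and then releases everything.
func (r *registry) withDependencies(key string, deps []string, fn func() error) error {
    e := r.get(key)
    e.Lock()
    defer e.Unlock()

    // Resolve all dependency entries in one short pass under the map lock.
    var depEntries []*entry
    r.mu.Lock()
    for _, d := range deps {
        de, ok := r.entries[d]
        if !ok {
            de = &entry{}
            r.entries[d] = de
        }
        depEntries = append(depEntries, de)
    }
    r.mu.Unlock()

    // Lock the dependencies outside the map lock; unlock them on return.
    for _, de := range depEntries {
        de.Lock()
    }
    defer func() {
        for _, de := range depEntries {
            de.Unlock()
        }
    }()

    return fn()
}

As in the diff, the entry for the id itself is locked before its parents; avoiding deadlock depends on every caller acquiring the locks in a consistent order.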
@@ -324,13 +355,24 @@ func (a *Driver) Get(id, mountLabel string) (string, error) {
     return m.path, nil
 }
 
+func (a *Driver) getActive(id string) *data {
+    // Protect the a.active from concurrent access
+    a.globalLock.Lock()
+    m, exists := a.active[id]
+    if !exists {
+        m = &data{}
+        a.active[id] = m
+    }
+    a.globalLock.Unlock()
+    return m
+}
+
 // Put unmounts and updates list of active mounts.
 func (a *Driver) Put(id string) error {
-    // Protect the a.active from concurrent access
-    a.Lock()
-    defer a.Unlock()
+    m := a.getActive(id)
+    m.Lock()
+    defer m.Unlock()
 
-    m := a.active[id]
     if m == nil {
         // but it might be still here
         if a.Exists(id) {
@@ -342,6 +384,7 @@ func (a *Driver) Put(id string) error {
         }
         return nil
     }
 
     if count := m.referenceCount; count > 1 {
         m.referenceCount = count - 1
     } else {
@@ -350,7 +393,6 @@ func (a *Driver) Put(id string) error {
         if ids != nil && len(ids) > 0 {
             a.unmount(m)
         }
-        delete(a.active, id)
     }
     return nil
 }
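Put keeps the reference counting but now does it under the per-id lock, and the delete of the active entry in the final-release branch is gone, so the entry (and its mutex) stays registered for later reuse. A rough sketch of that release path in the same hypothetical registry:

// release drops one reference to key and runs teardown only when the
// last user lets go, all under the per-key lock. Mirroring the new Put,
// the entry itself is left in the map rather than deleted.
func (r *registry) release(key string, teardown func()) {
    e := r.get(key)
    e.Lock()
    defer e.Unlock()

    if e.refCount > 1 {
        e.refCount--
        return
    }
    teardown() // e.g. unmount the layer
    e.refCount = 0
}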
@@ -426,7 +468,7 @@ func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
     return layers, nil
 }
 
-func (a *Driver) mount(id string, m *data, mountLabel string) error {
+func (a *Driver) mount(id string, m *data, mountLabel string, layers []string) error {
     // If the id is mounted or we get an error return
     if mounted, err := a.mounted(m); err != nil || mounted {
         return err
@@ -437,11 +479,6 @@ func (a *Driver) mount(id string, m *data, mountLabel string) error {
         rw = path.Join(a.rootPath(), "diff", id)
     )
 
-    layers, err := a.getParentLayerPaths(id)
-    if err != nil {
-        return err
-    }
-
     if err := a.aufsMount(layers, rw, target, mountLabel); err != nil {
         return fmt.Errorf("error creating aufs mount to %s: %v", target, err)
     }
@@ -9,11 +9,13 @@ import (
     "io/ioutil"
     "os"
     "path"
+    "sync"
     "testing"
 
     "github.com/docker/docker/daemon/graphdriver"
     "github.com/docker/docker/pkg/archive"
     "github.com/docker/docker/pkg/reexec"
+    "github.com/docker/docker/pkg/stringid"
 )
 
 var (
@@ -25,7 +27,7 @@ func init() {
     reexec.Init()
 }
 
-func testInit(dir string, t *testing.T) graphdriver.Driver {
+func testInit(dir string, t testing.TB) graphdriver.Driver {
     d, err := Init(dir, nil, nil, nil)
     if err != nil {
         if err == graphdriver.ErrNotSupported {
@@ -37,7 +39,7 @@ func testInit(dir string, t *testing.T) graphdriver.Driver {
     return d
 }
 
-func newDriver(t *testing.T) *Driver {
+func newDriver(t testing.TB) *Driver {
     if err := os.MkdirAll(tmp, 0755); err != nil {
         t.Fatal(err)
     }
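Changing testInit and newDriver from *testing.T to testing.TB is what allows the new benchmark below to reuse them: testing.TB is the interface satisfied by both *testing.T and *testing.B, so Fatal, Log and friends work from either. A small illustration with a hypothetical helper (not the real test code, would live in a _test.go file):

package example

import (
    "os"
    "testing"
)

// mustMkdir works for both tests and benchmarks because it only
// depends on the testing.TB interface.
func mustMkdir(tb testing.TB, dir string) {
    if err := os.MkdirAll(dir, 0755); err != nil {
        tb.Fatal(err)
    }
}

func TestUsesHelper(t *testing.T)      { mustMkdir(t, t.TempDir()) }
func BenchmarkUsesHelper(b *testing.B) { mustMkdir(b, b.TempDir()) }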
@@ -732,3 +734,68 @@ func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) {
         zeroes += "0"
     }
 }
+
+func BenchmarkConcurrentAccess(b *testing.B) {
+    b.StopTimer()
+    b.ResetTimer()
+
+    d := newDriver(b)
+    defer os.RemoveAll(tmp)
+    defer d.Cleanup()
+
+    numConcurent := 256
+    // create a bunch of ids
+    var ids []string
+    for i := 0; i < numConcurent; i++ {
+        ids = append(ids, stringid.GenerateNonCryptoID())
+    }
+
+    if err := d.Create(ids[0], "", ""); err != nil {
+        b.Fatal(err)
+    }
+
+    if err := d.Create(ids[1], ids[0], ""); err != nil {
+        b.Fatal(err)
+    }
+
+    parent := ids[1]
+    ids = append(ids[2:])
+
+    chErr := make(chan error, numConcurent)
+    var outerGroup sync.WaitGroup
+    outerGroup.Add(len(ids))
+    b.StartTimer()
+
+    // here's the actual bench
+    for _, id := range ids {
+        go func(id string) {
+            defer outerGroup.Done()
+            if err := d.Create(id, parent, ""); err != nil {
+                b.Logf("Create %s failed", id)
+                chErr <- err
+                return
+            }
+            var innerGroup sync.WaitGroup
+            for i := 0; i < b.N; i++ {
+                innerGroup.Add(1)
+                go func() {
+                    d.Get(id, "")
+                    d.Put(id)
+                    innerGroup.Done()
+                }()
+            }
+            innerGroup.Wait()
+            d.Remove(id)
+        }(id)
+    }
+
+    outerGroup.Wait()
+    b.StopTimer()
+    close(chErr)
+    for err := range chErr {
+        if err != nil {
+            b.Log(err)
+            b.Fail()
+        }
+    }
+}
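The benchmark drives exactly the paths the locking change touches: many goroutines concurrently Create, Get, Put, and Remove layers that share one parent. Assuming a Linux host with aufs support and sufficient privileges (the driver performs real mounts), it would typically be run with something along these lines, and adding -race is a natural way to double-check the finer-grained locking:

go test -run NONE -bench BenchmarkConcurrentAccess ./daemon/graphdriver/aufs/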
@@ -12,7 +12,7 @@ import (
 // Unmount the target specified.
 func Unmount(target string) error {
     if err := exec.Command("auplink", target, "flush").Run(); err != nil {
-        logrus.Errorf("Couldn't run auplink before unmount: %s", err)
+        logrus.Errorf("Couldn't run auplink before unmount %s: %s", target, err)
     }
     if err := syscall.Unmount(target, 0); err != nil {
         return err
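The final hunk only changes the warning text: the unmount target is now included, so a failed auplink flush can be tied to a specific mount point. As a general pattern (illustration only, not this package's code), carrying the operand in the returned error looks like:

package main

import (
    "fmt"
    "os/exec"
)

// flush runs "auplink <target> flush" and, like the updated log line,
// carries the target in the error so a failure can be tied to a
// specific mount point.
func flush(target string) error {
    if out, err := exec.Command("auplink", target, "flush").CombinedOutput(); err != nil {
        return fmt.Errorf("auplink flush of %s failed: %v (%s)", target, err, out)
    }
    return nil
}

func main() {
    if err := flush("/mnt/example"); err != nil {
        fmt.Println(err)
    }
}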