From 09ee269d998ad04733ef577739fa051df9d3f12e Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Fri, 20 Jun 2014 11:53:18 -0400
Subject: [PATCH] devmapper: Add option for specifying the thin pool blocksize

Add dm.blocksize option that you can use with --storage-opt to set a
specific blocksize for the thin provisioning pool.

Also change the default dm-thin-pool blocksize from 64K to 512K. This
strikes a balance between the desire to have smaller blocksize given
docker's use of snapshots versus the desire to have more performance
that comes with using a larger blocksize. But if very small files will
be used on average the user is encouraged to override this default.

Docker-DCO-1.1-Signed-off-by: Mike Snitzer (github: snitm)
---
 daemon/graphdriver/devmapper/README.md    |  8 ++++++++
 daemon/graphdriver/devmapper/deviceset.go | 14 ++++++++++++--
 daemon/graphdriver/devmapper/devmapper.go |  8 ++++----
 3 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/daemon/graphdriver/devmapper/README.md b/daemon/graphdriver/devmapper/README.md
index c8ab1d1ee1..849cfce64c 100644
--- a/daemon/graphdriver/devmapper/README.md
+++ b/daemon/graphdriver/devmapper/README.md
@@ -126,6 +126,14 @@ Here is the list of supported options:
 
     ``docker -d --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1``
 
+ * `dm.blocksize`
+
+    Specifies a custom blocksize to use for the thin pool.
+
+    Example use:
+
+    ``docker -d --storage-opt dm.blocksize=64K``
+
  * `dm.blkdiscard`
 
     Enables or disables the use of blkdiscard when removing
diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go
index b0e0819ba8..c42d9c5a72 100644
--- a/daemon/graphdriver/devmapper/deviceset.go
+++ b/daemon/graphdriver/devmapper/deviceset.go
@@ -28,6 +28,7 @@ var (
 	DefaultDataLoopbackSize     int64  = 100 * 1024 * 1024 * 1024
 	DefaultMetaDataLoopbackSize int64  = 2 * 1024 * 1024 * 1024
 	DefaultBaseFsSize           uint64 = 10 * 1024 * 1024 * 1024
+	DefaultThinpBlockSize       uint32 = 1024 // 512K = 1024 512b sectors
 )
 
 type DevInfo struct {
@@ -78,6 +79,7 @@ type DeviceSet struct {
 	dataDevice     string
 	metadataDevice string
 	doBlkDiscard   bool
+	thinpBlockSize uint32
 }
 
 type DiskUsage struct {
@@ -510,7 +512,7 @@ func (devices *DeviceSet) ResizePool(size int64) error {
 	}
 
 	// Reload with the new block sizes
-	if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback); err != nil {
+	if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil {
 		return fmt.Errorf("Unable to reload pool: %s", err)
 	}
 
@@ -640,7 +642,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 		}
 		defer metadataFile.Close()
 
-		if err := createPool(devices.getPoolName(), dataFile, metadataFile); err != nil {
+		if err := createPool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil {
 			return err
 		}
 	}
@@ -1159,6 +1161,7 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error
 		baseFsSize:       DefaultBaseFsSize,
 		filesystem:       "ext4",
 		doBlkDiscard:     true,
+		thinpBlockSize:   DefaultThinpBlockSize,
 	}
 
 	foundBlkDiscard := false
@@ -1206,6 +1209,13 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error
 			if err != nil {
 				return nil, err
 			}
+		case "dm.blocksize":
+			size, err := units.RAMInBytes(val)
+			if err != nil {
+				return nil, err
+			}
+			// convert to 512b sectors
+			devices.thinpBlockSize = uint32(size) >> 9
 		default:
 			return nil, fmt.Errorf("Unknown option %s\n", key)
 		}
diff --git a/daemon/graphdriver/devmapper/devmapper.go b/daemon/graphdriver/devmapper/devmapper.go
index 2590ec0fc1..ee4f2a8159 100644
--- a/daemon/graphdriver/devmapper/devmapper.go
+++ b/daemon/graphdriver/devmapper/devmapper.go
@@ -328,7 +328,7 @@ func BlockDeviceDiscard(path string) error {
 }
 
 // This is the programmatic example of "dmsetup create"
-func createPool(poolName string, dataFile, metadataFile *os.File) error {
+func createPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
 	task, err := createTask(DeviceCreate, poolName)
 	if task == nil {
 		return err
@@ -339,7 +339,7 @@ func createPool(poolName string, dataFile, metadataFile *os.File) error {
 		return fmt.Errorf("Can't get data size %s", err)
 	}
 
-	params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768 1 skip_block_zeroing"
+	params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
 	if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil {
 		return fmt.Errorf("Can't add target %s", err)
 	}
@@ -358,7 +358,7 @@ func createPool(poolName string, dataFile, metadataFile *os.File) error {
 	return nil
 }
 
-func reloadPool(poolName string, dataFile, metadataFile *os.File) error {
+func reloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
 	task, err := createTask(DeviceReload, poolName)
 	if task == nil {
 		return err
@@ -369,7 +369,7 @@ func reloadPool(poolName string, dataFile, metadataFile *os.File) error {
 		return fmt.Errorf("Can't get data size %s", err)
 	}
 
-	params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768 1 skip_block_zeroing"
+	params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
 	if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil {
 		return fmt.Errorf("Can't add target %s", err)
 	}