Mirror of https://github.com/moby/moby.git
devmapper: Add option for specifying the thin pool blocksize
Add a dm.blocksize option that can be used with --storage-opt to set a specific blocksize for the thin provisioning pool. Also change the default dm-thin-pool blocksize from 64K to 512K.

This strikes a balance between the desire for a smaller blocksize, given Docker's use of snapshots, and the better performance that comes with a larger blocksize. Users whose workloads will consist mostly of very small files are encouraged to override the default.

Docker-DCO-1.1-Signed-off-by: Mike Snitzer <snitzer@redhat.com> (github: snitm)
Parent: 2470a5ed99
Commit: 09ee269d99

3 changed files with 24 additions and 6 deletions
Documentation:

@@ -126,6 +126,14 @@ Here is the list of supported options:

     ``docker -d --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1``

+ *  `dm.blocksize`
+
+    Specifies a custom blocksize to use for the thin pool.
+
+    Example use:
+
+    ``docker -d --storage-opt dm.blocksize=64K``
+
 *  `dm.blkdiscard`

     Enables or disables the use of blkdiscard when removing
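Worth noting when choosing a value: the kernel's thin-provisioning target accepts data block sizes only between 64K and 1G, in multiples of 64K, and this patch does not validate the option up front, so a bad value surfaces later as a device-mapper error. A hypothetical validation sketch in Go (the helper name and explicit bounds are ours, taken from the kernel's thin-provisioning documentation, not from the patch):

    package main

    import "fmt"

    // validThinPoolBlockSize reports whether a byte count is acceptable to
    // the dm thin-pool target: between 64K and 1G, in multiples of 64K.
    func validThinPoolBlockSize(size int64) bool {
        const lo, hi, step int64 = 64 << 10, 1 << 30, 64 << 10
        return size >= lo && size <= hi && size%step == 0
    }

    func main() {
        fmt.Println(validThinPoolBlockSize(512 << 10)) // true: the new default
        fmt.Println(validThinPoolBlockSize(96 << 10))  // false: not a multiple of 64K
    }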
DeviceSet (devmapper graph driver):

@@ -28,6 +28,7 @@ var (
 	DefaultDataLoopbackSize     int64  = 100 * 1024 * 1024 * 1024
 	DefaultMetaDataLoopbackSize int64  = 2 * 1024 * 1024 * 1024
 	DefaultBaseFsSize           uint64 = 10 * 1024 * 1024 * 1024
+	DefaultThinpBlockSize       uint32 = 1024 // 512K = 1024 512b sectors
 )

 type DevInfo struct {
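Device-mapper counts the thin-pool block size in 512-byte sectors, which is why the new 512K default is stored as 1024. A minimal, self-contained sketch of that arithmetic (the helper name is ours, not from the patch):

    package main

    import "fmt"

    // bytesToSectors converts a byte count to 512-byte sectors, the unit
    // device-mapper uses for the thin-pool block size.
    func bytesToSectors(size int64) uint32 {
        return uint32(size >> 9) // 2^9 = 512
    }

    func main() {
        fmt.Println(bytesToSectors(512 * 1024)) // 1024 sectors: the new default
        fmt.Println(bytesToSectors(64 * 1024))  // 128 sectors: the old default
    }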
@@ -78,6 +79,7 @@ type DeviceSet struct {
 	dataDevice     string
 	metadataDevice string
 	doBlkDiscard   bool
+	thinpBlockSize uint32
 }

 type DiskUsage struct {
@@ -510,7 +512,7 @@ func (devices *DeviceSet) ResizePool(size int64) error {
 	}

 	// Reload with the new block sizes
-	if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback); err != nil {
+	if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil {
 		return fmt.Errorf("Unable to reload pool: %s", err)
 	}

@@ -640,7 +642,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 		}
 		defer metadataFile.Close()

-		if err := createPool(devices.getPoolName(), dataFile, metadataFile); err != nil {
+		if err := createPool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil {
 			return err
 		}
 	}
@@ -1159,6 +1161,7 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error) {
 		baseFsSize:     DefaultBaseFsSize,
 		filesystem:     "ext4",
 		doBlkDiscard:   true,
+		thinpBlockSize: DefaultThinpBlockSize,
 	}

 	foundBlkDiscard := false
@@ -1206,6 +1209,13 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error) {
 			if err != nil {
 				return nil, err
 			}
+		case "dm.blocksize":
+			size, err := units.RAMInBytes(val)
+			if err != nil {
+				return nil, err
+			}
+			// convert to 512b sectors
+			devices.thinpBlockSize = uint32(size) >> 9
 		default:
 			return nil, fmt.Errorf("Unknown option %s\n", key)
 		}
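One caveat in the conversion above: uint32(size) >> 9 narrows the 64-bit byte count to 32 bits before shifting, so any size of 4G or more wraps around (4G becomes 0 sectors). Shifting before the cast sidesteps this; a small demonstration of the difference:

    package main

    import "fmt"

    func main() {
        size := int64(4) * 1024 * 1024 * 1024 // 4G in bytes, for illustration

        // As in the patch: the cast truncates first, so 4G wraps to 0.
        wrapped := uint32(size) >> 9

        // Shift first and the result stays in range for sizes under 2T.
        safe := uint32(size >> 9)

        fmt.Println(wrapped, safe) // prints: 0 8388608
    }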
devmapper bindings (createPool/reloadPool):

@@ -328,7 +328,7 @@ func BlockDeviceDiscard(path string) error {
 }

 // This is the programmatic example of "dmsetup create"
-func createPool(poolName string, dataFile, metadataFile *os.File) error {
+func createPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
 	task, err := createTask(DeviceCreate, poolName)
 	if task == nil {
 		return err
@@ -339,7 +339,7 @@ func createPool(poolName string, dataFile, metadataFile *os.File) error {
 		return fmt.Errorf("Can't get data size %s", err)
 	}

-	params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768 1 skip_block_zeroing"
+	params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
 	if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil {
 		return fmt.Errorf("Can't add target %s", err)
 	}
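The params string is a dm thin-pool target table: metadata device, data device, data block size in 512-byte sectors, low-water mark in blocks, then a feature-argument count and the features themselves (here skip_block_zeroing). The previously hardcoded 128 sectors (64K) becomes the caller's poolBlockSize, 1024 sectors (512K) by default. A minimal sketch of the same construction (device paths are placeholders, the helper name is ours):

    package main

    import "fmt"

    // buildThinPoolTable assembles a dm thin-pool target line as handed to
    // "dmsetup create": <metadata dev> <data dev> <block size in sectors>
    // <low water mark> <feature count> <features...>
    func buildThinPoolTable(metadataDev, dataDev string, blockSize uint32) string {
        return fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataDev, dataDev, blockSize)
    }

    func main() {
        // Placeholder loop devices; the real code passes the files backing the pool.
        fmt.Println(buildThinPoolTable("/dev/loop1", "/dev/loop0", 1024))
        // /dev/loop1 /dev/loop0 1024 32768 1 skip_block_zeroing
    }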
@@ -358,7 +358,7 @@ func createPool(poolName string, dataFile, metadataFile *os.File) error {
 	return nil
 }

-func reloadPool(poolName string, dataFile, metadataFile *os.File) error {
+func reloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
 	task, err := createTask(DeviceReload, poolName)
 	if task == nil {
 		return err
@@ -369,7 +369,7 @@ func reloadPool(poolName string, dataFile, metadataFile *os.File) error {
 		return fmt.Errorf("Can't get data size %s", err)
 	}

-	params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768 1 skip_block_zeroing"
+	params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
 	if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil {
 		return fmt.Errorf("Can't add target %s", err)
 	}