Merge pull request #9006 from snitm/thin-pool-improvements

Thin pool improvements
Vincent Batts 2014-11-26 14:44:09 -05:00
commit b47ff77b5c
4 changed files with 58 additions and 9 deletions


@@ -100,6 +100,25 @@ Here is the list of supported options:
``docker -d --storage-opt dm.mountopt=nodiscard``
+* `dm.thinpooldev`
+Specifies a custom blockdevice to use for the thin pool.
+If using a block device for device mapper storage, lvm2 should
+ideally be used to create and manage the thin-pool volume, which is
+then handed to docker to exclusively create and manage the thin and
+thin snapshot volumes needed for its containers. Managing the
+thin-pool outside of docker makes for the most feature-rich way of
+having docker use device mapper thin provisioning as the backing
+storage for its containers. Highlights of lvm2-based thin-pool
+management include automatic or interactive thin-pool resizing,
+dynamic changes to thin-pool features, and automatic thin-pool
+metadata checking when lvm2 activates the pool. A sketch of this
+lvm2 workflow follows below.
+Example use:
+``docker -d --storage-opt dm.thinpooldev=/dev/mapper/thin-pool``
* `dm.datadev`
Specifies a custom blockdevice to use for data for the thin pool.
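As a rough sketch of the lvm2 workflow described under `dm.thinpooldev` above (the names here are placeholders, not anything the daemon requires: `/dev/sdb` as the backing device, a `storage` volume group, and a `thinpool` logical volume, which lvm2 exposes as `/dev/mapper/storage-thinpool`):

``pvcreate /dev/sdb``

``vgcreate storage /dev/sdb``

``lvcreate -l 95%VG --thinpool thinpool storage``

``docker -d --storage-opt dm.thinpooldev=/dev/mapper/storage-thinpool``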


@@ -84,6 +84,7 @@ type DeviceSet struct {
metadataDevice string
doBlkDiscard bool
thinpBlockSize uint32
+	thinPoolDevice string
}
type DiskUsage struct {
@@ -150,7 +151,10 @@ func (devices *DeviceSet) oldMetadataFile() string {
}
func (devices *DeviceSet) getPoolName() string {
-	return devices.devicePrefix + "-pool"
+	if devices.thinPoolDevice == "" {
+		return devices.devicePrefix + "-pool"
+	}
+	return devices.thinPoolDevice
}
func (devices *DeviceSet) getPoolDevName() string {
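To make the lookup change above concrete, here is a self-contained sketch of the two paths through getPoolName; the prefix value is a made-up example of the `docker-<major>:<minor>-<inode>` form generated in initDevmapper below:

```go
package main

import "fmt"

// Trimmed-down stand-in for the DeviceSet fields that getPoolName reads.
type deviceSet struct {
	devicePrefix   string
	thinPoolDevice string
}

func (d *deviceSet) getPoolName() string {
	if d.thinPoolDevice == "" {
		return d.devicePrefix + "-pool"
	}
	return d.thinPoolDevice
}

func main() {
	// Default: docker derives the pool name from its generated prefix.
	d := &deviceSet{devicePrefix: "docker-8:1-4980769"}
	fmt.Println(d.getPoolName()) // docker-8:1-4980769-pool

	// With --storage-opt dm.thinpooldev=/dev/mapper/thin-pool, NewDeviceSet
	// trims the "/dev/mapper/" prefix and the remaining dm name is used as-is.
	d.thinPoolDevice = "thin-pool"
	fmt.Println(d.getPoolName()) // thin-pool
}
```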
@@ -411,7 +415,22 @@ func (devices *DeviceSet) setupBaseImage() error {
}
}
log.Debugf("Initializing base device-manager snapshot")
if devices.thinPoolDevice != "" && oldInfo == nil {
_, transactionId, dataUsed, _, _, _, err := devices.poolStatus()
if err != nil {
return err
}
if dataUsed != 0 {
return fmt.Errorf("Unable to take ownership of thin-pool (%s) that already has used data blocks",
devices.thinPoolDevice)
}
if transactionId != 0 {
return fmt.Errorf("Unable to take ownership of thin-pool (%s) with non-zero transaction Id",
devices.thinPoolDevice)
}
}
log.Debugf("Initializing base device-mapper thin volume")
id := devices.NextDeviceId
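The ownership checks above read poolStatus, whose values ultimately come from the kernel's thin-pool status line. As an illustration only (not docker's actual poolStatus implementation), a standalone parser for the two fields used here, assuming `dmsetup status`-style output, could look like:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Hypothetical parser for a "dmsetup status <pool>" thin-pool line, showing
// where the transactionId and dataUsed values checked above come from.
// Field layout (per the kernel thin-provisioning target docs):
//   <start> <length> thin-pool <transaction id> <used meta>/<total meta> <used data>/<total data> ...
func parseThinPoolStatus(line string) (transactionId, dataUsed uint64, err error) {
	fields := strings.Fields(line)
	if len(fields) < 6 || fields[2] != "thin-pool" {
		return 0, 0, fmt.Errorf("not a thin-pool status line: %q", line)
	}
	if transactionId, err = strconv.ParseUint(fields[3], 10, 64); err != nil {
		return 0, 0, err
	}
	// fields[5] is "<used data blocks>/<total data blocks>".
	usedData := strings.SplitN(fields[5], "/", 2)[0]
	if dataUsed, err = strconv.ParseUint(usedData, 10, 64); err != nil {
		return 0, 0, err
	}
	return transactionId, dataUsed, nil
}

func main() {
	// A freshly created, never-used pool: transaction id 0, 0 data blocks used.
	txn, used, err := parseThinPoolStatus(
		"0 209715200 thin-pool 0 14/4096 0/163840 - rw discard_passdown queue_if_no_space")
	fmt.Println(txn, used, err) // 0 0 <nil>
}
```

A never-used pool reports transaction id 0 and 0 used data blocks, which is exactly what the checks above require before docker takes ownership of it.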
@@ -430,7 +449,7 @@ func (devices *DeviceSet) setupBaseImage() error {
return err
}
log.Debugf("Creating filesystem on base device-manager snapshot")
log.Debugf("Creating filesystem on base device-mapper thin volume")
if err = devices.activateDeviceIfNeeded(info); err != nil {
return err
@@ -605,7 +624,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino)
log.Debugf("Generated prefix: %s", devices.devicePrefix)
-	// Check for the existence of the device <prefix>-pool
+	// Check for the existence of the thin-pool device
log.Debugf("Checking for existence of the pool '%s'", devices.getPoolName())
info, err := devicemapper.GetInfo(devices.getPoolName())
if info == nil {
@@ -624,7 +643,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
createdLoopback := false
// If the pool doesn't exist, create it
-	if info.Exists == 0 {
+	if info.Exists == 0 && devices.thinPoolDevice == "" {
log.Debugf("Pool doesn't exist. Creating it.")
var (
@@ -988,8 +1007,10 @@ func (devices *DeviceSet) Shutdown() error {
}
devices.Lock()
-	if err := devices.deactivatePool(); err != nil {
-		log.Debugf("Shutdown deactivate pool, error: %s", err)
+	if devices.thinPoolDevice == "" {
+		if err := devices.deactivatePool(); err != nil {
+			log.Debugf("Shutdown deactivate pool, error: %s", err)
+		}
}
devices.saveDeviceSetMetaData()
@@ -1275,6 +1296,8 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error
devices.metadataDevice = val
case "dm.datadev":
devices.dataDevice = val
case "dm.thinpooldev":
devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/")
case "dm.blkdiscard":
foundBlkDiscard = true
devices.doBlkDiscard, err = strconv.ParseBool(val)
@@ -1294,7 +1317,7 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error
}
// By default, don't do blk discard hack on raw devices, it's rarely useful and is expensive
-	if !foundBlkDiscard && devices.dataDevice != "" {
+	if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") {
devices.doBlkDiscard = false
}
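Putting the option parsing above together: once `dm.thinpooldev` is given, the blk discard hack defaults to off, so it has to be re-enabled explicitly if desired (the pool name here is a placeholder):

``docker -d --storage-opt dm.thinpooldev=/dev/mapper/storage-thinpool --storage-opt dm.blkdiscard=true``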


@@ -384,7 +384,8 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize
}
var cookie uint = 0
-	if err := task.SetCookie(&cookie, 0); err != nil {
+	var flags uint16 = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag
+	if err := task.SetCookie(&cookie, flags); err != nil {
return fmt.Errorf("Can't set cookie %s", err)
}
defer UdevWait(cookie)
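For context on the SetCookie change: libdevmapper carries these udev flags in the upper 16 bits of the notification cookie (DM_UDEV_FLAGS_SHIFT is 16), which tells the device-mapper udev rules to skip scanning and symlink creation for the new pool device. A rough, self-contained sketch of that packing; the flag bit values are restated here as assumptions mirroring libdevmapper.h:

```go
package main

import "fmt"

// Assumed stand-ins for the libdevmapper udev flag bits used above
// (values taken to match libdevmapper.h).
const (
	DmUdevDisableSubsystemRulesFlag uint16 = 0x0002 // DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG
	DmUdevDisableDiskRulesFlag      uint16 = 0x0004 // DM_UDEV_DISABLE_DISK_RULES_FLAG
	DmUdevDisableOtherRulesFlag     uint16 = 0x0008 // DM_UDEV_DISABLE_OTHER_RULES_FLAG

	dmUdevFlagsShift = 16 // DM_UDEV_FLAGS_SHIFT: udev flags ride in the cookie's high 16 bits
)

func main() {
	flags := DmUdevDisableSubsystemRulesFlag |
		DmUdevDisableDiskRulesFlag |
		DmUdevDisableOtherRulesFlag

	// dm_task_set_cookie merges the flags into the cookie roughly like this;
	// udevd reads them back out of the uevent and skips the disabled rule sets.
	var cookieBase uint32 = 0xd447 // arbitrary example of the base cookie value
	cookie := cookieBase | uint32(flags)<<dmUdevFlagsShift
	fmt.Printf("flags=%#06x cookie=%#010x\n", flags, cookie)
}
```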


@@ -82,6 +82,12 @@ const (
LoNameSize = C.LO_NAME_SIZE
)
+const (
+	DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG
+	DmUdevDisableDiskRulesFlag      = C.DM_UDEV_DISABLE_DISK_RULES_FLAG
+	DmUdevDisableOtherRulesFlag     = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG
+)
var (
DmGetLibraryVersion = dmGetLibraryVersionFct
DmGetNextTarget = dmGetNextTargetFct