mirror of
https://github.com/moby/moby.git
synced 2022-11-09 12:21:53 -05:00
devmapper: Add option for specifying an lvm2 created thin-pool device
Ideally lvm2 would be used to create/manage the thin-pool volume that is then handed to docker to exclusively create/manage the thin and thin snapshot volumes needed for its containers. Managing the thin-pool outside of docker makes for the most feature-rich method of having docker utilize device mapper thin provisioning as the backing storage for docker's containers. lvm2-based thin-pool management feature highlights include: automatic or interactive thin-pool resize support, dynamically change thin-pool features, automatic thinp metadata checking when lvm2 activates the thin-pool, etc. Docker will not activate/deactivate the specified thin-pool device but it will exclusively manage/create thin and thin snapshot volumes in it. Docker will not take ownership of the specified thin-pool device unless it has 0 data blocks used and a transaction id of 0. This should help guard against using a thin-pool that is already in use. Also fix typos in setupBaseImage() relative to the thin volume type of the base image. Docker-DCO-1.1-Signed-off-by: Mike Snitzer <snitzer@redhat.com> (github: snitm)
This commit is contained in:
parent
ad6467f9e1
commit
2b10749cdd
2 changed files with 49 additions and 7 deletions
|
@ -100,6 +100,25 @@ Here is the list of supported options:
|
|||
|
||||
``docker -d --storage-opt dm.mountopt=nodiscard``
|
||||
|
||||
* `dm.thinpooldev`
|
||||
|
||||
Specifies a custom blockdevice to use for the thin pool.
|
||||
|
||||
If using a block device for device mapper storage, ideally lvm2
|
||||
would be used to create/manage the thin-pool volume that is then
|
||||
handed to docker to exclusively create/manage the thin and thin
|
||||
snapshot volumes needed for its containers. Managing the thin-pool
|
||||
outside of docker makes for the most feature-rich method of having
|
||||
docker utilize device mapper thin provisioning as the backing
|
||||
storage for docker's containers. lvm2-based thin-pool management
|
||||
feature highlights include: automatic or interactive thin-pool
|
||||
resize support, dynamically change thin-pool features, automatic
|
||||
thinp metadata checking when lvm2 activates the thin-pool, etc.
|
||||
|
||||
Example use:
|
||||
|
||||
``docker -d --storage-opt dm.thinpooldev=/dev/mapper/thin-pool``
|
||||
|
||||
* `dm.datadev`
|
||||
|
||||
Specifies a custom blockdevice to use for data for the thin pool.
|
||||
|
|
|
@ -84,6 +84,7 @@ type DeviceSet struct {
|
|||
metadataDevice string
|
||||
doBlkDiscard bool
|
||||
thinpBlockSize uint32
|
||||
thinPoolDevice string
|
||||
}
|
||||
|
||||
type DiskUsage struct {
|
||||
|
@ -150,7 +151,11 @@ func (devices *DeviceSet) oldMetadataFile() string {
|
|||
}
|
||||
|
||||
func (devices *DeviceSet) getPoolName() string {
|
||||
return devices.devicePrefix + "-pool"
|
||||
if devices.thinPoolDevice == "" {
|
||||
return devices.devicePrefix + "-pool"
|
||||
} else {
|
||||
return devices.thinPoolDevice
|
||||
}
|
||||
}
|
||||
|
||||
func (devices *DeviceSet) getPoolDevName() string {
|
||||
|
@ -411,7 +416,21 @@ func (devices *DeviceSet) setupBaseImage() error {
|
|||
}
|
||||
}
|
||||
|
||||
log.Debugf("Initializing base device-manager snapshot")
|
||||
if devices.thinPoolDevice != "" && oldInfo == nil {
|
||||
if _, transactionId, dataUsed, _, _, _, err := devices.poolStatus(); err != nil {
|
||||
return err
|
||||
} else {
|
||||
if dataUsed != 0 {
|
||||
return fmt.Errorf("Unable to take ownership of thin-pool (%s) that already has used data blocks",
|
||||
devices.thinPoolDevice)
|
||||
} else if transactionId != 0 {
|
||||
return fmt.Errorf("Unable to take ownership of thin-pool (%s) with non-zero transaction Id",
|
||||
devices.thinPoolDevice)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.Debugf("Initializing base device-mapper thin volume")
|
||||
|
||||
id := devices.NextDeviceId
|
||||
|
||||
|
@ -430,7 +449,7 @@ func (devices *DeviceSet) setupBaseImage() error {
|
|||
return err
|
||||
}
|
||||
|
||||
log.Debugf("Creating filesystem on base device-manager snapshot")
|
||||
log.Debugf("Creating filesystem on base device-mapper thin volume")
|
||||
|
||||
if err = devices.activateDeviceIfNeeded(info); err != nil {
|
||||
return err
|
||||
|
@ -605,7 +624,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
|||
devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino)
|
||||
log.Debugf("Generated prefix: %s", devices.devicePrefix)
|
||||
|
||||
// Check for the existence of the device <prefix>-pool
|
||||
// Check for the existence of the thin-pool device
|
||||
log.Debugf("Checking for existence of the pool '%s'", devices.getPoolName())
|
||||
info, err := devicemapper.GetInfo(devices.getPoolName())
|
||||
if info == nil {
|
||||
|
@ -624,7 +643,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
|||
createdLoopback := false
|
||||
|
||||
// If the pool doesn't exist, create it
|
||||
if info.Exists == 0 {
|
||||
if info.Exists == 0 && devices.thinPoolDevice == "" {
|
||||
log.Debugf("Pool doesn't exist. Creating it.")
|
||||
|
||||
var (
|
||||
|
@ -988,8 +1007,10 @@ func (devices *DeviceSet) Shutdown() error {
|
|||
}
|
||||
|
||||
devices.Lock()
|
||||
if err := devices.deactivatePool(); err != nil {
|
||||
log.Debugf("Shutdown deactivate pool , error: %s", err)
|
||||
if devices.thinPoolDevice == "" {
|
||||
if err := devices.deactivatePool(); err != nil {
|
||||
log.Debugf("Shutdown deactivate pool , error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
devices.saveDeviceSetMetaData()
|
||||
|
@ -1275,6 +1296,8 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error
|
|||
devices.metadataDevice = val
|
||||
case "dm.datadev":
|
||||
devices.dataDevice = val
|
||||
case "dm.thinpooldev":
|
||||
devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/")
|
||||
case "dm.blkdiscard":
|
||||
foundBlkDiscard = true
|
||||
devices.doBlkDiscard, err = strconv.ParseBool(val)
|
||||
|
|
Loading…
Add table
Reference in a new issue