Merge pull request #20786 from rhvgoyal/min-free-space

devmapper: Add a new option dm.min_free_space_percent
This commit is contained in:
Vincent Batts 2016-03-14 20:10:43 -04:00
commit bfed97b688
3 changed files with 110 additions and 5 deletions

View File

@ -43,11 +43,12 @@ var (
// We retry device removal so many a times that even error messages
// will fill up console during normal operation. So only log Fatal
// messages by default.
logLevel = devicemapper.LogLevelFatal
driverDeferredRemovalSupport = false
enableDeferredRemoval = false
enableDeferredDeletion = false
userBaseSize = false
logLevel = devicemapper.LogLevelFatal
driverDeferredRemovalSupport = false
enableDeferredRemoval = false
enableDeferredDeletion = false
userBaseSize = false
defaultMinFreeSpacePercent uint32 = 10
)
const deviceSetMetaFile string = "deviceset-metadata"
@ -122,6 +123,7 @@ type DeviceSet struct {
deletionWorkerTicker *time.Ticker
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
minFreeSpacePercent uint32 //min free space percentage in thinpool
}
// DiskUsage contains information about disk usage and is used when reporting Status of a device.
@ -753,6 +755,38 @@ func (devices *DeviceSet) getNextFreeDeviceID() (int, error) {
return 0, fmt.Errorf("devmapper: Unable to find a free device ID")
}
// poolHasFreeSpace checks that the thin pool still has at least
// devices.minFreeSpacePercent percent of its data blocks and metadata
// blocks free. It returns nil when the check is disabled (percent == 0)
// or when enough space is available, and a descriptive error otherwise.
func (devices *DeviceSet) poolHasFreeSpace() error {
	// A configured value of 0 disables the free-space check entirely.
	if devices.minFreeSpacePercent == 0 {
		return nil
	}

	_, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus()
	if err != nil {
		return err
	}

	// Require at least one free data block even when the percentage
	// rounds down to zero for very small pools.
	minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100
	if minFreeData < 1 {
		minFreeData = 1
	}
	dataFree := dataTotal - dataUsed
	if dataFree < minFreeData {
		return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData)
	}

	// Same floor-of-one rule for metadata blocks.
	minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100
	if minFreeMetadata < 1 {
		// Bug fix: the original assigned minFreeData here, leaving the
		// metadata minimum at 0 and skipping the metadata floor check.
		minFreeMetadata = 1
	}
	metadataFree := metadataTotal - metadataUsed
	if metadataFree < minFreeMetadata {
		return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata)
	}

	return nil
}
func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) {
devices.Lock()
defer devices.Unlock()
@ -809,6 +843,10 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) {
}
func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo) error {
if err := devices.poolHasFreeSpace(); err != nil {
return err
}
deviceID, err := devices.getNextFreeDeviceID()
if err != nil {
return err
@ -2437,6 +2475,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
deletionWorkerTicker: time.NewTicker(time.Second * 30),
uidMaps: uidMaps,
gidMaps: gidMaps,
minFreeSpacePercent: defaultMinFreeSpacePercent,
}
foundBlkDiscard := false
@ -2512,6 +2551,22 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
return nil, err
}
case "dm.min_free_space":
if !strings.HasSuffix(val, "%") {
return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix")
}
valstring := strings.TrimSuffix(val, "%")
minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32)
if err != nil {
return nil, err
}
if minFreeSpacePercent >= 100 {
return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val)
}
devices.minFreeSpacePercent = uint32(minFreeSpacePercent)
default:
return nil, fmt.Errorf("devmapper: Unknown option %s\n", key)
}

View File

@ -438,6 +438,32 @@ options for `zfs` start with `zfs`.
when unintentional leaking of mount point happens across multiple mount
namespaces.
* `dm.min_free_space`
Specifies the minimum free space percent in the thin pool required for new
device creation to succeed. This check applies to both free data space as
well as free metadata space. Valid values are from 0% - 99%. Value 0%
disables free space checking logic. If user does not specify a value for
this option, then default value for this option is 10%.
Whenever a new thin pool device is created (during docker pull or
during container creation), docker will check minimum free space is
available as specified by this parameter. If that is not the case, then
device creation will fail and docker operation will fail.
One will have to create more free space in thin pool to recover from the
error. Either delete some of the images and containers from thin pool and
create free space or add more storage to thin pool.
For lvm thin pool, one can add more storage to the volume group containing
the thin pool and that should automatically resolve it. If loop devices are
being used, then stop docker, grow the size of loop files and restart docker
and that should resolve the issue.
Example use:
$ docker daemon --storage-opt dm.min_free_space=10%
Currently supported options of `zfs`:
* `zfs.fsname`

View File

@ -475,6 +475,30 @@ By default docker will pick up the zfs filesystem where docker graph
Example use: `docker daemon -s zfs --storage-opt zfs.fsname=zroot/docker`
#### dm.min_free_space
Specifies the minimum free space percent in the thin pool required for new
device creation to succeed. This check applies to both free data space as
well as free metadata space. Valid values are from 0% - 99%. Value 0%
disables free space checking logic. If user does not specify a value for
this option, then default value for this option is 10%.
Whenever a new thin pool device is created (during docker pull or
during container creation), docker will check minimum free space is
available as specified by this parameter. If that is not the case, then
device creation will fail and docker operation will fail.
One will have to create more free space in thin pool to recover from the
error. Either delete some of the images and containers from thin pool and
create free space or add more storage to thin pool.
For lvm thin pool, one can add more storage to the volume group containing
the thin pool and that should automatically resolve it. If loop devices are
being used, then stop docker, grow the size of loop files and restart docker
and that should resolve the issue.
Example use: `docker daemon --storage-opt dm.min_free_space=10%`
# CLUSTER STORE OPTIONS
The daemon uses libkv to advertise