devmapper: Add a new option dm.min_free_space
Once the thin pool gets full, bad things can happen. In particular, with xfs it is possible that xfs keeps retrying certain kinds of IO indefinitely and the container hangs.

One way to mitigate the problem is to start failing some docker operations, such as pulling new images or creating new containers, once the thin pool is about to get full. That way the user gets a warning ahead of time and can rectify the situation by creating more free space in the thin pool, either by deleting existing images/containers or by adding more free space to the pool.

This patch adds a new option, dm.min_free_space, to the devicemapper graph driver. Say one specifies dm.min_free_space=10%. This means at least 10% of both data and metadata blocks must be free in the pool before new device creation is allowed; otherwise the operation fails.

By default min_free_space is 10%. The user can change it by specifying dm.min_free_space=X% on the command line. A value of 0% disables the check.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
This commit is contained in:
parent 64a4a7a191
commit 2e222f69b3

3 changed files with 110 additions and 5 deletions
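As a rough, standalone illustration of the check described in the commit message (made-up pool numbers, not part of the patch), the gating logic boils down to comparing free blocks against a percentage of the total:

```go
package main

import "fmt"

// enoughFreeSpace sketches the check the patch performs for both the data
// and the metadata device: at least minFreePercent of the pool's blocks
// (and never fewer than one block) must still be free.
func enoughFreeSpace(used, total uint64, minFreePercent uint32) bool {
	if minFreePercent == 0 {
		return true // 0% disables the check
	}
	minFree := total * uint64(minFreePercent) / 100
	if minFree < 1 {
		minFree = 1
	}
	return total-used >= minFree
}

func main() {
	// Hypothetical pool: 100000 data blocks, 95000 used -> only 5% free,
	// so with the default 10% new device creation would be refused.
	fmt.Println(enoughFreeSpace(95000, 100000, 10)) // false
	// 2000 of 25000 metadata blocks used -> 92% free, the check passes.
	fmt.Println(enoughFreeSpace(2000, 25000, 10)) // true
}
```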
@@ -43,11 +43,12 @@ var (
 	// We retry device removal so many a times that even error messages
 	// will fill up console during normal operation. So only log Fatal
 	// messages by default.
 	logLevel                     = devicemapper.LogLevelFatal
 	driverDeferredRemovalSupport = false
 	enableDeferredRemoval        = false
 	enableDeferredDeletion       = false
 	userBaseSize                 = false
+	defaultMinFreeSpacePercent uint32 = 10
 )
 
 const deviceSetMetaFile string = "deviceset-metadata"
@@ -122,6 +123,7 @@ type DeviceSet struct {
 	deletionWorkerTicker *time.Ticker
 	uidMaps              []idtools.IDMap
 	gidMaps              []idtools.IDMap
+	minFreeSpacePercent  uint32 //min free space percentage in thinpool
 }
 
 // DiskUsage contains information about disk usage and is used when reporting Status of a device.
@@ -753,6 +755,38 @@ func (devices *DeviceSet) getNextFreeDeviceID() (int, error) {
 	return 0, fmt.Errorf("devmapper: Unable to find a free device ID")
 }
 
+func (devices *DeviceSet) poolHasFreeSpace() error {
+	if devices.minFreeSpacePercent == 0 {
+		return nil
+	}
+
+	_, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus()
+	if err != nil {
+		return err
+	}
+
+	minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100
+	if minFreeData < 1 {
+		minFreeData = 1
+	}
+	dataFree := dataTotal - dataUsed
+	if dataFree < minFreeData {
+		return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData)
+	}
+
+	minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100
+	if minFreeMetadata < 1 {
+		minFreeMetadata = 1
+	}
+
+	metadataFree := metadataTotal - metadataUsed
+	if metadataFree < minFreeMetadata {
+		return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata)
+	}
+
+	return nil
+}
+
 func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) {
 	devices.Lock()
 	defer devices.Unlock()
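One detail of the poolHasFreeSpace check added above is that the computed minimum is clamped to at least one block, so integer division never lets the threshold collapse to zero for very small pools or very low percentages. A hedged standalone sketch of that rounding behaviour (numbers are illustrative only):

```go
package main

import "fmt"

// minFreeBlocks mirrors the clamp in the check above: integer division can
// round the percentage down to zero, so at least one block is always required.
func minFreeBlocks(total uint64, percent uint32) uint64 {
	required := total * uint64(percent) / 100
	if required < 1 {
		required = 1
	}
	return required
}

func main() {
	fmt.Println(minFreeBlocks(100000, 10)) // 10000
	fmt.Println(minFreeBlocks(5, 10))      // 5*10/100 rounds to 0, clamped to 1
}
```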
@@ -809,6 +843,10 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) {
 }
 
 func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo) error {
+	if err := devices.poolHasFreeSpace(); err != nil {
+		return err
+	}
+
 	deviceID, err := devices.getNextFreeDeviceID()
 	if err != nil {
 		return err
@@ -2437,6 +2475,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
 		deletionWorkerTicker: time.NewTicker(time.Second * 30),
 		uidMaps:              uidMaps,
 		gidMaps:              gidMaps,
+		minFreeSpacePercent:  defaultMinFreeSpacePercent,
 	}
 
 	foundBlkDiscard := false
@@ -2512,6 +2551,22 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
 				return nil, err
 			}
 
+		case "dm.min_free_space":
+			if !strings.HasSuffix(val, "%") {
+				return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix")
+			}
+
+			valstring := strings.TrimSuffix(val, "%")
+			minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32)
+			if err != nil {
+				return nil, err
+			}
+
+			if minFreeSpacePercent >= 100 {
+				return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val)
+			}
+
+			devices.minFreeSpacePercent = uint32(minFreeSpacePercent)
 		default:
 			return nil, fmt.Errorf("devmapper: Unknown option %s\n", key)
 		}
@@ -438,6 +438,32 @@ options for `zfs` start with `zfs`.
    when unintentional leaking of mount point happens across multiple mount
    namespaces.
 
+ * `dm.min_free_space`
+
+    Specifies the minimum free space, as a percentage of the thin pool, that
+    is required for new device creation to succeed. The check applies to both
+    free data space and free metadata space. Valid values are 0% - 99%. A
+    value of 0% disables the free space check. If no value is specified, the
+    default is 10%.
+
+    Whenever a new thin pool device is created (during `docker pull` or during
+    container creation), docker checks that the minimum free space specified
+    by this parameter is available. If it is not, device creation fails and
+    the docker operation fails.
+
+    To recover from the error, you must create more free space in the thin
+    pool: either delete some images and containers to free up space, or add
+    more storage to the thin pool.
+
+    For an lvm thin pool, adding more storage to the volume group containing
+    the thin pool should resolve the error automatically. If loop devices are
+    being used, stop docker, grow the size of the loop files, and restart
+    docker.
+
+    Example use:
+
+    $ docker daemon --storage-opt dm.min_free_space=10%
+
 Currently supported options of `zfs`:
 
 * `zfs.fsname`
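The value format the documentation above describes (a whole number from 0 to 99 followed by a `%` suffix) can be sketched as a standalone validator; the helper name below is illustrative and not part of the patch:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseMinFreeSpace validates a dm.min_free_space value the same way the
// daemon option parsing does: a "%" suffix is mandatory and the percentage
// must be below 100.
func parseMinFreeSpace(val string) (uint32, error) {
	if !strings.HasSuffix(val, "%") {
		return 0, fmt.Errorf("option dm.min_free_space requires %% suffix")
	}
	pct, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
	if err != nil {
		return 0, err
	}
	if pct >= 100 {
		return 0, fmt.Errorf("invalid value %v for option dm.min_free_space", val)
	}
	return uint32(pct), nil
}

func main() {
	for _, v := range []string{"10%", "0%", "100%", "15"} {
		pct, err := parseMinFreeSpace(v)
		fmt.Printf("%-5s -> %d, %v\n", v, pct, err)
	}
}
```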
@@ -475,6 +475,30 @@ By default docker will pick up the zfs filesystem where docker graph
 
 Example use: `docker daemon -s zfs --storage-opt zfs.fsname=zroot/docker`
 
+#### dm.min_free_space
+
+Specifies the minimum free space, as a percentage of the thin pool, that is
+required for new device creation to succeed. The check applies to both free
+data space and free metadata space. Valid values are 0% - 99%. A value of 0%
+disables the free space check. If no value is specified, the default is 10%.
+
+Whenever a new thin pool device is created (during `docker pull` or during
+container creation), docker checks that the minimum free space specified by
+this parameter is available. If it is not, device creation fails and the
+docker operation fails.
+
+To recover from the error, you must create more free space in the thin pool:
+either delete some images and containers to free up space, or add more
+storage to the thin pool.
+
+For an lvm thin pool, adding more storage to the volume group containing the
+thin pool should resolve the error automatically. If loop devices are being
+used, stop docker, grow the size of the loop files, and restart docker.
+
+Example use: `docker daemon --storage-opt dm.min_free_space=10%`
+
 # CLUSTER STORE OPTIONS
 
 The daemon uses libkv to advertise