Mirror of https://github.com/moby/moby.git
Commit 51c7992928
This patch adds a new "prune" event type to indicate that pruning of a resource type completed. This event type can be used on systems that want to perform actions after resources have been cleaned up. For example, Docker Desktop performs an fstrim after resources are deleted (https://github.com/linuxkit/linuxkit/tree/v0.7/pkg/trim-after-delete).

While the current (remove, destroy) events can provide information on _most_ resources, there is currently no event triggered after the BuildKit build-cache is cleaned.

Prune events have a `reclaimed` attribute, indicating the amount of space that was reclaimed (in bytes). The attribute can be used, for example, as a threshold for performing fstrim actions. Reclaimed space for `network` events will always be 0, but the field is added to be consistent with prune events for other resources.

To test this patch:

Create some resources:

    for i in foo bar baz; do \
        docker network create network_$i \
        && docker volume create volume_$i \
        && docker run -d --name container_$i -v volume_$i:/volume busybox sh -c 'truncate -s 5M somefile; truncate -s 5M /volume/file' \
        && docker tag busybox:latest image_$i; \
    done;

    docker pull alpine
    docker pull nginx:alpine

    echo -e "FROM busybox\nRUN truncate -s 50M bigfile" | DOCKER_BUILDKIT=1 docker build -

Start listening for "prune" events in another shell:

    docker events --filter event=prune

Prune containers, networks, volumes, and build-cache:

    docker system prune -af --volumes

See the events that are returned:

    $ docker events --filter event=prune
    2020-07-25T12:12:09.268491000Z container prune (reclaimed=15728640)
    2020-07-25T12:12:09.447890400Z network prune (reclaimed=0)
    2020-07-25T12:12:09.452323000Z volume prune (reclaimed=15728640)
    2020-07-25T12:12:09.517236200Z image prune (reclaimed=21568540)
    2020-07-25T12:12:09.566662600Z builder prune (reclaimed=52428841)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
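Not part of this patch, but as an illustration of the threshold use case described above: a minimal sketch of a client reacting to these events via the Docker Go SDK, assuming a daemon that emits them. The 1 MiB threshold is an arbitrary example value.

    package main

    import (
        "context"
        "fmt"
        "strconv"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/api/types/filters"
        "github.com/docker/docker/client"
    )

    func main() {
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            panic(err)
        }

        // Subscribe to prune events only, mirroring `docker events --filter event=prune`.
        msgs, errs := cli.Events(context.Background(), types.EventsOptions{
            Filters: filters.NewArgs(filters.Arg("event", "prune")),
        })

        const threshold = 1 << 20 // arbitrary example: only react above 1 MiB

        for {
            select {
            case msg := <-msgs:
                // "reclaimed" is the number of bytes freed, as set by this patch.
                reclaimed, _ := strconv.ParseUint(msg.Actor.Attributes["reclaimed"], 10, 64)
                fmt.Printf("%s prune reclaimed %d bytes\n", msg.Type, reclaimed)
                if reclaimed >= threshold {
                    // e.g. trigger an fstrim of the underlying disk here
                }
            case err := <-errs:
                panic(err)
            }
        }
    }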
217 lines
5.5 KiB
Go
package images // import "github.com/docker/docker/daemon/images"

import (
	"context"
	"fmt"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/filters"
	timetypes "github.com/docker/docker/api/types/time"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

var imagesAcceptedFilters = map[string]bool{
	"dangling": true,
	"label":    true,
	"label!":   true,
	"until":    true,
}

// errPruneRunning is returned when a prune request is received while
// one is in progress
var errPruneRunning = errdefs.Conflict(errors.New("a prune operation is already running"))

// ImagesPrune removes unused images
func (i *ImageService) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) {
	if !atomic.CompareAndSwapInt32(&i.pruneRunning, 0, 1) {
		return nil, errPruneRunning
	}
	defer atomic.StoreInt32(&i.pruneRunning, 0)

	// make sure that only accepted filters have been received
	err := pruneFilters.Validate(imagesAcceptedFilters)
	if err != nil {
		return nil, err
	}

	rep := &types.ImagesPruneReport{}

	danglingOnly := true
	if pruneFilters.Contains("dangling") {
		if pruneFilters.ExactMatch("dangling", "false") || pruneFilters.ExactMatch("dangling", "0") {
			danglingOnly = false
		} else if !pruneFilters.ExactMatch("dangling", "true") && !pruneFilters.ExactMatch("dangling", "1") {
			return nil, invalidFilter{"dangling", pruneFilters.Get("dangling")}
		}
	}

	until, err := getUntilFromPruneFilters(pruneFilters)
	if err != nil {
		return nil, err
	}

	var allImages map[image.ID]*image.Image
	if danglingOnly {
		allImages = i.imageStore.Heads()
	} else {
		allImages = i.imageStore.Map()
	}

	// Filter intermediary images and get their unique size
	allLayers := make(map[layer.ChainID]layer.Layer)
	for _, ls := range i.layerStores {
		for k, v := range ls.Map() {
			allLayers[k] = v
		}
	}
	topImages := map[image.ID]*image.Image{}
	for id, img := range allImages {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		default:
			dgst := digest.Digest(id)
			// skip intermediate images (untagged images that still have children)
			if len(i.referenceStore.References(dgst)) == 0 && len(i.imageStore.Children(id)) != 0 {
				continue
			}
			if !until.IsZero() && img.Created.After(until) {
				continue
			}
			if img.Config != nil && !matchLabels(pruneFilters, img.Config.Labels) {
				continue
			}
			topImages[id] = img
		}
	}

	canceled := false
deleteImagesLoop:
	for id := range topImages {
		select {
		case <-ctx.Done():
			// we still want to calculate freed size and return the data
			canceled = true
			break deleteImagesLoop
		default:
		}

		deletedImages := []types.ImageDeleteResponseItem{}
		refs := i.referenceStore.References(id.Digest())
		if len(refs) > 0 {
			shouldDelete := !danglingOnly
			if !shouldDelete {
				hasTag := false
				for _, ref := range refs {
					if _, ok := ref.(reference.NamedTagged); ok {
						hasTag = true
						break
					}
				}

				// Only delete if it's untagged (i.e. repo:<none>)
				shouldDelete = !hasTag
			}

			if shouldDelete {
				for _, ref := range refs {
					imgDel, err := i.ImageDelete(ref.String(), false, true)
					if imageDeleteFailed(ref.String(), err) {
						continue
					}
					deletedImages = append(deletedImages, imgDel...)
				}
			}
		} else {
			hex := id.Digest().Hex()
			imgDel, err := i.ImageDelete(hex, false, true)
			if imageDeleteFailed(hex, err) {
				continue
			}
			deletedImages = append(deletedImages, imgDel...)
		}

		rep.ImagesDeleted = append(rep.ImagesDeleted, deletedImages...)
	}

	// Compute how much space was freed
	for _, d := range rep.ImagesDeleted {
		if d.Deleted != "" {
			chid := layer.ChainID(d.Deleted)
			if l, ok := allLayers[chid]; ok {
				diffSize, err := l.DiffSize()
				if err != nil {
					logrus.Warnf("failed to get layer %s size: %v", chid, err)
					continue
				}
				rep.SpaceReclaimed += uint64(diffSize)
			}
		}
	}

	if canceled {
		logrus.Debugf("ImagesPrune operation cancelled: %#v", *rep)
	}
	i.eventsService.Log("prune", events.ImageEventType, events.Actor{
		Attributes: map[string]string{
			"reclaimed": strconv.FormatUint(rep.SpaceReclaimed, 10),
		},
	})
	return rep, nil
}

// imageDeleteFailed reports whether a deletion attempt failed. Conflicts
// (e.g. the image is still in use) are skipped silently; other errors are
// logged as warnings. A nil error means the deletion succeeded.
func imageDeleteFailed(ref string, err error) bool {
	switch {
	case err == nil:
		return false
	case errdefs.IsConflict(err):
		return true
	default:
		logrus.Warnf("failed to prune image %s: %v", ref, err)
		return true
	}
}

// matchLabels reports whether the given labels satisfy both the "label"
// and "label!" prune filters.
func matchLabels(pruneFilters filters.Args, labels map[string]string) bool {
	if !pruneFilters.MatchKVList("label", labels) {
		return false
	}
	// By default MatchKVList will return true if field (like 'label!') does not exist
	// So we have to add additional Contains("label!") check
	if pruneFilters.Contains("label!") {
		if pruneFilters.MatchKVList("label!", labels) {
			return false
		}
	}
	return true
}

// getUntilFromPruneFilters parses the "until" filter into a time.Time.
// It returns the zero time if the filter is not set.
func getUntilFromPruneFilters(pruneFilters filters.Args) (time.Time, error) {
	until := time.Time{}
	if !pruneFilters.Contains("until") {
		return until, nil
	}
	untilFilters := pruneFilters.Get("until")
	if len(untilFilters) > 1 {
		return until, fmt.Errorf("more than one until filter specified")
	}
	ts, err := timetypes.GetTimestamp(untilFilters[0], time.Now())
	if err != nil {
		return until, err
	}
	seconds, nanoseconds, err := timetypes.ParseTimestamps(ts, 0)
	if err != nil {
		return until, err
	}
	until = time.Unix(seconds, nanoseconds)
	return until, nil
}
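For reference, the "until" filter parsed above accepts Unix timestamps, date-formatted timestamps, or Go duration strings such as `24h`. The `label` / `label!` semantics implemented by `matchLabels` can be exercised directly with `filters.Args`; a minimal sketch, with label keys and values made up for illustration:

    pruneFilters := filters.NewArgs(
        filters.Arg("label", "env=test"),   // candidates must carry env=test ...
        filters.Arg("label!", "keep=true"), // ... and must not carry keep=true
    )

    matchLabels(pruneFilters, map[string]string{"env": "test"})                 // true:  may be pruned
    matchLabels(pruneFilters, map[string]string{"env": "test", "keep": "true"}) // false: kept
    matchLabels(pruneFilters, map[string]string{"env": "prod"})                 // false: kept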