mirror of https://github.com/moby/moby.git
Commit 22c411517a

Commit b717de5153 changed the name of the "size" argument to "virtualSize", as the "VirtualSize" field was re-used for calculating the size of all layers _not_ used by other images. Commit be20dc15af reverted the change in calculation, but did not change the argument name back to "size". This changes the name back to its original name, because since the introduction of the content-addressable store in docker 1.10, there no longer is a "virtual" size, so "size" is a better name for this argument.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
331 lines
8.6 KiB
Go
package daemon

import (
	"encoding/json"
	"fmt"
	"sort"
	"time"

	"github.com/pkg/errors"

	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/container"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
)

var acceptedImageFilterTags = map[string]bool{
	"dangling":  true,
	"label":     true,
	"before":    true,
	"since":     true,
	"reference": true,
}

// byCreated is a temporary type used to sort a list of images by creation
// time.
type byCreated []*types.ImageSummary

func (r byCreated) Len() int           { return len(r) }
func (r byCreated) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }
func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created }

// Map returns a map of all images in the ImageStore
func (daemon *Daemon) Map() map[image.ID]*image.Image {
	return daemon.imageStore.Map()
}

// Images returns a filtered list of images. imageFilters is a set of filter
// arguments (see api/types/filters) applied to the image list; the accepted
// filters are listed in acceptedImageFilterTags. The all argument controls
// whether untagged intermediate images (images that still have children) are
// included. When withExtraAttrs is set, per-image container counts and shared
// layer sizes are computed as well.
func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) {
	var (
		allImages    map[image.ID]*image.Image
		err          error
		danglingOnly = false
	)

	if err := imageFilters.Validate(acceptedImageFilterTags); err != nil {
		return nil, err
	}

	if imageFilters.Include("dangling") {
		if imageFilters.ExactMatch("dangling", "true") {
			danglingOnly = true
		} else if !imageFilters.ExactMatch("dangling", "false") {
			return nil, fmt.Errorf("Invalid filter 'dangling=%s'", imageFilters.Get("dangling"))
		}
	}
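	// Dangling images are untagged leaf images, so when only dangling images
	// are requested the candidate set can be restricted to the store heads
	// (images without children); tagged heads are filtered out further below.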
	if danglingOnly {
		allImages = daemon.imageStore.Heads()
	} else {
		allImages = daemon.imageStore.Map()
	}
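
	// Resolve the before/since filters to concrete images up front; candidates
	// are compared against these images' creation timestamps in the loop below.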
	var beforeFilter, sinceFilter *image.Image
	err = imageFilters.WalkValues("before", func(value string) error {
		beforeFilter, err = daemon.GetImage(value)
		return err
	})
	if err != nil {
		return nil, err
	}

	err = imageFilters.WalkValues("since", func(value string) error {
		sinceFilter, err = daemon.GetImage(value)
		return err
	})
	if err != nil {
		return nil, err
	}

	images := []*types.ImageSummary{}
	var imagesMap map[*image.Image]*types.ImageSummary
	var layerRefs map[layer.ChainID]int
	var allLayers map[layer.ChainID]layer.Layer
	var allContainers []*container.Container

	for id, img := range allImages {
		if beforeFilter != nil {
			if img.Created.Equal(beforeFilter.Created) || img.Created.After(beforeFilter.Created) {
				continue
			}
		}

		if sinceFilter != nil {
			if img.Created.Equal(sinceFilter.Created) || img.Created.Before(sinceFilter.Created) {
				continue
			}
		}

		if imageFilters.Include("label") {
			// Very old images do not have image.Config (or even labels)
			if img.Config == nil {
				continue
			}
			// We are now sure image.Config is not nil
			if !imageFilters.MatchKVList("label", img.Config.Labels) {
				continue
			}
		}
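
		// The reported size is the cumulative size of the image's layer chain;
		// the layer reference taken here is released again right after reading it.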
		layerID := img.RootFS.ChainID()
		var size int64
		if layerID != "" {
			l, err := daemon.layerStore.Get(layerID)
			if err != nil {
				return nil, err
			}

			size, err = l.Size()
			layer.ReleaseAndLog(daemon.layerStore, l)
			if err != nil {
				return nil, err
			}
		}

		newImage := newImage(img, size)
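
		// Walk all references (tags and digests) that point at this image ID to
		// populate RepoTags and RepoDigests, honouring the "reference" filter.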
		for _, ref := range daemon.referenceStore.References(id.Digest()) {
			if imageFilters.Include("reference") {
				var found bool
				var matchErr error
				for _, pattern := range imageFilters.Get("reference") {
					found, matchErr = reference.Match(pattern, ref)
					if matchErr != nil {
						return nil, matchErr
					}
				}
				if !found {
					continue
				}
			}
			if _, ok := ref.(reference.Canonical); ok {
				newImage.RepoDigests = append(newImage.RepoDigests, ref.String())
			}
			if _, ok := ref.(reference.NamedTagged); ok {
				newImage.RepoTags = append(newImage.RepoTags, ref.String())
			}
		}
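		// Images without any tag or digest are shown as "<none>" placeholders when
		// all is set or the image is a leaf (and no filter excludes them);
		// otherwise they are skipped.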
		if newImage.RepoDigests == nil && newImage.RepoTags == nil {
			if all || len(daemon.imageStore.Children(id)) == 0 {

				if imageFilters.Include("dangling") && !danglingOnly {
					//dangling=false case, so dangling image is not needed
					continue
				}
				if imageFilters.Include("reference") { // skip images with no references if filtering by reference
					continue
				}
				newImage.RepoDigests = []string{"<none>@<none>"}
				newImage.RepoTags = []string{"<none>:<none>"}
			} else {
				continue
			}
		} else if danglingOnly && len(newImage.RepoTags) > 0 {
			continue
		}

		if withExtraAttrs {
			// lazily init variables
			if imagesMap == nil {
				allContainers = daemon.List()
				allLayers = daemon.layerStore.Map()
				imagesMap = make(map[*image.Image]*types.ImageSummary)
				layerRefs = make(map[layer.ChainID]int)
			}

			// Get container count
			newImage.Containers = 0
			for _, c := range allContainers {
				if c.ImageID == id {
					newImage.Containers++
				}
			}

			// count layer references
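			// Rebuild the ChainID of every layer in the image's rootfs incrementally
			// from its DiffIDs so that each layer in the chain (not just the topmost
			// one) gets a reference count in layerRefs.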
			rootFS := *img.RootFS
			rootFS.DiffIDs = nil
			for _, id := range img.RootFS.DiffIDs {
				rootFS.Append(id)
				chid := rootFS.ChainID()
				layerRefs[chid]++
				if _, ok := allLayers[chid]; !ok {
					return nil, fmt.Errorf("layer %v was not found (corruption?)", chid)
				}
			}
			imagesMap[img] = newImage
		}

		images = append(images, newImage)
	}

	if withExtraAttrs {
		// Get Shared and Unique sizes
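		// A layer's diff size counts towards an image's SharedSize only when more
		// than one of the listed images references that layer (layerRefs > 1).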
		for img, newImage := range imagesMap {
			rootFS := *img.RootFS
			rootFS.DiffIDs = nil

			newImage.SharedSize = 0
			for _, id := range img.RootFS.DiffIDs {
				rootFS.Append(id)
				chid := rootFS.ChainID()

				diffSize, err := allLayers[chid].DiffSize()
				if err != nil {
					return nil, err
				}

				if layerRefs[chid] > 1 {
					newImage.SharedSize += diffSize
				}
			}
		}
	}

	sort.Sort(sort.Reverse(byCreated(images)))

	return images, nil
}

// SquashImage creates a new image with the diff of the specified image and the specified parent.
// The new image contains only the layers from its parent plus one extra layer which contains the diff of all the layers in between.
// The existing image(s) is not destroyed.
// If no parent is specified, a new image is created from the diff of all the specified image's layers merged into a single new layer, and the resulting image has no parent.
func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
	img, err := daemon.imageStore.Get(image.ID(id))
	if err != nil {
		return "", err
	}

	var parentImg *image.Image
	var parentChainID layer.ChainID
	if len(parent) != 0 {
		parentImg, err = daemon.imageStore.Get(image.ID(parent))
		if err != nil {
			return "", errors.Wrap(err, "error getting specified parent layer")
		}
		parentChainID = parentImg.RootFS.ChainID()
	} else {
		rootFS := image.NewRootFS()
		parentImg = &image.Image{RootFS: rootFS}
	}

	l, err := daemon.layerStore.Get(img.RootFS.ChainID())
	if err != nil {
		return "", errors.Wrap(err, "error getting image layer")
	}
	defer daemon.layerStore.Release(l)
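
	// TarStreamFrom exports the combined diff between the parent chain and this
	// image's topmost layer as a single tar stream, which is then registered as
	// one new layer on top of the parent.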
	ts, err := l.TarStreamFrom(parentChainID)
	if err != nil {
		return "", errors.Wrapf(err, "error getting tar stream to parent")
	}
	defer ts.Close()

	newL, err := daemon.layerStore.Register(ts, parentChainID)
	if err != nil {
		return "", errors.Wrap(err, "error registering layer")
	}
	defer daemon.layerStore.Release(newL)

	var newImage image.Image
	newImage = *img
	newImage.RootFS = nil

	var rootFS image.RootFS
	rootFS = *parentImg.RootFS
	rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID())
	newImage.RootFS = &rootFS
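
	// History entries beyond the parent's history correspond to layers that were
	// collapsed into the single squashed layer, so mark them as empty layers; the
	// new layer itself is accounted for by the history entry appended below.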
	for i, hi := range newImage.History {
		if i >= len(parentImg.History) {
			hi.EmptyLayer = true
		}
		newImage.History[i] = hi
	}

	now := time.Now()
	var historyComment string
	if len(parent) > 0 {
		historyComment = fmt.Sprintf("merge %s to %s", id, parent)
	} else {
		historyComment = fmt.Sprintf("create new from %s", id)
	}

	newImage.History = append(newImage.History, image.History{
		Created: now,
		Comment: historyComment,
	})
	newImage.Created = now

	b, err := json.Marshal(&newImage)
	if err != nil {
		return "", errors.Wrap(err, "error marshalling image config")
	}

	newImgID, err := daemon.imageStore.Create(b)
	if err != nil {
		return "", errors.Wrap(err, "error creating new image after squash")
	}
	return string(newImgID), nil
}

func newImage(image *image.Image, size int64) *types.ImageSummary {
	newImage := new(types.ImageSummary)
	newImage.ParentID = image.Parent.String()
	newImage.ID = image.ID().String()
	newImage.Created = image.Created.Unix()
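	// Since the switch to content-addressable storage in Docker 1.10 there is no
	// separate "virtual" size, so Size and VirtualSize report the same value.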
	newImage.Size = size
	newImage.VirtualSize = size
	newImage.SharedSize = -1
	newImage.Containers = -1
	if image.Config != nil {
		newImage.Labels = image.Config.Labels
	}
	return newImage
}