package daemon

import (
	"encoding/json"
	"fmt"
	"sort"
	"time"

	"github.com/pkg/errors"

	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/container"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
)

var acceptedImageFilterTags = map[string]bool{
	"dangling":  true,
	"label":     true,
	"before":    true,
	"since":     true,
	"reference": true,
}

// byCreated is a temporary type used to sort a list of images by creation
// time.
type byCreated []*types.ImageSummary

func (r byCreated) Len() int      { return len(r) }
func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created }

// Map returns a map of all images in the ImageStore
func (daemon *Daemon) Map() map[image.ID]*image.Image {
	return daemon.imageStore.Map()
}

// Images returns a filtered list of images. imageFilters is a set of filter
// arguments (see api/types/filters) applied to the result. The all argument
// controls whether all images in the graph are returned, or just the heads
// (images that are not the parent of any other image). When withExtraAttrs is
// true, per-image container counts and shared-size information are included
// as well.
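//
// A minimal usage sketch (hypothetical caller, not taken from this package):
//
//	filterArgs := filters.NewArgs()
//	filterArgs.Add("dangling", "true")
//	summaries, err := daemon.Images(filterArgs, false, false)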
func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) {
	var (
		allImages    map[image.ID]*image.Image
		err          error
		danglingOnly = false
	)

	if err := imageFilters.Validate(acceptedImageFilterTags); err != nil {
		return nil, err
	}
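
	// Interpret the "dangling" filter; only "true" and "false" are accepted.
	// dangling=true restricts the walk to the store heads (images with no
	// children) rather than every image in the store.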
	if imageFilters.Include("dangling") {
		if imageFilters.ExactMatch("dangling", "true") {
			danglingOnly = true
		} else if !imageFilters.ExactMatch("dangling", "false") {
			return nil, fmt.Errorf("Invalid filter 'dangling=%s'", imageFilters.Get("dangling"))
		}
	}
	if danglingOnly {
		allImages = daemon.imageStore.Heads()
	} else {
		allImages = daemon.imageStore.Map()
	}
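
	// Resolve the before/since filters to concrete images up front so their
	// creation times can be compared against each candidate in the loop below.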
	var beforeFilter, sinceFilter *image.Image
	err = imageFilters.WalkValues("before", func(value string) error {
		beforeFilter, err = daemon.GetImage(value)
		return err
	})
	if err != nil {
		return nil, err
	}

	err = imageFilters.WalkValues("since", func(value string) error {
		sinceFilter, err = daemon.GetImage(value)
		return err
	})
	if err != nil {
		return nil, err
	}

	images := []*types.ImageSummary{}
	var imagesMap map[*image.Image]*types.ImageSummary
	var layerRefs map[layer.ChainID]int
	var allLayers map[layer.ChainID]layer.Layer
	var allContainers []*container.Container
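
	// Walk every candidate image, apply the remaining filters, and build the
	// list of API summaries.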
	for id, img := range allImages {
		if beforeFilter != nil {
			if img.Created.Equal(beforeFilter.Created) || img.Created.After(beforeFilter.Created) {
				continue
			}
		}

		if sinceFilter != nil {
			if img.Created.Equal(sinceFilter.Created) || img.Created.Before(sinceFilter.Created) {
				continue
			}
		}

		if imageFilters.Include("label") {
			// Very old images do not have image.Config (or even labels).
			if img.Config == nil {
				continue
			}
			// We are now sure image.Config is not nil.
			if !imageFilters.MatchKVList("label", img.Config.Labels) {
				continue
			}
		}
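
		// Resolve the image's top layer to compute its total size.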
		layerID := img.RootFS.ChainID()
		var size int64
		if layerID != "" {
			l, err := daemon.layerStore.Get(layerID)
			if err != nil {
				return nil, err
			}

			size, err = l.Size()
			layer.ReleaseAndLog(daemon.layerStore, l)
			if err != nil {
				return nil, err
			}
		}

		newImage := newImage(img, size)
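
		// Collect repo tags and digests from the reference store, honoring
		// the "reference" filter when it is set.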
		for _, ref := range daemon.referenceStore.References(id.Digest()) {
			if imageFilters.Include("reference") {
				var found bool
				var matchErr error
				for _, pattern := range imageFilters.Get("reference") {
					found, matchErr = reference.FamiliarMatch(pattern, ref)
					if matchErr != nil {
						return nil, matchErr
					}
				}
				if !found {
					continue
				}
			}
			if _, ok := ref.(reference.Canonical); ok {
				newImage.RepoDigests = append(newImage.RepoDigests, reference.FamiliarString(ref))
			}
			if _, ok := ref.(reference.NamedTagged); ok {
				newImage.RepoTags = append(newImage.RepoTags, reference.FamiliarString(ref))
			}
		}
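		// Images without any references are either reported with <none>
		// placeholders or skipped, depending on the filters and the all flag.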
		if newImage.RepoDigests == nil && newImage.RepoTags == nil {
			if all || len(daemon.imageStore.Children(id)) == 0 {

				if imageFilters.Include("dangling") && !danglingOnly {
					// dangling=false was requested, so skip dangling images
					continue
				}
				if imageFilters.Include("reference") { // skip images with no references if filtering by reference
					continue
				}
				newImage.RepoDigests = []string{"<none>@<none>"}
				newImage.RepoTags = []string{"<none>:<none>"}
			} else {
				continue
			}
		} else if danglingOnly && len(newImage.RepoTags) > 0 {
			continue
		}
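
		// When extra attributes were requested, count containers using this
		// image and record the bookkeeping needed for the shared-size pass
		// after the loop.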
		if withExtraAttrs {
			// lazily init variables
			if imagesMap == nil {
				allContainers = daemon.List()
				allLayers = daemon.layerStore.Map()
				imagesMap = make(map[*image.Image]*types.ImageSummary)
				layerRefs = make(map[layer.ChainID]int)
			}

			// Get container count
			newImage.Containers = 0
			for _, c := range allContainers {
				if c.ImageID == id {
					newImage.Containers++
				}
			}

			// count layer references
			rootFS := *img.RootFS
			rootFS.DiffIDs = nil
			for _, id := range img.RootFS.DiffIDs {
				rootFS.Append(id)
				chid := rootFS.ChainID()
				layerRefs[chid]++
				if _, ok := allLayers[chid]; !ok {
					return nil, fmt.Errorf("layer %v was not found (corruption?)", chid)
				}
			}
			imagesMap[img] = newImage
		}

		images = append(images, newImage)
	}

	if withExtraAttrs {
		// Get Shared sizes
		for img, newImage := range imagesMap {
			rootFS := *img.RootFS
			rootFS.DiffIDs = nil

			newImage.SharedSize = 0
			for _, id := range img.RootFS.DiffIDs {
				rootFS.Append(id)
				chid := rootFS.ChainID()

				diffSize, err := allLayers[chid].DiffSize()
				if err != nil {
					return nil, err
				}

				if layerRefs[chid] > 1 {
					newImage.SharedSize += diffSize
				}
			}
		}
	}

	sort.Sort(sort.Reverse(byCreated(images)))

	return images, nil
}

// SquashImage creates a new image with the diff of the specified image and
// the specified parent. The new image contains only the layers from its
// parent plus one extra layer that holds the diff of all the layers in
// between. The existing image(s) are not destroyed.
// If no parent is specified, the diff of all the specified image's layers is
// merged into a single new layer with no parent.
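//
// A minimal usage sketch (imgID and parentID are hypothetical image IDs, not
// taken from this package):
//
//	squashedID, err := daemon.SquashImage(imgID, parentID)
//	if err != nil {
//		return err
//	}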
func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
	img, err := daemon.imageStore.Get(image.ID(id))
	if err != nil {
		return "", err
	}

	var parentImg *image.Image
	var parentChainID layer.ChainID
	if len(parent) != 0 {
		parentImg, err = daemon.imageStore.Get(image.ID(parent))
		if err != nil {
			return "", errors.Wrap(err, "error getting specified parent layer")
		}
		parentChainID = parentImg.RootFS.ChainID()
	} else {
		rootFS := image.NewRootFS()
		parentImg = &image.Image{RootFS: rootFS}
	}
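
	// Stream the filesystem diff between the image and the chosen parent and
	// register it as a single new layer.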
	l, err := daemon.layerStore.Get(img.RootFS.ChainID())
	if err != nil {
		return "", errors.Wrap(err, "error getting image layer")
	}
	defer daemon.layerStore.Release(l)

	ts, err := l.TarStreamFrom(parentChainID)
	if err != nil {
		return "", errors.Wrap(err, "error getting tar stream to parent")
	}
	defer ts.Close()

	newL, err := daemon.layerStore.Register(ts, parentChainID)
	if err != nil {
		return "", errors.Wrap(err, "error registering layer")
	}
	defer daemon.layerStore.Release(newL)

	newImage := *img
	newImage.RootFS = nil

	rootFS := *parentImg.RootFS
	rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID())
	newImage.RootFS = &rootFS
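
	// History entries that came from the squashed-away layers no longer map
	// to distinct layers, so mark them as empty; the new squash layer gets
	// its own history entry below.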
	for i, hi := range newImage.History {
		if i >= len(parentImg.History) {
			hi.EmptyLayer = true
		}
		newImage.History[i] = hi
	}

	now := time.Now()
	var historyComment string
	if len(parent) > 0 {
		historyComment = fmt.Sprintf("merge %s to %s", id, parent)
	} else {
		historyComment = fmt.Sprintf("create new from %s", id)
	}

	newImage.History = append(newImage.History, image.History{
		Created: now,
		Comment: historyComment,
	})
	newImage.Created = now

	b, err := json.Marshal(&newImage)
	if err != nil {
		return "", errors.Wrap(err, "error marshalling image config")
	}

	newImgID, err := daemon.imageStore.Create(b)
	if err != nil {
		return "", errors.Wrap(err, "error creating new image after squash")
	}
	return string(newImgID), nil
}
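
// newImage maps an image.Image and its computed size to an API ImageSummary.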
func newImage(image *image.Image, size int64) *types.ImageSummary {
	newImage := new(types.ImageSummary)
	newImage.ParentID = image.Parent.String()
	newImage.ID = image.ID().String()
	newImage.Created = image.Created.Unix()
	newImage.Size = size
	newImage.VirtualSize = size
	newImage.SharedSize = -1
	newImage.Containers = -1
	if image.Config != nil {
		newImage.Labels = image.Config.Labels
	}
	return newImage
}