Merge pull request #26839 from tonistiigi/build-cache

Implement build cache based on history array

Commit 7944480dd0 (14 changed files with 400 additions and 64 deletions)
@@ -70,7 +70,7 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
 	var buildUlimits = []*units.Ulimit{}
 	ulimitsJSON := r.FormValue("ulimits")
 	if ulimitsJSON != "" {
-		if err := json.NewDecoder(strings.NewReader(ulimitsJSON)).Decode(&buildUlimits); err != nil {
+		if err := json.Unmarshal([]byte(ulimitsJSON), &buildUlimits); err != nil {
 			return nil, err
 		}
 		options.Ulimits = buildUlimits
@@ -79,7 +79,7 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
 	var buildArgs = map[string]string{}
 	buildArgsJSON := r.FormValue("buildargs")
 	if buildArgsJSON != "" {
-		if err := json.NewDecoder(strings.NewReader(buildArgsJSON)).Decode(&buildArgs); err != nil {
+		if err := json.Unmarshal([]byte(buildArgsJSON), &buildArgs); err != nil {
 			return nil, err
 		}
 		options.BuildArgs = buildArgs
@@ -87,12 +87,21 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
 	var labels = map[string]string{}
 	labelsJSON := r.FormValue("labels")
 	if labelsJSON != "" {
-		if err := json.NewDecoder(strings.NewReader(labelsJSON)).Decode(&labels); err != nil {
+		if err := json.Unmarshal([]byte(labelsJSON), &labels); err != nil {
 			return nil, err
 		}
 		options.Labels = labels
 	}
 
+	var cacheFrom = []string{}
+	cacheFromJSON := r.FormValue("cachefrom")
+	if cacheFromJSON != "" {
+		if err := json.Unmarshal([]byte(cacheFromJSON), &cacheFrom); err != nil {
+			return nil, err
+		}
+		options.CacheFrom = cacheFrom
+	}
+
 	return options, nil
 }
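The `cachefrom` value rides the build API the same way `buildargs` and `labels` do: the client JSON-encodes the list into the query string and the daemon decodes it back from the form value (see also the `imageBuildOptionsToQuery` hunk further down). Below is a minimal standalone sketch of that round trip; the image references and variable names are made up for illustration, only the `encoding/json` and `net/url` usage mirrors the patch.

package main

import (
	"encoding/json"
	"fmt"
	"net/url"
)

func main() {
	// Client side: marshal the --cache-from list into a JSON array and put
	// it in the query string, as imageBuildOptionsToQuery does.
	cacheFrom := []string{"myapp:latest", "myapp:builder"}
	cacheFromJSON, err := json.Marshal(cacheFrom)
	if err != nil {
		panic(err)
	}
	query := url.Values{}
	query.Set("cachefrom", string(cacheFromJSON))

	// Daemon side: read the form value back and unmarshal it, mirroring
	// newImageBuildOptions above.
	var decoded []string
	if v := query.Get("cachefrom"); v != "" {
		if err := json.Unmarshal([]byte(v), &decoded); err != nil {
			panic(err)
		}
	}
	fmt.Println(decoded) // [myapp:latest myapp:builder]
}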
@@ -151,6 +151,9 @@ type ImageBuildOptions struct {
 	// preserves the original image and creates a new one from the parent with all
 	// the changes applied to a single layer
 	Squash bool
+	// CacheFrom specifies images that are used for matching cache. Images
+	// specified here do not need to have a valid parent chain to match cache.
+	CacheFrom []string
 }
 
 // ImageBuildResponse holds information
@@ -153,10 +153,16 @@ type Image interface {
 	RunConfig() *container.Config
 }
 
-// ImageCache abstracts an image cache store.
+// ImageCacheBuilder represents a generator for stateful image cache.
+type ImageCacheBuilder interface {
+	// MakeImageCache creates a stateful image cache.
+	MakeImageCache(cacheFrom []string) ImageCache
+}
+
+// ImageCache abstracts an image cache.
 // (parent image, child runconfig) -> child image
 type ImageCache interface {
 	// GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent`
 	// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
-	GetCachedImageOnBuild(parentID string, cfg *container.Config) (imageID string, err error)
+	GetCache(parentID string, cfg *container.Config) (imageID string, err error)
 }
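To make the new contract concrete, here is a hypothetical minimal implementation of both interfaces: a backend with no caching support could hand back a cache that always misses, i.e. an empty image ID with a nil error, which is exactly the miss convention documented on the interface. This is an illustrative sketch, not code from the patch.

package nocache

import (
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/builder"
)

// Backend is a hypothetical builder backend with no cache support.
type Backend struct{}

// MakeImageCache satisfies builder.ImageCacheBuilder by returning a cache
// that never hits, regardless of the cache-from images requested.
func (b *Backend) MakeImageCache(cacheFrom []string) builder.ImageCache {
	return missCache{}
}

// missCache always reports a cache miss: an empty image ID and a nil error.
type missCache struct{}

func (missCache) GetCache(parentID string, cfg *container.Config) (string, error) {
	return "", nil
}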
@@ -75,6 +75,8 @@ type Builder struct {
 
 	// TODO: remove once docker.Commit can receive a tag
 	id string
+
+	imageCache builder.ImageCache
 }
 
 // BuildManager implements builder.Backend and is shared across all Builder objects.
@@ -136,6 +138,10 @@ func NewBuilder(clientCtx context.Context, config *types.ImageBuildOptions, back
 			LookingForDirectives: true,
 		},
 	}
+	if icb, ok := backend.(builder.ImageCacheBuilder); ok {
+		b.imageCache = icb.MakeImageCache(config.CacheFrom)
+	}
+
 	parser.SetEscapeToken(parser.DefaultEscapeToken, &b.directive) // Assume the default token for escape
 
 	if dockerfile != nil {
@@ -438,18 +438,16 @@ func (b *Builder) processImageFrom(img builder.Image) error {
 	return nil
 }
 
-// probeCache checks if `b.docker` implements builder.ImageCache and image-caching
-// is enabled (`b.UseCache`).
-// If so attempts to look up the current `b.image` and `b.runConfig` pair with `b.docker`.
+// probeCache checks if cache match can be found for current build instruction.
 // If an image is found, probeCache returns `(true, nil)`.
 // If no image is found, it returns `(false, nil)`.
 // If there is any error, it returns `(false, err)`.
 func (b *Builder) probeCache() (bool, error) {
-	c, ok := b.docker.(builder.ImageCache)
-	if !ok || b.options.NoCache || b.cacheBusted {
+	c := b.imageCache
+	if c == nil || b.options.NoCache || b.cacheBusted {
 		return false, nil
 	}
-	cache, err := c.GetCachedImageOnBuild(b.image, b.runConfig)
+	cache, err := c.GetCache(b.image, b.runConfig)
 	if err != nil {
 		return false, err
 	}
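For orientation, a simplified sketch of the per-instruction flow around `probeCache`: probe first, skip the instruction on a hit, execute it on a miss. This paraphrases the surrounding builder logic in a generic helper rather than quoting it; the function and parameter names here are invented.

package buildsketch

// runStep is an illustrative helper, not part of the patch. It shows the
// shape of the per-instruction cache check: probe first, skip the step on a
// hit, execute it on a miss.
func runStep(probe func() (bool, error), execute func() error) error {
	hit, err := probe() // in the builder this is b.probeCache()
	if err != nil {
		return err
	}
	if hit {
		// Cache hit: reuse the cached image ID and skip the instruction.
		return nil
	}
	// Cache miss: run the instruction; the builder also marks the cache as
	// busted so later steps stop probing.
	return execute()
}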
@@ -55,6 +55,7 @@ type buildOptions struct {
 	rm        bool
 	forceRm   bool
 	pull      bool
+	cacheFrom []string
 }
 
 // NewBuildCommand creates a new `docker build` command
@@ -98,6 +99,7 @@ func NewBuildCommand(dockerCli *command.DockerCli) *cobra.Command {
 	flags.BoolVar(&options.forceRm, "force-rm", false, "Always remove intermediate containers")
 	flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
 	flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image")
+	flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources")
 
 	command.AddTrustedFlags(flags, true)
 
@@ -289,6 +291,7 @@ func runBuild(dockerCli *command.DockerCli, options buildOptions) error {
 		BuildArgs:   runconfigopts.ConvertKVStringsToMap(options.buildArgs.GetAll()),
 		AuthConfigs: authConfig,
 		Labels:      runconfigopts.ConvertKVStringsToMap(options.labels),
+		CacheFrom:   options.cacheFrom,
 	}
 
 	response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions)
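As a usage sketch, this is roughly how a Go program could set the new field through the API client, assuming the `github.com/docker/docker/client` package from the same tree and its `ImageBuild` method; the image references and the use of stdin as the build context are placeholders, not taken from the patch.

package main

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// buildCtx would normally be a tar stream of the build context;
	// os.Stdin is just a stand-in for this sketch.
	var buildCtx io.Reader = os.Stdin

	resp, err := cli.ImageBuild(context.Background(), buildCtx, types.ImageBuildOptions{
		Tags:      []string{"myapp:latest"},
		CacheFrom: []string{"myapp:latest"}, // images to consider as cache sources
	})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body) // stream the build output
}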
@@ -110,6 +110,13 @@ func imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, erro
 		return query, err
 	}
 	query.Set("labels", string(labelsJSON))
+
+	cacheFromJSON, err := json.Marshal(options.CacheFrom)
+	if err != nil {
+		return query, err
+	}
+	query.Set("cachefrom", string(cacheFromJSON))
+
 	return query, nil
 }
 
daemon/cache.go (new file, 254 lines)

@@ -0,0 +1,254 @@
package daemon

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strings"

	"github.com/Sirupsen/logrus"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/builder"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/runconfig"
	"github.com/pkg/errors"
)

// getLocalCachedImage returns the most recent created image that is a child
// of the image with imgID, that had the same config when it was
// created. nil is returned if a child cannot be found. An error is
// returned if the parent image cannot be found.
func (daemon *Daemon) getLocalCachedImage(imgID image.ID, config *containertypes.Config) (*image.Image, error) {
	// Loop on the children of the given image and check the config
	getMatch := func(siblings []image.ID) (*image.Image, error) {
		var match *image.Image
		for _, id := range siblings {
			img, err := daemon.imageStore.Get(id)
			if err != nil {
				return nil, fmt.Errorf("unable to find image %q", id)
			}

			if runconfig.Compare(&img.ContainerConfig, config) {
				// check for the most up to date match
				if match == nil || match.Created.Before(img.Created) {
					match = img
				}
			}
		}
		return match, nil
	}

	// In this case, this is `FROM scratch`, which isn't an actual image.
	if imgID == "" {
		images := daemon.imageStore.Map()
		var siblings []image.ID
		for id, img := range images {
			if img.Parent == imgID {
				siblings = append(siblings, id)
			}
		}
		return getMatch(siblings)
	}

	// find match from child images
	siblings := daemon.imageStore.Children(imgID)
	return getMatch(siblings)
}

// MakeImageCache creates a stateful image cache.
func (daemon *Daemon) MakeImageCache(sourceRefs []string) builder.ImageCache {
	if len(sourceRefs) == 0 {
		return &localImageCache{daemon}
	}

	cache := &imageCache{daemon: daemon, localImageCache: &localImageCache{daemon}}

	for _, ref := range sourceRefs {
		img, err := daemon.GetImage(ref)
		if err != nil {
			logrus.Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err)
			continue
		}
		cache.sources = append(cache.sources, img)
	}

	return cache
}

// localImageCache is cache based on parent chain.
type localImageCache struct {
	daemon *Daemon
}

func (lic *localImageCache) GetCache(imgID string, config *containertypes.Config) (string, error) {
	return getImageIDAndError(lic.daemon.getLocalCachedImage(image.ID(imgID), config))
}

// imageCache is cache based on history objects. Requires initial set of images.
type imageCache struct {
	sources         []*image.Image
	daemon          *Daemon
	localImageCache *localImageCache
}

func (ic *imageCache) restoreCachedImage(parent, target *image.Image, cfg *containertypes.Config) (image.ID, error) {
	var history []image.History
	rootFS := image.NewRootFS()
	lenHistory := 0
	if parent != nil {
		history = parent.History
		rootFS = parent.RootFS
		lenHistory = len(parent.History)
	}
	history = append(history, target.History[lenHistory])
	if layer := getLayerForHistoryIndex(target, lenHistory); layer != "" {
		rootFS.Append(layer)
	}

	config, err := json.Marshal(&image.Image{
		V1Image: image.V1Image{
			DockerVersion: dockerversion.Version,
			Config:        cfg,
			Architecture:  target.Architecture,
			OS:            target.OS,
			Author:        target.Author,
			Created:       history[len(history)-1].Created,
		},
		RootFS:     rootFS,
		History:    history,
		OSFeatures: target.OSFeatures,
		OSVersion:  target.OSVersion,
	})
	if err != nil {
		return "", errors.Wrap(err, "failed to marshal image config")
	}

	imgID, err := ic.daemon.imageStore.Create(config)
	if err != nil {
		return "", errors.Wrap(err, "failed to create cache image")
	}

	if parent != nil {
		if err := ic.daemon.imageStore.SetParent(imgID, parent.ID()); err != nil {
			return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID())
		}
	}
	return imgID, nil
}

func (ic *imageCache) isParent(imgID, parentID image.ID) bool {
	nextParent, err := ic.daemon.imageStore.GetParent(imgID)
	if err != nil {
		return false
	}
	if nextParent == parentID {
		return true
	}
	return ic.isParent(nextParent, parentID)
}

func (ic *imageCache) GetCache(parentID string, cfg *containertypes.Config) (string, error) {
	imgID, err := ic.localImageCache.GetCache(parentID, cfg)
	if err != nil {
		return "", err
	}
	if imgID != "" {
		for _, s := range ic.sources {
			if ic.isParent(s.ID(), image.ID(imgID)) {
				return imgID, nil
			}
		}
	}

	var parent *image.Image
	lenHistory := 0
	if parentID != "" {
		parent, err = ic.daemon.imageStore.Get(image.ID(parentID))
		if err != nil {
			return "", errors.Wrapf(err, "unable to find image %v", parentID)
		}
		lenHistory = len(parent.History)
	}

	for _, target := range ic.sources {
		if !isValidParent(target, parent) || !isValidConfig(cfg, target.History[lenHistory]) {
			continue
		}

		if len(target.History)-1 == lenHistory { // last
			if parent != nil {
				if err := ic.daemon.imageStore.SetParent(target.ID(), parent.ID()); err != nil {
					return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID())
				}
			}
			return target.ID().String(), nil
		}

		imgID, err := ic.restoreCachedImage(parent, target, cfg)
		if err != nil {
			return "", errors.Wrapf(err, "failed to restore cached image from %q to %v", parentID, target.ID())
		}

		ic.sources = []*image.Image{target} // avoid jumping to different target, tuned for safety atm
		return imgID.String(), nil
	}

	return "", nil
}

func getImageIDAndError(img *image.Image, err error) (string, error) {
	if img == nil || err != nil {
		return "", err
	}
	return img.ID().String(), nil
}

func isValidParent(img, parent *image.Image) bool {
	if len(img.History) == 0 {
		return false
	}
	if parent == nil || len(parent.History) == 0 && len(parent.RootFS.DiffIDs) == 0 {
		return true
	}
	if len(parent.History) >= len(img.History) {
		return false
	}
	if len(parent.RootFS.DiffIDs) >= len(img.RootFS.DiffIDs) {
		return false
	}

	for i, h := range parent.History {
		if !reflect.DeepEqual(h, img.History[i]) {
			return false
		}
	}
	for i, d := range parent.RootFS.DiffIDs {
		if d != img.RootFS.DiffIDs[i] {
			return false
		}
	}
	return true
}

func getLayerForHistoryIndex(image *image.Image, index int) layer.DiffID {
	layerIndex := 0
	for i, h := range image.History {
		if i == index {
			if h.EmptyLayer {
				return ""
			}
			break
		}
		if !h.EmptyLayer {
			layerIndex++
		}
	}
	return image.RootFS.DiffIDs[layerIndex] // validate?
}

func isValidConfig(cfg *containertypes.Config, h image.History) bool {
	// todo: make this format better than join that loses data
	return strings.Join(cfg.Cmd, " ") == h.CreatedBy
}
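The subtlest helper above is `getLayerForHistoryIndex`: history entries and `RootFS.DiffIDs` are parallel but not one-to-one, because metadata-only steps (`ENV`, `LABEL`, and so on) record a history entry with `EmptyLayer: true` and contribute no layer. The toy program below re-states that mapping with simplified stand-in types (not the real `image.History` or `layer.DiffID`), using the same Dockerfile shape as the integration test further down.

package main

import "fmt"

// toyHistory mirrors the one field of image.History that matters for the
// mapping: whether the build step produced a filesystem layer.
type toyHistory struct {
	CreatedBy  string
	EmptyLayer bool
}

// layerForHistoryIndex re-states the logic of getLayerForHistoryIndex above:
// walk the history, counting only entries that produced a layer, and stop at
// the requested index. Empty-layer entries map to no layer at all.
func layerForHistoryIndex(history []toyHistory, diffIDs []string, index int) string {
	layerIndex := 0
	for i, h := range history {
		if i == index {
			if h.EmptyLayer {
				return "" // this step added no layer
			}
			break
		}
		if !h.EmptyLayer {
			layerIndex++
		}
	}
	return diffIDs[layerIndex]
}

func main() {
	history := []toyHistory{
		{CreatedBy: "FROM busybox"},                  // layer 0
		{CreatedBy: "ENV FOO=bar", EmptyLayer: true}, // no layer
		{CreatedBy: "ADD baz /"},                     // layer 1
		{CreatedBy: "RUN touch bax"},                 // layer 2
	}
	diffIDs := []string{"sha256:aaa", "sha256:bbb", "sha256:ccc"}

	fmt.Println(layerForHistoryIndex(history, diffIDs, 1)) // "" (ENV has no layer)
	fmt.Println(layerForHistoryIndex(history, diffIDs, 2)) // sha256:bbb (the ADD layer)
}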
@@ -3,11 +3,9 @@ package daemon
 import (
 	"fmt"
 
-	containertypes "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/reference"
-	"github.com/docker/docker/runconfig"
 )
 
 // ErrImageDoesNotExist is error returned when no image can be found for a reference.
@@ -71,54 +69,3 @@ func (daemon *Daemon) GetImageOnBuild(name string) (builder.Image, error) {
 	}
 	return img, nil
 }
-
-// GetCachedImage returns the most recent created image that is a child
-// of the image with imgID, that had the same config when it was
-// created. nil is returned if a child cannot be found. An error is
-// returned if the parent image cannot be found.
-func (daemon *Daemon) GetCachedImage(imgID image.ID, config *containertypes.Config) (*image.Image, error) {
-	// Loop on the children of the given image and check the config
-	getMatch := func(siblings []image.ID) (*image.Image, error) {
-		var match *image.Image
-		for _, id := range siblings {
-			img, err := daemon.imageStore.Get(id)
-			if err != nil {
-				return nil, fmt.Errorf("unable to find image %q", id)
-			}
-
-			if runconfig.Compare(&img.ContainerConfig, config) {
-				// check for the most up to date match
-				if match == nil || match.Created.Before(img.Created) {
-					match = img
-				}
-			}
-		}
-		return match, nil
-	}
-
-	// In this case, this is `FROM scratch`, which isn't an actual image.
-	if imgID == "" {
-		images := daemon.imageStore.Map()
-		var siblings []image.ID
-		for id, img := range images {
-			if img.Parent == imgID {
-				siblings = append(siblings, id)
-			}
-		}
-		return getMatch(siblings)
-	}
-
-	// find match from child images
-	siblings := daemon.imageStore.Children(imgID)
-	return getMatch(siblings)
-}
-
-// GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent`
-// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
-func (daemon *Daemon) GetCachedImageOnBuild(imgID string, cfg *containertypes.Config) (string, error) {
-	cache, err := daemon.GetCachedImage(image.ID(imgID), cfg)
-	if cache == nil || err != nil {
-		return "", err
-	}
-	return cache.ID().String(), nil
-}
@@ -124,6 +124,7 @@ This section lists each version from latest to oldest. Each listing includes a
 * `POST /containers/create` now validates IPAMConfig in NetworkingConfig, and returns error for invalid IPv4 and IPv6 addresses (`--ip` and `--ip6` in `docker create/run`).
 * `POST /containers/create` now takes a `Mounts` field in `HostConfig` which replaces `Binds` and `Volumes`. *note*: `Binds` and `Volumes` are still available but are exclusive with `Mounts`
 * `POST /build` now performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. Note that this change is _unversioned_ and applied to all API versions.
+* `POST /build` accepts `cachefrom` parameter to specify images used for build cache.
 
 ### v1.24 API changes
 
@@ -1715,6 +1715,7 @@ or being killed.
 there must be a file with the corresponding path inside the tarball.
 - **q** – Suppress verbose build output.
 - **nocache** – Do not use the cache when building the image.
+- **cachefrom** - JSON array of images used for build cache resolution.
 - **pull** - Attempt to pull the image even if an older image exists locally.
 - **rm** - Remove intermediate containers after a successful build (default behavior).
 - **forcerm** - Always remove intermediate containers (includes `rm`).
@@ -106,6 +106,13 @@ the `Using cache` message in the console output.
      ---> 7ea8aef582cc
     Successfully built 7ea8aef582cc
 
+Build cache is only used from images that have a local parent chain. This means
+that these images were created by previous builds or the whole chain of images
+was loaded with `docker load`. If you wish to use build cache of a specific
+image you can specify it with `--cache-from` option. Images specified with
+`--cache-from` do not need to have a parent chain and may be pulled from other
+registries.
+
 When you're done with your build, you're ready to look into [*Pushing a
 repository to its registry*](../tutorials/dockerrepos.md#contributing-to-docker-hub).
 
@@ -17,6 +17,7 @@ Build an image from a Dockerfile
 
 Options:
       --build-arg value          Set build-time variables (default [])
+      --cache-from value         Images to consider as cache sources (default [])
       --cgroup-parent string     Optional parent cgroup for the container
       --cpu-period int           Limit the CPU CFS (Completely Fair Scheduler) period
       --cpu-quota int            Limit the CPU CFS (Completely Fair Scheduler) quota
@@ -6931,3 +6931,96 @@ func (s *DockerSuite) TestBuildWithFailure(c *check.C) {
 	c.Assert(stdout, checker.Not(checker.Contains), "Step 1/2 : FROM busybox")
 	c.Assert(stdout, checker.Not(checker.Contains), "Step 2/2 : RUN nobody")
 }
+
+func (s *DockerSuite) TestBuildCacheFrom(c *check.C) {
+	testRequires(c, DaemonIsLinux) // All tests that do save are skipped in windows
+	dockerfile := `
+		FROM busybox
+		ENV FOO=bar
+		ADD baz /
+		RUN touch bax`
+	ctx, err := fakeContext(dockerfile, map[string]string{
+		"Dockerfile": dockerfile,
+		"baz":        "baz",
+	})
+	c.Assert(err, checker.IsNil)
+	defer ctx.Close()
+
+	id1, err := buildImageFromContext("build1", ctx, true)
+	c.Assert(err, checker.IsNil)
+
+	// rebuild with cache-from
+	id2, out, err := buildImageFromContextWithOut("build2", ctx, true, "--cache-from=build1")
+	c.Assert(err, checker.IsNil)
+	c.Assert(id1, checker.Equals, id2)
+	c.Assert(strings.Count(out, "Using cache"), checker.Equals, 3)
+	dockerCmd(c, "rmi", "build2")
+
+	// no cache match with unknown source
+	id2, out, err = buildImageFromContextWithOut("build2", ctx, true, "--cache-from=nosuchtag")
+	c.Assert(err, checker.IsNil)
+	c.Assert(id1, checker.Not(checker.Equals), id2)
+	c.Assert(strings.Count(out, "Using cache"), checker.Equals, 0)
+	dockerCmd(c, "rmi", "build2")
+
+	// clear parent images
+	tempDir, err := ioutil.TempDir("", "test-build-cache-from-")
+	if err != nil {
+		c.Fatalf("failed to create temporary directory: %s", tempDir)
+	}
+	defer os.RemoveAll(tempDir)
+	tempFile := filepath.Join(tempDir, "img.tar")
+	dockerCmd(c, "save", "-o", tempFile, "build1")
+	dockerCmd(c, "rmi", "build1")
+	dockerCmd(c, "load", "-i", tempFile)
+	parentID, _ := dockerCmd(c, "inspect", "-f", "{{.Parent}}", "build1")
+	c.Assert(strings.TrimSpace(parentID), checker.Equals, "")
+
+	// cache still applies without parents
+	id2, out, err = buildImageFromContextWithOut("build2", ctx, true, "--cache-from=build1")
+	c.Assert(err, checker.IsNil)
+	c.Assert(id1, checker.Equals, id2)
+	c.Assert(strings.Count(out, "Using cache"), checker.Equals, 3)
+	history1, _ := dockerCmd(c, "history", "-q", "build2")
+
+	// Retry, no new intermediate images
+	id3, out, err := buildImageFromContextWithOut("build3", ctx, true, "--cache-from=build1")
+	c.Assert(err, checker.IsNil)
+	c.Assert(id1, checker.Equals, id3)
+	c.Assert(strings.Count(out, "Using cache"), checker.Equals, 3)
+	history2, _ := dockerCmd(c, "history", "-q", "build3")
+
+	c.Assert(history1, checker.Equals, history2)
+	dockerCmd(c, "rmi", "build2")
+	dockerCmd(c, "rmi", "build3")
+	dockerCmd(c, "rmi", "build1")
+	dockerCmd(c, "load", "-i", tempFile)
+
+	// Modify file, everything up to last command and layers are reused
+	dockerfile = `
+		FROM busybox
+		ENV FOO=bar
+		ADD baz /
+		RUN touch newfile`
+	err = ioutil.WriteFile(filepath.Join(ctx.Dir, "Dockerfile"), []byte(dockerfile), 0644)
+	c.Assert(err, checker.IsNil)
+
+	id2, out, err = buildImageFromContextWithOut("build2", ctx, true, "--cache-from=build1")
+	c.Assert(err, checker.IsNil)
+	c.Assert(id1, checker.Not(checker.Equals), id2)
+	c.Assert(strings.Count(out, "Using cache"), checker.Equals, 2)
+
+	layers1Str, _ := dockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build1")
+	layers2Str, _ := dockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build2")
+
+	var layers1 []string
+	var layers2 []string
+	c.Assert(json.Unmarshal([]byte(layers1Str), &layers1), checker.IsNil)
+	c.Assert(json.Unmarshal([]byte(layers2Str), &layers2), checker.IsNil)
+
+	c.Assert(len(layers1), checker.Equals, len(layers2))
+	for i := 0; i < len(layers1)-1; i++ {
+		c.Assert(layers1[i], checker.Equals, layers2[i])
+	}
+	c.Assert(layers1[len(layers1)-1], checker.Not(checker.Equals), layers2[len(layers1)-1])
+}