package daemon

import (
	"encoding/json"
	"io"
	"net/http"
	"net/url"
	"runtime"
	"strings"
	"time"

	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/builder/dockerfile"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/httputils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/pkg/errors"
)

// ImportImage imports an image, getting the archived layer data either from
// inConfig (if src is "-"), or from a URI specified in src. Progress output is
// written to outStream. Repository and tag names can optionally be given in
// the repository and tag arguments, respectively.
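//
// An illustrative call, with hypothetical body and out streams supplied by an
// API handler (not an exact transcription of any caller):
//
//	err := daemon.ImportImage("http://example.com/rootfs.tar.gz", "myrepo", "latest",
//		"imported from rootfs.tar.gz", body, out, []string{`CMD ["/bin/sh"]`})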
func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error {
	var (
		sf     = streamformatter.NewJSONStreamFormatter()
		rc     io.ReadCloser
		resp   *http.Response
		newRef reference.Named
	)

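	// Validate the optional target reference before doing any work; an image
	// cannot be imported directly into a digest (canonical) reference.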
	if repository != "" {
		var err error
		newRef, err = reference.ParseNormalizedNamed(repository)
		if err != nil {
			return err
		}
		if _, isCanonical := newRef.(reference.Canonical); isCanonical {
			return errors.New("cannot import digest reference")
		}

		if tag != "" {
			newRef, err = reference.WithTag(newRef, tag)
			if err != nil {
				return err
			}
		}
	}

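	// Apply any Dockerfile-style changes (e.g. CMD, ENV, ENTRYPOINT) on top of
	// an otherwise empty container config.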
	config, err := dockerfile.BuildFromConfig(&container.Config{}, changes)
	if err != nil {
		return err
	}

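	// The layer data comes either from the request body (src == "-") or from a
	// URL given in src, in which case it is downloaded with progress reporting.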
	if src == "-" {
		rc = inConfig
	} else {
		inConfig.Close()
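		// A src without a scheme is treated as a plain host/path; assume http.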
		if len(strings.Split(src, "://")) == 1 {
			src = "http://" + src
		}
		u, err := url.Parse(src)
		if err != nil {
			return err
		}

		resp, err = httputils.Download(u.String())
		if err != nil {
			return err
		}
		outStream.Write(sf.FormatStatus("", "Downloading from %s", u))
		progressOutput := sf.NewProgressOutput(outStream, true)
		rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing")
	}

	defer rc.Close()
	if len(msg) == 0 {
		msg = "Imported from " + src
	}

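	// The archive may be compressed (e.g. gzip); decompress it transparently
	// and register the result as a new layer with no parent.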
	inflatedLayerData, err := archive.DecompressStream(rc)
	if err != nil {
		return err
	}
	// TODO: support windows baselayer?
	l, err := daemon.layerStore.Register(inflatedLayerData, "")
	if err != nil {
		return err
	}
	defer layer.ReleaseAndLog(daemon.layerStore, l)

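	// Build the config for a single-layer image whose root filesystem is the
	// layer registered above, carrying the commit message and config changes.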
	created := time.Now().UTC()
	imgConfig, err := json.Marshal(&image.Image{
		V1Image: image.V1Image{
			DockerVersion: dockerversion.Version,
			Config:        config,
			Architecture:  runtime.GOARCH,
			OS:            runtime.GOOS,
			Created:       created,
			Comment:       msg,
		},
		RootFS: &image.RootFS{
			Type:    "layers",
			DiffIDs: []layer.DiffID{l.DiffID()},
		},
		History: []image.History{{
			Created: created,
			Comment: msg,
		}},
	})
	if err != nil {
		return err
	}

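	// Store the config in the image store; the returned ID is the
	// content-addressable digest of the image configuration.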
	id, err := daemon.imageStore.Create(imgConfig)
	if err != nil {
		return err
	}

	// FIXME: connect with commit code and call refstore directly
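	// Tag the imported image when a repository reference was supplied.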
	if newRef != nil {
		if err := daemon.TagImageWithReference(id, newRef); err != nil {
			return err
		}
	}

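	// Emit an "import" event and report the new image ID to the client.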
	daemon.LogImageEvent(id.String(), id.String(), "import")
	outStream.Write(sf.FormatStatus("", id.String()))
	return nil
}