package tarexport

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"reflect"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/docker/image"
	"github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/symlink"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/reference"
)
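
// Load reads a tar archive of saved images from inTar, extracts it into a
// temporary directory, and registers the images, layers, and tags it contains.
// Archives that carry a manifest file are loaded directly; archives without one
// are handed to legacyLoad. Progress is written to outStream unless quiet is set.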
func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) error {
	var (
		sf             = streamformatter.NewJSONStreamFormatter()
		progressOutput progress.Output
	)
	if !quiet {
		progressOutput = sf.NewProgressOutput(outStream, false)
	}
	outStream = &streamformatter.StdoutFormatter{Writer: outStream, StreamFormatter: streamformatter.NewJSONStreamFormatter()}

	tmpDir, err := ioutil.TempDir("", "docker-import-")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpDir)

	if err := chrootarchive.Untar(inTar, tmpDir, nil); err != nil {
		return err
	}
	// read manifest, if no file then load in legacy mode
	manifestPath, err := safePath(tmpDir, manifestFileName)
	if err != nil {
		return err
	}
	manifestFile, err := os.Open(manifestPath)
	if err != nil {
		if os.IsNotExist(err) {
			return l.legacyLoad(tmpDir, outStream, progressOutput)
		}
		return err
	}
	defer manifestFile.Close()
	var manifest []manifestItem
	if err := json.NewDecoder(manifestFile).Decode(&manifest); err != nil {
		return err
	}

	var parentLinks []parentLink
	var imageIDsStr string
	var imageRefCount int

	for _, m := range manifest {
		configPath, err := safePath(tmpDir, m.Config)
		if err != nil {
			return err
		}
		config, err := ioutil.ReadFile(configPath)
		if err != nil {
			return err
		}
		img, err := image.NewFromJSON(config)
		if err != nil {
			return err
		}
		var rootFS image.RootFS
		rootFS = *img.RootFS
		rootFS.DiffIDs = nil
		if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual {
			return fmt.Errorf("invalid manifest, layers length mismatch: expected %d, got %d", expected, actual)
		}
		for i, diffID := range img.RootFS.DiffIDs {
			layerPath, err := safePath(tmpDir, m.Layers[i])
			if err != nil {
				return err
			}
			r := rootFS
			r.Append(diffID)
			newLayer, err := l.ls.Get(r.ChainID())
			if err != nil {
				newLayer, err = l.loadLayer(layerPath, rootFS, diffID.String(), m.LayerSources[diffID], progressOutput)
				if err != nil {
					return err
				}
			}
			defer layer.ReleaseAndLog(l.ls, newLayer)
			if expected, actual := diffID, newLayer.DiffID(); expected != actual {
				return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual)
			}
			rootFS.Append(diffID)
		}

		imgID, err := l.is.Create(config)
		if err != nil {
			return err
		}
		imageIDsStr += fmt.Sprintf("Loaded image ID: %s\n", imgID)

		imageRefCount = 0
		for _, repoTag := range m.RepoTags {
			named, err := reference.ParseNamed(repoTag)
			if err != nil {
				return err
			}
			ref, ok := named.(reference.NamedTagged)
			if !ok {
				return fmt.Errorf("invalid tag %q", repoTag)
			}
			if err := l.setLoadedTag(ref, imgID.Digest(), outStream); err != nil {
				return err
			}
			outStream.Write([]byte(fmt.Sprintf("Loaded image: %s\n", ref)))
			imageRefCount++
		}

		parentLinks = append(parentLinks, parentLink{imgID, m.Parent})
		l.loggerImgEvent.LogImageEvent(imgID.String(), imgID.String(), "load")
	}

	for _, p := range validatedParentLinks(parentLinks) {
		if p.parentID != "" {
			if err := l.setParentID(p.id, p.parentID); err != nil {
				return err
			}
		}
	}

	if imageRefCount == 0 {
		outStream.Write([]byte(imageIDsStr))
	}

	return nil
}
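
// setParentID records parentID as the parent of image id after verifying, via
// checkValidParent, that the parent's history is consistent with the child's.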
func (l *tarexporter) setParentID(id, parentID image.ID) error {
	img, err := l.is.Get(id)
	if err != nil {
		return err
	}
	parent, err := l.is.Get(parentID)
	if err != nil {
		return err
	}
	if !checkValidParent(img, parent) {
		return fmt.Errorf("image %v is not a valid parent for %v", parent.ID(), img.ID())
	}
	return l.is.SetParent(id, parentID)
}
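
// loadLayer registers the layer tarball at filename on top of rootFS in the
// layer store, decompressing it on the fly and reporting progress through
// progressOutput when one is provided. If the layer store supports descriptors,
// foreignSrc is recorded alongside the layer.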
func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) {
	// We use system.OpenSequential to use sequential file access on Windows, avoiding
	// depleting the standby list. On Linux, this equates to a regular os.Open.
	rawTar, err := system.OpenSequential(filename)
	if err != nil {
		logrus.Debugf("Error reading embedded tar: %v", err)
		return nil, err
	}
	defer rawTar.Close()

	var r io.Reader
	if progressOutput != nil {
		fileInfo, err := rawTar.Stat()
		if err != nil {
			logrus.Debugf("Error statting file: %v", err)
			return nil, err
		}

		r = progress.NewProgressReader(rawTar, progressOutput, fileInfo.Size(), stringid.TruncateID(id), "Loading layer")
	} else {
		r = rawTar
	}

	inflatedLayerData, err := archive.DecompressStream(r)
	if err != nil {
		return nil, err
	}
	defer inflatedLayerData.Close()

	if ds, ok := l.ls.(layer.DescribableStore); ok {
		return ds.RegisterWithDescriptor(inflatedLayerData, rootFS.ChainID(), foreignSrc)
	}
	return l.ls.Register(inflatedLayerData, rootFS.ChainID())
}
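
// setLoadedTag points ref at imgID in the reference store, warning on outStream
// when an existing tag is moved to a different image.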
func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID digest.Digest, outStream io.Writer) error {
	if prevID, err := l.rs.Get(ref); err == nil && prevID != imgID {
		fmt.Fprintf(outStream, "The image %s already exists, renaming the old one with ID %s to empty string\n", ref.String(), string(prevID)) // todo: this message is wrong in case of multiple tags
	}

	if err := l.rs.AddTag(ref, imgID, true); err != nil {
		return err
	}
	return nil
}
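
// legacyLoad imports an archive in the legacy (pre-manifest) layout, where every
// directory in tmpDir describes one image and tags are listed in a repositories file.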
func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer, progressOutput progress.Output) error {
	legacyLoadedMap := make(map[string]image.ID)

	dirs, err := ioutil.ReadDir(tmpDir)
	if err != nil {
		return err
	}

	// every dir represents an image
	for _, d := range dirs {
		if d.IsDir() {
			if err := l.legacyLoadImage(d.Name(), tmpDir, legacyLoadedMap, progressOutput); err != nil {
				return err
			}
		}
	}

	// load tags from repositories file
	repositoriesPath, err := safePath(tmpDir, legacyRepositoriesFileName)
	if err != nil {
		return err
	}
	repositoriesFile, err := os.Open(repositoriesPath)
	if err != nil {
		return err
	}
	defer repositoriesFile.Close()
	repositories := make(map[string]map[string]string)
	if err := json.NewDecoder(repositoriesFile).Decode(&repositories); err != nil {
		return err
	}

	for name, tagMap := range repositories {
		for tag, oldID := range tagMap {
			imgID, ok := legacyLoadedMap[oldID]
			if !ok {
				return fmt.Errorf("invalid target ID: %v", oldID)
			}
			named, err := reference.WithName(name)
			if err != nil {
				return err
			}
			ref, err := reference.WithTag(named, tag)
			if err != nil {
				return err
			}
			if err := l.setLoadedTag(ref, imgID.Digest(), outStream); err != nil {
				return err
			}
		}
	}

	return nil
}
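
// legacyLoadImage loads a single image directory from a legacy archive, loading
// its parent chain first, and records the new image ID in loadedMap.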
func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[string]image.ID, progressOutput progress.Output) error {
	if _, loaded := loadedMap[oldID]; loaded {
		return nil
	}
	configPath, err := safePath(sourceDir, filepath.Join(oldID, legacyConfigFileName))
	if err != nil {
		return err
	}
	imageJSON, err := ioutil.ReadFile(configPath)
	if err != nil {
		logrus.Debugf("Error reading json: %v", err)
		return err
	}

	var img struct{ Parent string }
	if err := json.Unmarshal(imageJSON, &img); err != nil {
		return err
	}

	var parentID image.ID
	if img.Parent != "" {
		for {
			var loaded bool
			if parentID, loaded = loadedMap[img.Parent]; !loaded {
				if err := l.legacyLoadImage(img.Parent, sourceDir, loadedMap, progressOutput); err != nil {
					return err
				}
			} else {
				break
			}
		}
	}

	// todo: try to connect with migrate code
	rootFS := image.NewRootFS()
	var history []image.History

	if parentID != "" {
		parentImg, err := l.is.Get(parentID)
		if err != nil {
			return err
		}

		rootFS = parentImg.RootFS
		history = parentImg.History
	}

	layerPath, err := safePath(sourceDir, filepath.Join(oldID, legacyLayerFileName))
	if err != nil {
		return err
	}
	newLayer, err := l.loadLayer(layerPath, *rootFS, oldID, distribution.Descriptor{}, progressOutput)
	if err != nil {
		return err
	}
	rootFS.Append(newLayer.DiffID())

	h, err := v1.HistoryFromConfig(imageJSON, false)
	if err != nil {
		return err
	}
	history = append(history, h)

	config, err := v1.MakeConfigFromV1Config(imageJSON, rootFS, history)
	if err != nil {
		return err
	}
	imgID, err := l.is.Create(config)
	if err != nil {
		return err
	}

	metadata, err := l.ls.Release(newLayer)
	layer.LogReleaseMetadata(metadata)
	if err != nil {
		return err
	}

	if parentID != "" {
		if err := l.is.SetParent(imgID, parentID); err != nil {
			return err
		}
	}

	loadedMap[oldID] = imgID
	return nil
}
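
// safePath resolves path relative to base, following symlinks without allowing
// the result to escape base.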
func safePath(base, path string) (string, error) {
	return symlink.FollowSymlinkInScope(filepath.Join(base, path), base)
}

type parentLink struct {
	id, parentID image.ID
}
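
// validatedParentLinks clears any parentID that does not refer to another image
// in the same load, so that only resolvable parent links are applied.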
func validatedParentLinks(pl []parentLink) (ret []parentLink) {
mainloop:
	for i, p := range pl {
		ret = append(ret, p)
		for _, p2 := range pl {
			if p2.id == p.parentID && p2.id != p.id {
				continue mainloop
			}
		}
		ret[i].parentID = ""
	}
	return
}
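
// checkValidParent reports whether parent's history is exactly one entry shorter
// than img's and matches it entry for entry; a pair with no history on either
// side is also accepted, since history is optional.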
func checkValidParent(img, parent *image.Image) bool {
	if len(img.History) == 0 && len(parent.History) == 0 {
		return true // having history is not mandatory
	}
	if len(img.History)-len(parent.History) != 1 {
		return false
	}
	for i, h := range parent.History {
		if !reflect.DeepEqual(h, img.History[i]) {
			return false
		}
	}
	return true
}