2018-02-05 16:05:59 -05:00
package distribution // import "github.com/docker/docker/distribution"
2015-11-18 17:18:44 -05:00
import (
2018-04-19 18:30:59 -04:00
"context"
2015-11-18 17:18:44 -05:00
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"runtime"
2020-10-30 15:47:06 -04:00
"github.com/containerd/containerd/log"
2018-06-26 17:49:33 -04:00
"github.com/containerd/containerd/platforms"
2015-11-18 17:18:44 -05:00
"github.com/docker/distribution"
2015-12-16 22:19:22 -05:00
"github.com/docker/distribution/manifest/manifestlist"
2019-10-10 17:33:15 -04:00
"github.com/docker/distribution/manifest/ocischema"
2015-11-18 17:18:44 -05:00
"github.com/docker/distribution/manifest/schema1"
2015-12-11 18:24:12 -05:00
"github.com/docker/distribution/manifest/schema2"
2017-01-25 19:54:18 -05:00
"github.com/docker/distribution/reference"
2016-01-26 14:19:18 -05:00
"github.com/docker/distribution/registry/client/transport"
2015-11-18 17:18:44 -05:00
"github.com/docker/docker/distribution/metadata"
2015-11-13 19:59:01 -05:00
"github.com/docker/docker/distribution/xfer"
2015-11-18 17:18:44 -05:00
"github.com/docker/docker/image"
2019-06-14 21:56:28 -04:00
v1 "github.com/docker/docker/image/v1"
2015-11-18 17:18:44 -05:00
"github.com/docker/docker/layer"
2015-11-13 19:59:01 -05:00
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/progress"
2015-11-18 17:18:44 -05:00
"github.com/docker/docker/pkg/stringid"
2017-06-19 22:42:48 -04:00
"github.com/docker/docker/pkg/system"
2017-01-25 19:54:18 -05:00
refstore "github.com/docker/docker/reference"
2015-11-18 17:18:44 -05:00
"github.com/docker/docker/registry"
2019-08-05 10:37:47 -04:00
digest "github.com/opencontainers/go-digest"
2018-02-15 16:17:27 -05:00
specs "github.com/opencontainers/image-spec/specs-go/v1"
2017-07-19 10:20:13 -04:00
"github.com/pkg/errors"
2017-07-26 17:42:13 -04:00
"github.com/sirupsen/logrus"
2015-11-18 17:18:44 -05:00
)
2016-08-10 15:04:42 -04:00
// errRootFSMismatch is returned when the layers listed in a manifest
// disagree with the rootfs described by the image configuration.
var errRootFSMismatch = errors.New("layers from manifest don't match image configuration")

// errRootFSInvalid is returned when the image configuration does not
// carry a usable rootfs section.
var errRootFSInvalid = errors.New("invalid rootfs in image configuration")
2015-12-11 18:24:12 -05:00
2016-02-11 17:08:49 -05:00
// ImageConfigPullError is an error pulling the image config blob
// (only applies to schema2).
type ImageConfigPullError struct {
Err error
}
// Error returns the error string for ImageConfigPullError.
func ( e ImageConfigPullError ) Error ( ) string {
return "error pulling image configuration: " + e . Err . Error ( )
}
2015-11-18 17:18:44 -05:00
// v2Puller pulls images from a registry using the v2 protocol. One
// v2Puller is configured for a single repository/endpoint pair.
type v2Puller struct {
	// V2MetadataService caches the mapping between layer DiffIDs and
	// registry blob digests (populated in Registered).
	V2MetadataService metadata.V2MetadataService
	endpoint          registry.APIEndpoint
	config            *ImagePullConfig
	repoInfo          *registry.RepositoryInfo
	// repo is the remote repository handle; set by Pull before any
	// manifest or blob is fetched.
	repo distribution.Repository
	// manifestStore serves manifests; its remote backend is wired up
	// by Pull from repo.Manifests.
	manifestStore *manifestStore
}
2018-06-26 17:49:33 -04:00
func ( p * v2Puller ) Pull ( ctx context . Context , ref reference . Named , platform * specs . Platform ) ( err error ) {
2015-11-18 17:18:44 -05:00
// TODO(tiborvass): was ReceiveTimeout
2019-06-17 21:42:24 -04:00
p . repo , err = NewV2Repository ( ctx , p . repoInfo , p . endpoint , p . config . MetaHeaders , p . config . AuthConfig , "pull" )
2015-11-18 17:18:44 -05:00
if err != nil {
2015-11-13 19:59:01 -05:00
logrus . Warnf ( "Error getting v2 registry: %v" , err )
2016-02-11 18:45:29 -05:00
return err
2015-11-18 17:18:44 -05:00
}
2020-10-30 15:47:06 -04:00
p . manifestStore . remote , err = p . repo . Manifests ( ctx )
if err != nil {
return err
}
2018-06-26 17:49:33 -04:00
if err = p . pullV2Repository ( ctx , ref , platform ) ; err != nil {
2015-12-23 18:21:43 -05:00
if _ , ok := err . ( fallbackError ) ; ok {
return err
}
2017-11-14 19:06:17 -05:00
if continueOnError ( err , p . endpoint . Mirror ) {
2016-02-11 18:45:29 -05:00
return fallbackError {
err : err ,
transportOK : true ,
}
2015-11-18 17:18:44 -05:00
}
}
2015-12-04 16:42:33 -05:00
return err
2015-11-18 17:18:44 -05:00
}
2018-06-26 17:49:33 -04:00
func ( p * v2Puller ) pullV2Repository ( ctx context . Context , ref reference . Named , platform * specs . Platform ) ( err error ) {
2015-12-23 18:21:43 -05:00
var layersDownloaded bool
2015-12-10 14:01:34 -05:00
if ! reference . IsNameOnly ( ref ) {
2018-06-26 17:49:33 -04:00
layersDownloaded , err = p . pullV2Tag ( ctx , ref , platform )
2015-12-23 18:21:43 -05:00
if err != nil {
return err
}
2015-11-18 17:18:44 -05:00
} else {
2015-12-08 14:14:02 -05:00
tags , err := p . repo . Tags ( ctx ) . All ( ctx )
2015-11-18 17:18:44 -05:00
if err != nil {
2019-06-17 21:42:24 -04:00
return err
2015-11-18 17:18:44 -05:00
}
for _ , tag := range tags {
2015-12-11 14:00:13 -05:00
tagRef , err := reference . WithTag ( ref , tag )
2015-11-18 17:18:44 -05:00
if err != nil {
return err
}
2018-06-26 17:49:33 -04:00
pulledNew , err := p . pullV2Tag ( ctx , tagRef , platform )
2015-12-23 18:21:43 -05:00
if err != nil {
// Since this is the pull-all-tags case, don't
// allow an error pulling a particular tag to
// make the whole pull fall back to v1.
if fallbackErr , ok := err . ( fallbackError ) ; ok {
return fallbackErr . err
}
return err
}
// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
// TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus?
layersDownloaded = layersDownloaded || pulledNew
2015-11-18 17:18:44 -05:00
}
}
2017-01-25 19:54:18 -05:00
writeStatus ( reference . FamiliarString ( ref ) , p . config . ProgressOutput , layersDownloaded )
2015-11-18 17:18:44 -05:00
return nil
}
2015-11-13 19:59:01 -05:00
// v2LayerDescriptor describes one layer blob to fetch from a v2
// registry; it is handed to the download manager as an
// xfer.DownloadDescriptor.
type v2LayerDescriptor struct {
	digest digest.Digest
	// diffID, when prepopulated from the image config (see
	// pullSchema2Layers), lets DiffID skip the metadata-service lookup.
	diffID            layer.DiffID
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	V2MetadataService metadata.V2MetadataService
	// tmpFile holds partially- or fully-downloaded blob data between
	// Download attempts; nil before the first attempt.
	tmpFile *os.File
	// verifier accumulates the hash of downloaded bytes for digest
	// verification; cleared when the download is restarted.
	verifier digest.Verifier
	// src is the manifest descriptor for this layer (schema2/OCI path).
	src distribution.Descriptor
}
2015-11-13 19:59:01 -05:00
// Key returns the stable cache key for this layer download.
func (ld *v2LayerDescriptor) Key() string {
	const scheme = "v2:"
	return scheme + ld.digest.String()
}
2015-11-18 17:18:44 -05:00
2015-11-13 19:59:01 -05:00
// ID returns a short identifier for progress/log output.
func (ld *v2LayerDescriptor) ID() string {
	full := ld.digest.String()
	return stringid.TruncateID(full)
}
2015-11-18 17:18:44 -05:00
2015-11-13 19:59:01 -05:00
func ( ld * v2LayerDescriptor ) DiffID ( ) ( layer . DiffID , error ) {
2017-05-05 13:56:40 -04:00
if ld . diffID != "" {
return ld . diffID , nil
}
2016-01-13 22:34:27 -05:00
return ld . V2MetadataService . GetDiffID ( ld . digest )
2015-11-13 19:59:01 -05:00
}
// Download fetches the blob for ld.digest into a temporary file,
// resuming from a leftover temp file of a previous attempt when
// possible, and verifies the downloaded bytes against the digest. On
// success it returns a ReadCloser over the temp file (closing it also
// removes the file) and the total blob size (0 when the size could not
// be determined). Retryable failures are wrapped by retryOnError;
// unrecoverable ones by xfer.DoNotRetry.
func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	logrus.Debugf("pulling blob %q", ld.digest)

	var (
		err    error
		offset int64
	)

	// Reuse the temp file from a previous attempt to resume; otherwise
	// start a fresh one.
	if ld.tmpFile == nil {
		ld.tmpFile, err = createDownloadFile()
		if err != nil {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
	} else {
		offset, err = ld.tmpFile.Seek(0, io.SeekEnd)
		if err != nil {
			// Can't determine how much we already have; discard the
			// temp file and start over.
			logrus.Debugf("error seeking to end of download file: %v", err)
			offset = 0
			ld.tmpFile.Close()
			if err := os.Remove(ld.tmpFile.Name()); err != nil {
				logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
			}
			ld.tmpFile, err = createDownloadFile()
			if err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		} else if offset != 0 {
			logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset)
		}
	}

	tmpFile := ld.tmpFile

	layerDownload, err := ld.open(ctx)
	if err != nil {
		logrus.Errorf("Error initiating layer download: %v", err)
		return nil, 0, retryOnError(err)
	}

	// Position the remote stream at the resume offset.
	if offset != 0 {
		_, err := layerDownload.Seek(offset, io.SeekStart)
		if err != nil {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
	}

	// Seek to the end to learn the total blob size (for the progress bar).
	size, err := layerDownload.Seek(0, io.SeekEnd)
	if err != nil {
		// Seek failed, perhaps because there was no Content-Length
		// header. This shouldn't fail the download, because we can
		// still continue without a progress bar.
		size = 0
	} else {
		if size != 0 && offset > size {
			logrus.Debug("Partial download is larger than full blob. Starting over")
			offset = 0
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		}

		// Restore the seek offset either at the beginning of the
		// stream, or just after the last byte we have from previous
		// attempts.
		_, err = layerDownload.Seek(offset, io.SeekStart)
		if err != nil {
			return nil, 0, err
		}
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading")
	defer reader.Close()

	// Keep one verifier across resumed attempts so it hashes the whole blob.
	if ld.verifier == nil {
		ld.verifier = ld.digest.Verifier()
	}

	// Copy to the temp file while feeding the verifier.
	_, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier))
	if err != nil {
		if err == transport.ErrWrongCodeForByteRange {
			// Registry ignored our Range request; restart from scratch.
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
		return nil, 0, retryOnError(err)
	}

	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")

	if !ld.verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
		logrus.Error(err)

		// Allow a retry if this digest verification error happened
		// after a resumed download.
		if offset != 0 {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}

			return nil, 0, err
		}
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())

	// Rewind so the consumer reads the file from the start.
	_, err = tmpFile.Seek(0, io.SeekStart)
	if err != nil {
		tmpFile.Close()
		if err := os.Remove(tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		ld.tmpFile = nil
		ld.verifier = nil
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	// hand off the temporary file to the download manager, so it will only
	// be closed once
	ld.tmpFile = nil
	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
		tmpFile.Close()
		err := os.RemoveAll(tmpFile.Name())
		if err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		return err
	}), size, nil
}
func ( ld * v2LayerDescriptor ) Close ( ) {
if ld . tmpFile != nil {
ld . tmpFile . Close ( )
if err := os . RemoveAll ( ld . tmpFile . Name ( ) ) ; err != nil {
logrus . Errorf ( "Failed to remove temp file: %s" , ld . tmpFile . Name ( ) )
}
}
2015-11-13 19:59:01 -05:00
}
2015-11-18 17:18:44 -05:00
2016-01-26 14:19:18 -05:00
func ( ld * v2LayerDescriptor ) truncateDownloadFile ( ) error {
// Need a new hash context since we will be redoing the download
ld . verifier = nil
2019-08-05 23:12:23 -04:00
if _ , err := ld . tmpFile . Seek ( 0 , io . SeekStart ) ; err != nil {
2016-02-11 17:08:49 -05:00
logrus . Errorf ( "error seeking to beginning of download file: %v" , err )
2016-01-26 14:19:18 -05:00
return err
}
if err := ld . tmpFile . Truncate ( 0 ) ; err != nil {
2016-02-11 17:08:49 -05:00
logrus . Errorf ( "error truncating download file: %v" , err )
2016-01-26 14:19:18 -05:00
return err
}
return nil
}
2015-11-13 19:59:01 -05:00
func ( ld * v2LayerDescriptor ) Registered ( diffID layer . DiffID ) {
// Cache mapping from this layer's DiffID to the blobsum
2017-01-25 19:54:18 -05:00
ld . V2MetadataService . Add ( diffID , metadata . V2Metadata { Digest : ld . digest , SourceRepository : ld . repoInfo . Name . Name ( ) } )
2015-11-18 17:18:44 -05:00
}
2018-06-26 17:49:33 -04:00
// pullV2Tag resolves ref to a manifest digest, fetches the manifest
// (with a deprecated by-tag fallback for registries that cannot serve
// the digest they themselves resolved), dispatches to the
// schema-specific pull path, and records the resulting tag/digest in
// the reference store. It returns true when anything new was pulled or
// tagged.
func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named, platform *specs.Platform) (tagUpdated bool, err error) {
	var (
		tagOrDigest string // Used for logging/progress only
		dgst        digest.Digest
		mt          string
		size        int64
		tagged      reference.NamedTagged
		isTagged    bool
	)
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		// Pull by digest: the reference itself carries the target.
		dgst = digested.Digest()
		tagOrDigest = digested.String()
	} else if tagged, isTagged = ref.(reference.NamedTagged); isTagged {
		// Pull by tag: resolve the tag to a digest via the tag service.
		tagService := p.repo.Tags(ctx)
		desc, err := tagService.Get(ctx, tagged.Tag())
		if err != nil {
			return false, err
		}

		dgst = desc.Digest
		tagOrDigest = tagged.Tag()
		mt = desc.MediaType
		size = desc.Size
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref))
	}

	ctx = log.WithLogger(ctx, logrus.WithFields(
		logrus.Fields{
			"digest": dgst,
			"remote": ref,
		}))

	desc := specs.Descriptor{
		MediaType: mt,
		Digest:    dgst,
		Size:      size,
	}
	manifest, err := p.manifestStore.Get(ctx, desc)
	if err != nil {
		if isTagged && isNotFound(errors.Cause(err)) {
			logrus.WithField("ref", ref).WithError(err).Debug("Falling back to pull manifest by tag")

			msg := `%s Failed to pull manifest by the resolved digest. This registry does not
	appear to conform to the distribution registry specification; falling back to
	pull by tag. This fallback is DEPRECATED, and will be removed in a future
	release. Please contact admins of %s. %s
`

			warnEmoji := "\U000026A0\U0000FE0F"
			progress.Messagef(p.config.ProgressOutput, "WARNING", msg, warnEmoji, p.endpoint.URL, warnEmoji)

			// Fetch by tag worked, but fetch by digest didn't.
			// This is a broken registry implementation.
			// We'll fallback to the old behavior and get the manifest by tag.
			var ms distribution.ManifestService
			ms, err = p.repo.Manifests(ctx)
			if err != nil {
				return false, err
			}

			manifest, err = ms.Get(ctx, "", distribution.WithTag(tagged.Tag()))
			err = errors.Wrap(err, "error after falling back to get manifest by tag")
		}
		if err != nil {
			return false, err
		}
	}

	if manifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}

	// Reject schema2 manifests whose config media type is not in the
	// configured allow-list (e.g. plugin configs on an image pull).
	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
		var allowedMediatype bool
		for _, t := range p.config.Schema2Types {
			if m.Manifest.Config.MediaType == t {
				allowedMediatype = true
				break
			}
		}
		if !allowedMediatype {
			configClass := mediaTypeClasses[m.Manifest.Config.MediaType]
			if configClass == "" {
				configClass = "unknown"
			}
			return false, invalidManifestClassError{m.Manifest.Config.MediaType, configClass}
		}
	}

	logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref))
	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named()))

	var (
		id             digest.Digest
		manifestDigest digest.Digest
	)

	// Dispatch on the concrete manifest format.
	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		if p.config.RequireSchema2 {
			return false, fmt.Errorf("invalid manifest: not schema2")
		}

		// give registries time to upgrade to schema2 and only warn if we know a registry has been upgraded long time ago
		// TODO: condition to be removed
		if reference.Domain(ref) == "docker.io" {
			msg := fmt.Sprintf("Image %s uses outdated schema1 manifest format. Please upgrade to a schema2 image for better future compatibility. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref)
			logrus.Warn(msg)
			progress.Message(p.config.ProgressOutput, "", msg)
		}

		id, manifestDigest, err = p.pullSchema1(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	case *schema2.DeserializedManifest:
		id, manifestDigest, err = p.pullSchema2(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	case *ocischema.DeserializedManifest:
		id, manifestDigest, err = p.pullOCI(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	case *manifestlist.DeserializedManifestList:
		id, manifestDigest, err = p.pullManifestList(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	default:
		return false, invalidManifestFormatError{}
	}

	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())

	// Record the pulled image in the reference store (when configured),
	// keeping both the tag and the canonical digest reference.
	if p.config.ReferenceStore != nil {
		oldTagID, err := p.config.ReferenceStore.Get(ref)
		if err == nil {
			if oldTagID == id {
				// Already up to date; just (re-)record the digest reference.
				return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
			}
		} else if err != refstore.ErrDoesNotExist {
			return false, err
		}

		if canonical, ok := ref.(reference.Canonical); ok {
			if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
				return false, err
			}
		} else {
			if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
				return false, err
			}
			if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
				return false, err
			}
		}
	}
	return true, nil
}
2015-12-04 16:42:33 -05:00
2018-06-26 17:49:33 -04:00
// pullSchema1 pulls an image from a (deprecated) schema1 manifest:
// verifies the signed manifest, normalizes its layer list, downloads the
// layers bottom-up, synthesizes a modern image config from the v1
// compatibility history, and stores the image. It returns the stored
// image ID and the manifest digest.
func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unverifiedManifest *schema1.SignedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
	if platform != nil {
		// Early bath if the requested OS doesn't match that of the configuration.
		// This avoids doing the download, only to potentially fail later.
		if !system.IsOSSupported(platform.OS) {
			return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", runtime.GOOS, platform.OS)
		}
	}

	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
	if err != nil {
		return "", "", err
	}

	rootFS := image.NewRootFS()

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return "", "", err
	}

	var descriptors []xfer.DownloadDescriptor

	// Image history converted to the new format
	var history []image.History

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum
		if err = blobSum.Validate(); err != nil {
			return "", "", errors.Wrapf(err, "could not validate layer digest %q", blobSum)
		}

		// Layers marked "throwaway" contribute history but no blob.
		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return "", "", err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return "", "", err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		layerDescriptor := &v2LayerDescriptor{
			digest:            blobSum,
			repoInfo:          p.repoInfo,
			repo:              p.repo,
			V2MetadataService: p.V2MetadataService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, runtime.GOOS, descriptors, p.config.ProgressOutput)
	if err != nil {
		return "", "", err
	}
	defer release()

	// Build a modern image config out of the newest layer's v1
	// compatibility blob plus the accumulated history and rootfs.
	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
	if err != nil {
		return "", "", err
	}

	imageID, err := p.config.ImageStore.Put(ctx, config)
	if err != nil {
		return "", "", err
	}

	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)

	return imageID, manifestDigest, nil
}
2019-10-10 17:33:15 -04:00
// pullSchema2Layers downloads the image config blob and the given layer
// blobs (shared by the schema2 and OCI paths), cross-checks the layer
// DiffIDs against the configuration, and stores the image. The config
// and the layers are fetched concurrently. It returns the image ID (the
// digest of the stored config).
func (p *v2Puller) pullSchema2Layers(ctx context.Context, target distribution.Descriptor, layers []distribution.Descriptor, platform *specs.Platform) (id digest.Digest, err error) {
	if _, err := p.config.ImageStore.Get(ctx, target.Digest); err == nil {
		// If the image already exists locally, no need to pull
		// anything.
		return target.Digest, nil
	}

	var descriptors []xfer.DownloadDescriptor

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for _, d := range layers {
		if err := d.Digest.Validate(); err != nil {
			return "", errors.Wrapf(err, "could not validate layer digest %q", d.Digest)
		}
		layerDescriptor := &v2LayerDescriptor{
			digest:            d.Digest,
			repo:              p.repo,
			repoInfo:          p.repoInfo,
			V2MetadataService: p.V2MetadataService,
			src:               d,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	configChan := make(chan []byte, 1)
	configErrChan := make(chan error, 1)
	layerErrChan := make(chan error, 1)
	downloadsDone := make(chan struct{})
	var cancel func()
	ctx, cancel = context.WithCancel(ctx)
	defer cancel()

	// Pull the image config
	go func() {
		configJSON, err := p.pullSchema2Config(ctx, target.Digest)
		if err != nil {
			configErrChan <- ImageConfigPullError{Err: err}
			// A failed config fetch also aborts the layer downloads.
			cancel()
			return
		}
		configChan <- configJSON
	}()

	var (
		configJSON       []byte          // raw serialized image config
		downloadedRootFS *image.RootFS   // rootFS from registered layers
		configRootFS     *image.RootFS   // rootFS from configuration
		release          func()          // release resources from rootFS download
		configPlatform   *specs.Platform // for LCOW when registering downloaded layers
	)

	layerStoreOS := runtime.GOOS
	if platform != nil {
		layerStoreOS = platform.OS
	}

	// https://github.com/docker/docker/issues/24766 - Err on the side of caution,
	// explicitly blocking images intended for linux from the Windows daemon. On
	// Windows, we do this before the attempt to download, effectively serialising
	// the download slightly slowing it down. We have to do it this way, as
	// chances are the download of layers itself would fail due to file names
	// which aren't suitable for NTFS. At some point in the future, if a similar
	// check to block Windows images being pulled on Linux is implemented, it
	// may be necessary to perform the same type of serialisation.
	if runtime.GOOS == "windows" {
		configJSON, configRootFS, configPlatform, err = receiveConfig(p.config.ImageStore, configChan, configErrChan)
		if err != nil {
			return "", err
		}
		if configRootFS == nil {
			return "", errRootFSInvalid
		}
		if err := checkImageCompatibility(configPlatform.OS, configPlatform.OSVersion); err != nil {
			return "", err
		}

		if len(descriptors) != len(configRootFS.DiffIDs) {
			return "", errRootFSMismatch
		}
		if platform == nil {
			// Early bath if the requested OS doesn't match that of the configuration.
			// This avoids doing the download, only to potentially fail later.
			if !system.IsOSSupported(configPlatform.OS) {
				return "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configPlatform.OS, layerStoreOS)
			}
			layerStoreOS = configPlatform.OS
		}

		// Populate diff ids in descriptors to avoid downloading foreign layers
		// which have been side loaded
		for i := range descriptors {
			descriptors[i].(*v2LayerDescriptor).diffID = configRootFS.DiffIDs[i]
		}
	}

	if p.config.DownloadManager != nil {
		// Download layers concurrently with the config fetch above.
		go func() {
			var (
				err    error
				rootFS image.RootFS
			)
			downloadRootFS := *image.NewRootFS()
			rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, layerStoreOS, descriptors, p.config.ProgressOutput)
			if err != nil {
				// Intentionally do not cancel the config download here
				// as the error from config download (if there is one)
				// is more interesting than the layer download error
				layerErrChan <- err
				return
			}

			downloadedRootFS = &rootFS
			close(downloadsDone)
		}()
	} else {
		// We have nothing to download
		close(downloadsDone)
	}

	// If the config wasn't already consumed on the Windows path above,
	// receive it now.
	if configJSON == nil {
		configJSON, configRootFS, _, err = receiveConfig(p.config.ImageStore, configChan, configErrChan)
		if err == nil && configRootFS == nil {
			err = errRootFSInvalid
		}
		if err != nil {
			// Abort the layer downloads and wait for the goroutine to
			// finish before returning.
			cancel()
			select {
			case <-downloadsDone:
			case <-layerErrChan:
			}
			return "", err
		}
	}

	select {
	case <-downloadsDone:
	case err = <-layerErrChan:
		return "", err
	}

	if release != nil {
		defer release()
	}

	if downloadedRootFS != nil {
		// The DiffIDs returned in rootFS MUST match those in the config.
		// Otherwise the image config could be referencing layers that aren't
		// included in the manifest.
		if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) {
			return "", errRootFSMismatch
		}

		for i := range downloadedRootFS.DiffIDs {
			if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] {
				return "", errRootFSMismatch
			}
		}
	}

	imageID, err := p.config.ImageStore.Put(ctx, configJSON)
	if err != nil {
		return "", err
	}

	return imageID, nil
}
func ( p * v2Puller ) pullSchema2 ( ctx context . Context , ref reference . Named , mfst * schema2 . DeserializedManifest , platform * specs . Platform ) ( id digest . Digest , manifestDigest digest . Digest , err error ) {
manifestDigest , err = schema2ManifestDigest ( ref , mfst )
2015-12-11 18:24:12 -05:00
if err != nil {
return "" , "" , err
}
2019-10-10 17:33:15 -04:00
id , err = p . pullSchema2Layers ( ctx , mfst . Target ( ) , mfst . Layers , platform )
return id , manifestDigest , err
}
2015-12-11 18:24:12 -05:00
2019-10-10 17:33:15 -04:00
func ( p * v2Puller ) pullOCI ( ctx context . Context , ref reference . Named , mfst * ocischema . DeserializedManifest , platform * specs . Platform ) ( id digest . Digest , manifestDigest digest . Digest , err error ) {
manifestDigest , err = schema2ManifestDigest ( ref , mfst )
if err != nil {
return "" , "" , err
}
id , err = p . pullSchema2Layers ( ctx , mfst . Target ( ) , mfst . Layers , platform )
return id , manifestDigest , err
2015-12-11 18:24:12 -05:00
}
2018-02-15 16:17:27 -05:00
func receiveConfig ( s ImageConfigStore , configChan <- chan [ ] byte , errChan <- chan error ) ( [ ] byte , * image . RootFS , * specs . Platform , error ) {
2016-01-08 18:38:55 -05:00
select {
case configJSON := <- configChan :
2018-02-15 16:17:27 -05:00
rootfs , err := s . RootFSFromConfig ( configJSON )
if err != nil {
return nil , nil , nil , err
}
platform , err := s . PlatformFromConfig ( configJSON )
2016-12-16 14:19:05 -05:00
if err != nil {
2018-02-15 16:17:27 -05:00
return nil , nil , nil , err
2016-01-08 18:38:55 -05:00
}
2018-02-15 16:17:27 -05:00
return configJSON , rootfs , platform , nil
2016-01-08 18:38:55 -05:00
case err := <- errChan :
2018-02-15 16:17:27 -05:00
return nil , nil , nil , err
2016-01-08 18:38:55 -05:00
// Don't need a case for ctx.Done in the select because cancellation
// will trigger an error in p.pullSchema2ImageConfig.
}
}
2015-12-16 22:19:22 -05:00
// pullManifestList handles "manifest lists" which point to various
2017-01-16 23:45:27 -05:00
// platform-specific manifests.
2018-06-26 17:49:33 -04:00
func ( p * v2Puller ) pullManifestList ( ctx context . Context , ref reference . Named , mfstList * manifestlist . DeserializedManifestList , pp * specs . Platform ) ( id digest . Digest , manifestListDigest digest . Digest , err error ) {
2015-12-16 22:19:22 -05:00
manifestListDigest , err = schema2ManifestDigest ( ref , mfstList )
if err != nil {
return "" , "" , err
}
2018-06-26 17:49:33 -04:00
var platform specs . Platform
if pp != nil {
platform = * pp
2018-02-23 18:29:26 -05:00
}
2018-06-26 17:49:33 -04:00
logrus . Debugf ( "%s resolved to a manifestList object with %d entries; looking for a %s/%s match" , ref , len ( mfstList . Manifests ) , platforms . Format ( platform ) , runtime . GOARCH )
2015-12-16 22:19:22 -05:00
2018-06-26 17:49:33 -04:00
manifestMatches := filterManifests ( mfstList . Manifests , platform )
2017-10-03 19:58:07 -04:00
if len ( manifestMatches ) == 0 {
2019-01-15 12:24:15 -05:00
errMsg := fmt . Sprintf ( "no matching manifest for %s in the manifest list entries" , formatPlatform ( platform ) )
2017-02-09 19:13:57 -05:00
logrus . Debugf ( errMsg )
return "" , "" , errors . New ( errMsg )
2015-12-16 22:19:22 -05:00
}
2017-10-03 19:58:07 -04:00
if len ( manifestMatches ) > 1 {
logrus . Debugf ( "found multiple matches in manifest list, choosing best match %s" , manifestMatches [ 0 ] . Digest . String ( ) )
}
2020-10-30 15:47:06 -04:00
match := manifestMatches [ 0 ]
2017-10-03 19:58:07 -04:00
2020-10-30 15:47:06 -04:00
if err := checkImageCompatibility ( match . Platform . OS , match . Platform . OSVersion ) ; err != nil {
2015-12-16 22:19:22 -05:00
return "" , "" , err
}
2020-12-03 16:15:18 -05:00
desc := specs . Descriptor {
2020-10-30 15:47:06 -04:00
Digest : match . Digest ,
Size : match . Size ,
MediaType : match . MediaType ,
2020-12-03 16:15:18 -05:00
}
manifest , err := p . manifestStore . Get ( ctx , desc )
2015-12-16 22:19:22 -05:00
if err != nil {
return "" , "" , err
}
2020-10-30 15:47:06 -04:00
manifestRef , err := reference . WithDigest ( reference . TrimNamed ( ref ) , match . Digest )
2015-12-16 22:19:22 -05:00
if err != nil {
return "" , "" , err
}
switch v := manifest . ( type ) {
case * schema1 . SignedManifest :
2019-08-13 14:13:14 -04:00
msg := fmt . Sprintf ( "[DEPRECATION NOTICE] v2 schema1 manifests in manifest lists are not supported and will break in a future release. Suggest author of %s to upgrade to v2 schema2. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/" , ref )
2019-06-14 21:56:28 -04:00
logrus . Warn ( msg )
progress . Message ( p . config . ProgressOutput , "" , msg )
2018-06-26 17:49:33 -04:00
platform := toOCIPlatform ( manifestMatches [ 0 ] . Platform )
id , _ , err = p . pullSchema1 ( ctx , manifestRef , v , & platform )
2015-12-16 22:19:22 -05:00
if err != nil {
return "" , "" , err
}
case * schema2 . DeserializedManifest :
2018-06-26 17:49:33 -04:00
platform := toOCIPlatform ( manifestMatches [ 0 ] . Platform )
id , _ , err = p . pullSchema2 ( ctx , manifestRef , v , & platform )
2015-12-16 22:19:22 -05:00
if err != nil {
return "" , "" , err
}
2019-10-10 17:33:15 -04:00
case * ocischema . DeserializedManifest :
platform := toOCIPlatform ( manifestMatches [ 0 ] . Platform )
id , _ , err = p . pullOCI ( ctx , manifestRef , v , & platform )
if err != nil {
return "" , "" , err
}
2015-12-16 22:19:22 -05:00
default :
return "" , "" , errors . New ( "unsupported manifest format" )
}
2016-09-15 19:37:32 -04:00
return id , manifestListDigest , err
2015-12-16 22:19:22 -05:00
}
2016-09-15 19:37:32 -04:00
func ( p * v2Puller ) pullSchema2Config ( ctx context . Context , dgst digest . Digest ) ( configJSON [ ] byte , err error ) {
2015-12-11 18:24:12 -05:00
blobs := p . repo . Blobs ( ctx )
configJSON , err = blobs . Get ( ctx , dgst )
if err != nil {
return nil , err
}
// Verify image config digest
2017-01-06 20:23:18 -05:00
verifier := dgst . Verifier ( )
2015-12-11 18:24:12 -05:00
if _ , err := verifier . Write ( configJSON ) ; err != nil {
return nil , err
}
if ! verifier . Verified ( ) {
err := fmt . Errorf ( "image config verification failed for digest %s" , dgst )
logrus . Error ( err )
return nil , err
2015-11-18 17:18:44 -05:00
}
2015-12-11 18:24:12 -05:00
return configJSON , nil
2015-11-18 17:18:44 -05:00
}
2015-12-16 22:19:22 -05:00
// schema2ManifestDigest computes the manifest digest, and, if pulling by
// digest, ensures that it matches the requested digest.
func schema2ManifestDigest ( ref reference . Named , mfst distribution . Manifest ) ( digest . Digest , error ) {
_ , canonical , err := mfst . Payload ( )
if err != nil {
return "" , err
}
// If pull by digest, then verify the manifest digest.
if digested , isDigested := ref . ( reference . Canonical ) ; isDigested {
2017-01-06 20:23:18 -05:00
verifier := digested . Digest ( ) . Verifier ( )
2015-12-16 22:19:22 -05:00
if _ , err := verifier . Write ( canonical ) ; err != nil {
return "" , err
}
if ! verifier . Verified ( ) {
err := fmt . Errorf ( "manifest verification failed for digest %s" , digested . Digest ( ) )
logrus . Error ( err )
return "" , err
}
return digested . Digest ( ) , nil
}
return digest . FromBytes ( canonical ) , nil
}
2017-08-23 18:21:41 -04:00
func verifySchema1Manifest ( signedManifest * schema1 . SignedManifest , ref reference . Reference ) ( m * schema1 . Manifest , err error ) {
2015-11-18 17:18:44 -05:00
// If pull by digest, then verify the manifest digest. NOTE: It is
// important to do this first, before any other content validation. If the
// digest cannot be verified, don't even bother with those other things.
2015-12-04 16:55:15 -05:00
if digested , isCanonical := ref . ( reference . Canonical ) ; isCanonical {
2017-01-06 20:23:18 -05:00
verifier := digested . Digest ( ) . Verifier ( )
2015-12-08 14:14:02 -05:00
if _ , err := verifier . Write ( signedManifest . Canonical ) ; err != nil {
2015-11-18 17:18:44 -05:00
return nil , err
}
if ! verifier . Verified ( ) {
err := fmt . Errorf ( "image verification failed for digest %s" , digested . Digest ( ) )
logrus . Error ( err )
return nil , err
}
}
2015-12-08 14:14:02 -05:00
m = & signedManifest . Manifest
2015-11-18 17:18:44 -05:00
if m . SchemaVersion != 1 {
2017-01-25 19:54:18 -05:00
return nil , fmt . Errorf ( "unsupported schema version %d for %q" , m . SchemaVersion , reference . FamiliarString ( ref ) )
2015-11-18 17:18:44 -05:00
}
if len ( m . FSLayers ) != len ( m . History ) {
2017-01-25 19:54:18 -05:00
return nil , fmt . Errorf ( "length of history not equal to number of layers for %q" , reference . FamiliarString ( ref ) )
2015-11-18 17:18:44 -05:00
}
if len ( m . FSLayers ) == 0 {
2017-01-25 19:54:18 -05:00
return nil , fmt . Errorf ( "no FSLayers in manifest for %q" , reference . FamiliarString ( ref ) )
2015-11-18 17:18:44 -05:00
}
return m , nil
}
// fixManifestLayers removes repeated layers from the manifest and checks the
// correctness of the parent chain. m.FSLayers and m.History are modified in
// place; an error leaves m in a partially-validated (possibly trimmed) state.
func fixManifestLayers(m *schema1.Manifest) error {
	// Decode the V1Compatibility JSON for every history entry and validate
	// each image ID up front, so the checks below can work on plain structs.
	imgs := make([]*image.V1Image, len(m.FSLayers))
	for i := range m.FSLayers {
		img := &image.V1Image{}

		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
			return err
		}

		imgs[i] = img
		if err := v1.ValidateID(img.ID); err != nil {
			return err
		}
	}

	// The last entry is the base layer; it must not reference a parent.
	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
		// Windows base layer can point to a base layer parent that is not in manifest.
		return errors.New("invalid parent ID in the base layer of the image")
	}

	// check general duplicates to error instead of a deadlock
	idmap := make(map[string]struct{})

	var lastID string
	for _, img := range imgs {
		// skip IDs that appear after each other, we handle those later
		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
			// Same ID seen again after an intervening different ID: the
			// manifest is malformed (not a simple consecutive repeat).
			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
		}
		lastID = img.ID
		idmap[lastID] = struct{}{}
	}

	// backwards loop so that we keep the remaining indexes after removing items
	for i := len(imgs) - 2; i >= 0; i-- {
		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
			m.History = append(m.History[:i], m.History[i+1:]...)
		} else if imgs[i].Parent != imgs[i+1].ID {
			// Each entry's parent must be the ID of the next (older) entry.
			return fmt.Errorf("invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
		}
	}

	return nil
}
2016-01-25 21:20:18 -05:00
// createDownloadFile allocates a temporary file used as scratch space while
// downloading a layer blob. The caller is responsible for closing and
// removing it.
func createDownloadFile() (*os.File, error) {
	tmp, err := ioutil.TempFile("", "GetImageBlob")
	return tmp, err
}
2018-06-26 17:49:33 -04:00
// toOCIPlatform converts a manifest-list platform entry into the equivalent
// OCI image-spec Platform value, copying every field across.
func toOCIPlatform(p manifestlist.PlatformSpec) specs.Platform {
	return specs.Platform{
		Architecture: p.Architecture,
		OS:           p.OS,
		OSVersion:    p.OSVersion,
		OSFeatures:   p.OSFeatures,
		Variant:      p.Variant,
	}
}