package xfer // import "github.com/docker/docker/distribution/xfer"

import (
	"context"
	"errors"
	"fmt"
	"io"
	"runtime"
	"time"

	"github.com/docker/distribution"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/system"
	"github.com/sirupsen/logrus"
)

const maxDownloadAttempts = 5

// LayerDownloadManager figures out which layers need to be downloaded, then
// registers and downloads those, taking into account dependencies between
// layers.
type LayerDownloadManager struct {
	layerStores         map[string]layer.Store
	tm                  TransferManager
	waitDuration        time.Duration
	maxDownloadAttempts int
}

// SetConcurrency sets the max concurrent downloads for each pull
func (ldm *LayerDownloadManager) SetConcurrency(concurrency int) {
	ldm.tm.SetConcurrency(concurrency)
}

// NewLayerDownloadManager returns a new LayerDownloadManager.
func NewLayerDownloadManager(layerStores map[string]layer.Store, concurrencyLimit int, options ...func(*LayerDownloadManager)) *LayerDownloadManager {
	manager := LayerDownloadManager{
		layerStores:         layerStores,
		tm:                  NewTransferManager(concurrencyLimit),
		waitDuration:        time.Second,
		maxDownloadAttempts: maxDownloadAttempts,
	}
	for _, option := range options {
		option(&manager)
	}
	return &manager
}

// WithMaxDownloadAttempts configures the maximum number of download
// attempts for a download manager.
func WithMaxDownloadAttempts(max int) func(*LayerDownloadManager) {
	return func(dlm *LayerDownloadManager) {
		dlm.maxDownloadAttempts = max
	}
}
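
// Illustrative wiring of the pieces above (a sketch, not part of this
// package's API): the layer.Store map, concurrency limit, and retry budget
// below are placeholders chosen for the example, and someLayerStore is
// hypothetical.
//
//	stores := map[string]layer.Store{runtime.GOOS: someLayerStore}
//	ldm := NewLayerDownloadManager(stores, 3, WithMaxDownloadAttempts(10))
//	ldm.SetConcurrency(5) // optionally adjust concurrent downloads per pull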

type downloadTransfer struct {
	Transfer

	layerStore layer.Store
	layer      layer.Layer
	err        error
}

// result returns the layer resulting from the download, if the download
// and registration were successful.
func (d *downloadTransfer) result() (layer.Layer, error) {
	return d.layer, d.err
}

// A DownloadDescriptor references a layer that may need to be downloaded.
type DownloadDescriptor interface {
	// Key returns the key used to deduplicate downloads.
	Key() string
	// ID returns the ID for display purposes.
	ID() string
	// DiffID should return the DiffID for this layer, or an error
	// if it is unknown (for example, if it has not been downloaded
	// before).
	DiffID() (layer.DiffID, error)
	// Download is called to perform the download.
	Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error)
	// Close is called when the download manager is finished with this
	// descriptor and will not call Download again or read from the reader
	// that Download returned.
	Close()
}

// DownloadDescriptorWithRegistered is a DownloadDescriptor that has an
// additional Registered method which gets called after a downloaded layer is
// registered. This allows the user of the download manager to know the DiffID
// of each registered layer. This method is called if a cast to
// DownloadDescriptorWithRegistered is successful.
type DownloadDescriptorWithRegistered interface {
	DownloadDescriptor
	Registered(diffID layer.DiffID)
}
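
// A minimal sketch of a DownloadDescriptor implementation, assuming the layer
// tar comes from a local file (a hypothetical type for illustration; real
// implementations, such as the registry puller's, stream from a remote blob
// and may also implement Registered and distribution.Describable). It would
// additionally require the "os" and "path/filepath" imports.
//
//	type fileDescriptor struct{ path string }
//
//	func (fd *fileDescriptor) Key() string { return "file:" + fd.path }
//	func (fd *fileDescriptor) ID() string  { return filepath.Base(fd.path) }
//	func (fd *fileDescriptor) DiffID() (layer.DiffID, error) {
//		return "", errors.New("diff ID not known before download")
//	}
//	func (fd *fileDescriptor) Download(ctx context.Context, out progress.Output) (io.ReadCloser, int64, error) {
//		f, err := os.Open(fd.path)
//		if err != nil {
//			return nil, 0, err
//		}
//		fi, err := f.Stat()
//		if err != nil {
//			f.Close()
//			return nil, 0, err
//		}
//		return f, fi.Size(), nil
//	}
//	func (fd *fileDescriptor) Close() {}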

// Download is a blocking function which ensures the requested layers are
// present in the layer store. It uses the string returned by the Key method to
// deduplicate downloads. If a given layer is not already known to be present in
// the layer store, and the key is not used by an in-progress download, the
// Download method is called to get the layer tar data. Layers are then
// registered in the appropriate order. The caller must call the returned
// release function once it is done with the returned RootFS object.
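//
// An illustrative call site (a sketch; ctx, initialRootFS, descriptors, and
// progressOutput are assumed to be set up by the caller, and error handling
// is abbreviated):
//
//	rootFS, release, err := ldm.Download(ctx, initialRootFS, runtime.GOOS, descriptors, progressOutput)
//	if err != nil {
//		return err
//	}
//	defer release() // release the returned RootFS once it is no longer needed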
func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, os string, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
	var (
		topLayer       layer.Layer
		topDownload    *downloadTransfer
		watcher        *Watcher
		missingLayer   bool
		transferKey    = ""
		downloadsByKey = make(map[string]*downloadTransfer)
	)

	// Assume that the operating system is the host OS if blank, and validate it
	// to ensure we don't cause a panic by an invalid index into the layerstores.
	if os == "" {
		os = runtime.GOOS
	}
	if !system.IsOSSupported(os) {
		return image.RootFS{}, nil, system.ErrNotSupportedOperatingSystem
	}

	rootFS := initialRootFS
	for _, descriptor := range layers {
		key := descriptor.Key()
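		// The transfer key is the concatenation of every layer key seen so
		// far, so an in-flight transfer is reused only by pulls that need the
		// same layer on top of the same chain of parent layers.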
		transferKey += key

		if !missingLayer {
			missingLayer = true
			diffID, err := descriptor.DiffID()
			if err == nil {
				getRootFS := rootFS
				getRootFS.Append(diffID)
				l, err := ldm.layerStores[os].Get(getRootFS.ChainID())
				if err == nil {
					// Layer already exists.
					logrus.Debugf("Layer already exists: %s", descriptor.ID())
					progress.Update(progressOutput, descriptor.ID(), "Already exists")
					if topLayer != nil {
						layer.ReleaseAndLog(ldm.layerStores[os], topLayer)
					}
					topLayer = l
					missingLayer = false
					rootFS.Append(diffID)
					// Register this repository as a source of this layer.
					withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered)
					if hasRegistered { // As layerstore may set the driver
						withRegistered.Registered(diffID)
					}
					continue
				}
			}
		}

		// Does this layer have the same data as a previous layer in
		// the stack? If so, avoid downloading it more than once.
		var topDownloadUncasted Transfer
		if existingDownload, ok := downloadsByKey[key]; ok {
			xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload, os)
			defer topDownload.Transfer.Release(watcher)
			topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput)
			topDownload = topDownloadUncasted.(*downloadTransfer)
			continue
		}

		// Layer is not known to exist - download and register it.
		progress.Update(progressOutput, descriptor.ID(), "Pulling fs layer")

		var xferFunc DoFunc
		if topDownload != nil {
			xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload, os)
			defer topDownload.Transfer.Release(watcher)
		} else {
			xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil, os)
		}
		topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput)
		topDownload = topDownloadUncasted.(*downloadTransfer)
		downloadsByKey[key] = topDownload
	}

	if topDownload == nil {
		return rootFS, func() {
			if topLayer != nil {
				layer.ReleaseAndLog(ldm.layerStores[os], topLayer)
			}
		}, nil
	}

	// Won't be using the list built up so far - will generate it
	// from downloaded layers instead.
	rootFS.DiffIDs = []layer.DiffID{}

	defer func() {
		if topLayer != nil {
			layer.ReleaseAndLog(ldm.layerStores[os], topLayer)
		}
	}()

	select {
	case <-ctx.Done():
		topDownload.Transfer.Release(watcher)
		return rootFS, func() {}, ctx.Err()
	case <-topDownload.Done():
		break
	}

	l, err := topDownload.result()
	if err != nil {
		topDownload.Transfer.Release(watcher)
		return rootFS, func() {}, err
	}

	// Must do this exactly len(layers) times, so we don't include the
	// base layer on Windows.
	for range layers {
		if l == nil {
			topDownload.Transfer.Release(watcher)
			return rootFS, func() {}, errors.New("internal error: too few parent layers")
		}
		rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...)
		l = l.Parent()
	}
	return rootFS, func() { topDownload.Transfer.Release(watcher) }, err
}

// makeDownloadFunc returns a function that performs the layer download and
// registration. If parentDownload is non-nil, it waits for that download to
// complete before the registration step, and registers the downloaded data
// on top of parentDownload's resulting layer. Otherwise, it registers the
// layer on top of the ChainID given by parentLayer.
func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer, os string) DoFunc {
	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
		d := &downloadTransfer{
			Transfer:   NewTransfer(),
			layerStore: ldm.layerStores[os],
		}

		go func() {
			defer func() {
				close(progressChan)
			}()

			progressOutput := progress.ChanOutput(progressChan)

			select {
			case <-start:
			default:
				progress.Update(progressOutput, descriptor.ID(), "Waiting")
				<-start
			}

			if parentDownload != nil {
				// Did the parent download already fail or get
				// cancelled?
				select {
				case <-parentDownload.Done():
					_, err := parentDownload.result()
					if err != nil {
						d.err = err
						return
					}
				default:
				}
			}

			var (
				downloadReader io.ReadCloser
				size           int64
				err            error
				retries        int
			)

			defer descriptor.Close()
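
			// For reference: delay counts down in ticks of ldm.waitDuration
			// (one second by default) starting from retries*5, so the n-th
			// failed attempt is retried after roughly 5*n seconds, until
			// retries exceeds ldm.maxDownloadAttempts.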
			for {
				downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput)
				if err == nil {
					break
				}

				// If an error was returned because the context
				// was cancelled, we shouldn't retry.
				select {
				case <-d.Transfer.Context().Done():
					d.err = err
					return
				default:
				}

				retries++
				if _, isDNR := err.(DoNotRetry); isDNR || retries > ldm.maxDownloadAttempts {
					logrus.Errorf("Download failed after %d attempts: %v", retries, err)
					d.err = err
					return
				}

				logrus.Infof("Download failed, retrying (%d/%d): %v", retries, ldm.maxDownloadAttempts, err)
				delay := retries * 5
				ticker := time.NewTicker(ldm.waitDuration)

			selectLoop:
				for {
					progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1])
					select {
					case <-ticker.C:
						delay--
						if delay == 0 {
							ticker.Stop()
							break selectLoop
						}
					case <-d.Transfer.Context().Done():
						ticker.Stop()
						d.err = errors.New("download cancelled during retry delay")
						return
					}
				}
			}

			close(inactive)

			if parentDownload != nil {
				select {
				case <-d.Transfer.Context().Done():
					d.err = errors.New("layer registration cancelled")
					downloadReader.Close()
					return
				case <-parentDownload.Done():
				}

				l, err := parentDownload.result()
				if err != nil {
					d.err = err
					downloadReader.Close()
					return
				}
				parentLayer = l.ChainID()
			}

			reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting")
			defer reader.Close()

			inflatedLayerData, err := archive.DecompressStream(reader)
			if err != nil {
				d.err = fmt.Errorf("could not get decompression stream: %v", err)
				return
			}

			var src distribution.Descriptor
			if fs, ok := descriptor.(distribution.Describable); ok {
				src = fs.Descriptor()
			}
			if ds, ok := d.layerStore.(layer.DescribableStore); ok {
				d.layer, err = ds.RegisterWithDescriptor(inflatedLayerData, parentLayer, src)
			} else {
				d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer)
			}
			if err != nil {
				select {
				case <-d.Transfer.Context().Done():
					d.err = errors.New("layer registration cancelled")
				default:
					d.err = fmt.Errorf("failed to register layer: %v", err)
				}
				return
			}

			progress.Update(progressOutput, descriptor.ID(), "Pull complete")
			withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered)
			if hasRegistered {
				withRegistered.Registered(d.layer.DiffID())
			}

			// Doesn't actually need to be its own goroutine, but
			// done like this so we can defer close(c).
			go func() {
				<-d.Transfer.Released()
				if d.layer != nil {
					layer.ReleaseAndLog(d.layerStore, d.layer)
				}
			}()
		}()

		return d
	}
}

// makeDownloadFuncFromDownload returns a function that performs the layer
// registration when the layer data is coming from an existing download. It
// waits for sourceDownload and parentDownload to complete, and then
// reregisters the data from sourceDownload's top layer on top of
// parentDownload. This function does not log progress output because it would
// interfere with the progress reporting for sourceDownload, which has the same
// Key.
func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer, os string) DoFunc {
	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
		d := &downloadTransfer{
			Transfer:   NewTransfer(),
			layerStore: ldm.layerStores[os],
		}

		go func() {
			defer func() {
				close(progressChan)
			}()

			<-start

			close(inactive)

			select {
			case <-d.Transfer.Context().Done():
				d.err = errors.New("layer registration cancelled")
				return
			case <-parentDownload.Done():
			}

			l, err := parentDownload.result()
			if err != nil {
				d.err = err
				return
			}
			parentLayer := l.ChainID()

			// sourceDownload should have already finished if
			// parentDownload finished, but wait for it explicitly
			// to be sure.
			select {
			case <-d.Transfer.Context().Done():
				d.err = errors.New("layer registration cancelled")
				return
			case <-sourceDownload.Done():
			}

			l, err = sourceDownload.result()
			if err != nil {
				d.err = err
				return
			}

			layerReader, err := l.TarStream()
			if err != nil {
				d.err = err
				return
			}
			defer layerReader.Close()

			var src distribution.Descriptor
			if fs, ok := l.(distribution.Describable); ok {
				src = fs.Descriptor()
			}
			if ds, ok := d.layerStore.(layer.DescribableStore); ok {
				d.layer, err = ds.RegisterWithDescriptor(layerReader, parentLayer, src)
			} else {
				d.layer, err = d.layerStore.Register(layerReader, parentLayer)
			}
			if err != nil {
				d.err = fmt.Errorf("failed to register layer: %v", err)
				return
			}

			withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered)
			if hasRegistered {
				withRegistered.Registered(d.layer.DiffID())
			}

			// Doesn't actually need to be its own goroutine, but
			// done like this so we can defer close(c).
			go func() {
				<-d.Transfer.Released()
				if d.layer != nil {
					layer.ReleaseAndLog(d.layerStore, d.layer)
				}
			}()
		}()

		return d
	}
}