package xfer // import "github.com/docker/docker/distribution/xfer"

import (
	"context"
	"errors"
	"time"

	"github.com/docker/distribution"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/progress"
	"github.com/sirupsen/logrus"
)

// maxUploadAttempts is the total number of times a single layer upload is
// attempted before the push is failed.
const maxUploadAttempts = 5
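
// With the default one-second waitDuration, a push whose layer upload keeps
// failing waits 5s, 10s, 15s and 20s between its five attempts before giving
// up (illustrative arithmetic; see the retry loop in makeUploadFunc below).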

// LayerUploadManager provides task management and progress reporting for
// uploads.
type LayerUploadManager struct {
	tm           *transferManager
	waitDuration time.Duration
}

// SetConcurrency sets the maximum number of concurrent uploads for each push.
func (lum *LayerUploadManager) SetConcurrency(concurrency int) {
	lum.tm.setConcurrency(concurrency)
}

// NewLayerUploadManager returns a new LayerUploadManager.
func NewLayerUploadManager(concurrencyLimit int, options ...func(*LayerUploadManager)) *LayerUploadManager {
	manager := LayerUploadManager{
		tm:           newTransferManager(concurrencyLimit),
		waitDuration: time.Second,
	}
	for _, option := range options {
		option(&manager)
	}
	return &manager
}
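
// Illustrative usage (not part of the original file): the variadic options
// run after the defaults are set, so same-package callers such as tests can
// tweak unexported fields. For example, a hypothetical caller might write:
//
//	lum := NewLayerUploadManager(4, func(m *LayerUploadManager) {
//		m.waitDuration = 10 * time.Millisecond
//	})
//	lum.SetConcurrency(2)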

// uploadTransfer tracks the state of a single deduplicated layer upload.
type uploadTransfer struct {
	transfer

	remoteDescriptor distribution.Descriptor
	err              error
}

// An UploadDescriptor references a layer that may need to be uploaded.
type UploadDescriptor interface {
	// Key returns the key used to deduplicate uploads.
	Key() string
	// ID returns the ID for display purposes.
	ID() string
	// DiffID should return the DiffID for this layer.
	DiffID() layer.DiffID
	// Upload is called to perform the upload.
	Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error)
	// SetRemoteDescriptor provides the distribution.Descriptor that was
	// returned by Upload. This descriptor is not to be confused with
	// the UploadDescriptor interface, which is used internally to
	// identify layers that are being uploaded.
	SetRemoteDescriptor(descriptor distribution.Descriptor)
}
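
// For illustration only (no such type exists in this file): a hypothetical
// minimal implementation, with the real registry interaction stubbed out,
// could look like this:
//
//	type noopUploadDescriptor struct{ diffID layer.DiffID }
//
//	func (d *noopUploadDescriptor) Key() string          { return "noop:" + string(d.diffID) }
//	func (d *noopUploadDescriptor) ID() string           { return string(d.diffID) }
//	func (d *noopUploadDescriptor) DiffID() layer.DiffID { return d.diffID }
//	func (d *noopUploadDescriptor) Upload(ctx context.Context, out progress.Output) (distribution.Descriptor, error) {
//		return distribution.Descriptor{}, nil
//	}
//	func (d *noopUploadDescriptor) SetRemoteDescriptor(distribution.Descriptor) {}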

// Upload is a blocking function which ensures the listed layers are present on
// the remote registry. It uses the string returned by the Key method to
// deduplicate uploads.
func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error {
	var (
		uploads          []*uploadTransfer
		dedupDescriptors = make(map[string]*uploadTransfer)
	)

	for _, descriptor := range layers {
		progress.Update(progressOutput, descriptor.ID(), "Preparing")

		key := descriptor.Key()
		if _, present := dedupDescriptors[key]; present {
			// A layer with the same key is already scheduled as part
			// of this push; reuse that transfer.
			continue
		}

		xferFunc := lum.makeUploadFunc(descriptor)
		upload, watcher := lum.tm.transfer(descriptor.Key(), xferFunc, progressOutput)
		defer upload.release(watcher)
		uploads = append(uploads, upload.(*uploadTransfer))
		dedupDescriptors[key] = upload.(*uploadTransfer)
	}

	// Wait for each upload to complete (or for the push context to be
	// cancelled); the first error fails the whole push.
	for _, upload := range uploads {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-upload.transfer.done():
			if upload.err != nil {
				return upload.err
			}
		}
	}

	// Propagate the registry-reported descriptor to every layer in the list,
	// including those whose upload was deduplicated.
	for _, l := range layers {
		l.SetRemoteDescriptor(dedupDescriptors[l.Key()].remoteDescriptor)
	}

	return nil
}
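
// Illustrative call pattern (not from the original file), assuming the caller
// already has UploadDescriptor values for every layer in the push:
//
//	if err := lum.Upload(ctx, descriptors, progressOutput); err != nil {
//		return err
//	}
//	// Every descriptor has now received the registry's distribution.Descriptor
//	// through SetRemoteDescriptor.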

// makeUploadFunc returns the doFunc that performs the upload for descriptor,
// making up to maxUploadAttempts attempts unless the error is DoNotRetry or
// the transfer context is cancelled.
func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) doFunc {
	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) transfer {
		u := &uploadTransfer{
			transfer: newTransfer(),
		}

		go func() {
			defer func() {
				close(progressChan)
			}()

			progressOutput := progress.ChanOutput(progressChan)

			// Block until the transfer manager grants a concurrency
			// slot, reporting "Waiting" while queued.
			select {
			case <-start:
			default:
				progress.Update(progressOutput, descriptor.ID(), "Waiting")
				<-start
			}

			retries := 0
			for {
				remoteDescriptor, err := descriptor.Upload(u.transfer.context(), progressOutput)
				if err == nil {
					u.remoteDescriptor = remoteDescriptor
					break
				}

				// If an error was returned because the context
				// was cancelled, we shouldn't retry.
				select {
				case <-u.transfer.context().Done():
					u.err = err
					return
				default:
				}

				retries++
				if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts {
					logrus.Errorf("Upload failed: %v", err)
					u.err = err
					return
				}

				logrus.Errorf("Upload failed, retrying: %v", err)
				// Linear back-off: wait retries*5 ticks of waitDuration
				// (5s, 10s, 15s... with the default one-second tick)
				// before the next attempt.
				delay := retries * 5
				ticker := time.NewTicker(lum.waitDuration)

			selectLoop:
				for {
					progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1])
					select {
					case <-ticker.C:
						delay--
						if delay == 0 {
							ticker.Stop()
							break selectLoop
						}
					case <-u.transfer.context().Done():
						ticker.Stop()
						u.err = errors.New("upload cancelled during retry delay")
						return
					}
				}
			}
		}()

		return u
	}
}