package distribution // import "github.com/docker/docker/distribution"

import (
	"bufio"
	"compress/gzip"
	"context"
	"fmt"
	"io"

	"github.com/docker/distribution/reference"
	"github.com/docker/docker/pkg/progress"
	"github.com/sirupsen/logrus"
)
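
// compressionBufSize is the buffer size used when writing compressed layer
// data to the upload pipe (see compress below); buffering avoids excessive
// chunking in the HTTP request.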
const compressionBufSize = 32768

// Push initiates a push operation on ref. ref is the specific variant of the
// image to push. If no tag is provided, all tags are pushed.
func Push(ctx context.Context, ref reference.Named, config *ImagePushConfig) error {
	// FIXME: Allow to interrupt current push when new push of same image is done.

	// Resolve the Repository name from fqn to RepositoryInfo
	repoInfo, err := config.RegistryService.ResolveRepository(ref)
	if err != nil {
		return err
	}

	endpoints, err := config.RegistryService.LookupPushEndpoints(reference.Domain(repoInfo.Name))
	if err != nil {
		return err
	}

	progress.Messagef(config.ProgressOutput, "", "The push refers to repository [%s]", repoInfo.Name.Name())

	associations := config.ReferenceStore.ReferencesByName(repoInfo.Name)
	if len(associations) == 0 {
		return fmt.Errorf("An image does not exist locally with the tag: %s", reference.FamiliarName(repoInfo.Name))
	}

	var (
		lastErr error

		// confirmedTLSRegistries is a map indicating which registries
		// are known to be using TLS. There should never be a plaintext
		// retry for any of these.
		confirmedTLSRegistries = make(map[string]struct{})
	)
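
	// Try each endpoint in turn; only errors wrapped as fallbackError allow
	// moving on to the next endpoint, anything else aborts the push.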
	for _, endpoint := range endpoints {
		if endpoint.URL.Scheme != "https" {
			if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS {
				logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL)
				continue
			}
		}

		logrus.Debugf("Trying to push %s to %s", repoInfo.Name.Name(), endpoint.URL)

		if err := newPusher(ref, endpoint, repoInfo, config).push(ctx); err != nil {
			// Was this push cancelled? If so, don't try to fall
			// back.
			select {
			case <-ctx.Done():
			default:
				if fallbackErr, ok := err.(fallbackError); ok {
					if fallbackErr.transportOK && endpoint.URL.Scheme == "https" {
						confirmedTLSRegistries[endpoint.URL.Host] = struct{}{}
					}
					err = fallbackErr.err
					lastErr = err
					logrus.Infof("Attempting next endpoint for push after error: %v", err)
					continue
				}
			}

			logrus.Errorf("Not continuing with push after error: %v", err)
			return err
		}
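
		// The push succeeded on this endpoint; log the event and stop trying
		// further endpoints.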
		config.ImageEventLogger(reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "push")
		return nil
	}
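
	// No endpoint succeeded. Surface the last fallback error, or report that
	// no usable endpoints were found.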
	if lastErr == nil {
		lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.Name.Name())
	}
	return lastErr
}

// compress returns an io.ReadCloser which will supply a compressed version of
// the provided Reader. The caller must close the ReadCloser after reading the
// compressed data.
//
// Note that this function returns a reader instead of taking a writer as an
// argument so that it can be used with httpBlobWriter's ReadFrom method.
// Using httpBlobWriter's Write method would send a PATCH request for every
// Write call.
//
// The second return value is a channel that gets closed when the goroutine
// is finished. This allows the caller to make sure the goroutine finishes
// before it releases any resources connected with the reader that was
// passed in.
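//
// A typical call site (illustrative sketch; blobWriter and layer are
// hypothetical names for the upload writer and the layer reader) looks like:
//
//	compressed, done := compress(layer)
//	_, err := blobWriter.ReadFrom(compressed)
//	compressed.Close()
//	<-done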
func compress(in io.Reader) (io.ReadCloser, chan struct{}) {
	compressionDone := make(chan struct{})

	pipeReader, pipeWriter := io.Pipe()
	// Use a bufio.Writer to avoid excessive chunking in HTTP request.
	bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize)
	compressor := gzip.NewWriter(bufWriter)

	go func() {
		_, err := io.Copy(compressor, in)
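		// Close the gzip writer before flushing bufWriter: Close writes the
		// gzip footer into bufWriter, and the flush then pushes all buffered
		// bytes through to the pipe.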
		if err == nil {
			err = compressor.Close()
		}
		if err == nil {
			err = bufWriter.Flush()
		}
		if err != nil {
			pipeWriter.CloseWithError(err)
		} else {
			pipeWriter.Close()
		}
		close(compressionDone)
	}()

	return pipeReader, compressionDone
}