Mirror of https://github.com/moby/moby.git (synced 2022-11-09 12:21:53 -05:00)
Merge pull request #18785 from aaronlehmann/new-manifest

New image manifest format

commit f11b6a2ab3
62 changed files with 2330 additions and 1165 deletions

Dockerfile (11 changed lines)
@@ -147,14 +147,21 @@ RUN set -x \
	) \
	&& rm -rf "$SECCOMP_PATH"

# Install registry
ENV REGISTRY_COMMIT ec87e9b6971d831f0eff752ddb54fb64693e51cd
# Install two versions of the registry. The first is an older version that
# only supports schema1 manifests. The second is a newer version that supports
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
ENV REGISTRY_COMMIT a7ae88da459b98b481a245e5b1750134724ac67d
RUN set -x \
	&& export GOPATH="$(mktemp -d)" \
	&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
	&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
	&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
		go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
	&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \
	&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
		go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \
	&& rm -rf "$GOPATH"

# Install notary server

@@ -12,8 +12,11 @@ import (
	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/client"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"

@@ -27,6 +30,8 @@ import (
	"golang.org/x/net/context"
)

var errRootFSMismatch = errors.New("layers from manifest don't match image configuration")

type v2Puller struct {
	blobSumService *metadata.BlobSumService
	endpoint       registry.APIEndpoint

@@ -61,18 +66,12 @@ func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) {
func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) {
	var layersDownloaded bool
	if !reference.IsNameOnly(ref) {
		var err error
		layersDownloaded, err = p.pullV2Tag(ctx, ref)
		if err != nil {
			return err
		}
	} else {
		manSvc, err := p.repo.Manifests(ctx)
		if err != nil {
			return err
		}

		tags, err := manSvc.Tags()
		tags, err := p.repo.Tags(ctx).All(ctx)
		if err != nil {
			// If this repository doesn't exist on V2, we should
			// permit a fallback to V1.

@@ -84,8 +83,6 @@ func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) {
		// error later on.
		p.confirmedV2 = true

		// This probably becomes a lot nicer after the manifest
		// refactor...
		for _, tag := range tags {
			tagRef, err := reference.WithTag(ref, tag)
			if err != nil {

@@ -203,58 +200,111 @@ func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
}

func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
	tagOrDigest := ""
	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		tagOrDigest = tagged.Tag()
	} else if digested, isCanonical := ref.(reference.Canonical); isCanonical {
		tagOrDigest = digested.Digest().String()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
	}

	logrus.Debugf("Pulling ref from V2 registry: %s:%s", ref.FullName(), tagOrDigest)

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return false, err
	}

	unverifiedManifest, err := manSvc.GetByTag(tagOrDigest)
	var (
		manifest    distribution.Manifest
		tagOrDigest string // Used for logging/progress only
	)
	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		// NOTE: not using TagService.Get, since it uses HEAD requests
		// against the manifests endpoint, which are not supported by
		// all registry versions.
		manifest, err = manSvc.Get(ctx, "", client.WithTag(tagged.Tag()))
		if err != nil {
		// If this manifest did not exist, we should allow a possible
		// fallback to the v1 protocol, because dual-version setups may
		// not host all manifests with the v2 protocol. We may also get
		// a "not authorized" error if the manifest doesn't exist.
			return false, allowV1Fallback(err)
		}
	if unverifiedManifest == nil {
		tagOrDigest = tagged.Tag()
	} else if digested, isDigested := ref.(reference.Canonical); isDigested {
		manifest, err = manSvc.Get(ctx, digested.Digest())
		if err != nil {
			return false, err
		}
		tagOrDigest = digested.Digest().String()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
	}

	if manifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}

	// If GetByTag succeeded, we can be confident that the registry on
	// If manSvc.Get succeeded, we can be confident that the registry on
	// the other side speaks the v2 protocol.
	p.confirmedV2 = true

	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifyManifest(unverifiedManifest, ref)
	logrus.Debugf("Pulling ref from V2 registry: %s", ref.String())
	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Name())

	var (
		imageID        image.ID
		manifestDigest digest.Digest
	)

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		imageID, manifestDigest, err = p.pullSchema1(ctx, ref, v)
		if err != nil {
			return false, err
		}
	case *schema2.DeserializedManifest:
		imageID, manifestDigest, err = p.pullSchema2(ctx, ref, v)
		if err != nil {
			return false, err
		}
	case *manifestlist.DeserializedManifestList:
		imageID, manifestDigest, err = p.pullManifestList(ctx, ref, v)
		if err != nil {
			return false, err
		}
	default:
		return false, errors.New("unsupported manifest format")
	}

	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())

	oldTagImageID, err := p.config.ReferenceStore.Get(ref)
	if err == nil {
		if oldTagImageID == imageID {
			return false, nil
		}
	} else if err != reference.ErrDoesNotExist {
		return false, err
	}

	if canonical, ok := ref.(reference.Canonical); ok {
		if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil {
			return false, err
		}
	} else if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil {
		return false, err
	}

	return true, nil
}

func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) {
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
	if err != nil {
		return "", "", err
	}

	rootFS := image.NewRootFS()

	if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil {
		return false, err
		return "", "", err
	}

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return false, err
		return "", "", err
	}

	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Name())

	var descriptors []xfer.DownloadDescriptor

	// Image history converted to the new format
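
The hunk above replaces the schema1-only pull path with a type switch over the manifest returned by the registry. A minimal standalone sketch of that dispatch, using the docker/distribution manifest types imported in this change; the handler fields are hypothetical stand-ins for the puller's pullSchema1/pullSchema2/pullManifestList methods, which additionally return an image ID and digest:

package sketch

import (
	"errors"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
)

// handlers is a hypothetical container for per-format pull functions.
type handlers struct {
	schema1Fn func(*schema1.SignedManifest) error
	schema2Fn func(*schema2.DeserializedManifest) error
	listFn    func(*manifestlist.DeserializedManifestList) error
}

// dispatch mirrors the switch added to pullV2Tag: pick a handler based on the
// concrete manifest type, and reject anything unknown.
func dispatch(m distribution.Manifest, h handlers) error {
	switch v := m.(type) {
	case *schema1.SignedManifest:
		return h.schema1Fn(v)
	case *schema2.DeserializedManifest:
		return h.schema2Fn(v)
	case *manifestlist.DeserializedManifestList:
		return h.listFn(v)
	default:
		return errors.New("unsupported manifest format")
	}
}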
@@ -269,12 +319,12 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return false, err
			return "", "", err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return false, err
			return "", "", err
		}
		history = append(history, h)

@@ -293,43 +343,257 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {

	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
	if err != nil {
		return false, err
		return "", "", err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
	if err != nil {
		return false, err
		return "", "", err
	}

	imageID, err := p.config.ImageStore.Create(config)
	imageID, err = p.config.ImageStore.Create(config)
	if err != nil {
		return false, err
		return "", "", err
	}

	manifestDigest, _, err := digestFromManifest(unverifiedManifest, p.repoInfo)
	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)

	return imageID, manifestDigest, nil
}

func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) {
	manifestDigest, err = schema2ManifestDigest(ref, mfst)
	if err != nil {
		return false, err
		return "", "", err
	}

	if manifestDigest != "" {
		progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())
	target := mfst.Target()
	imageID = image.ID(target.Digest)
	if _, err := p.config.ImageStore.Get(imageID); err == nil {
		// If the image already exists locally, no need to pull
		// anything.
		return imageID, manifestDigest, nil
	}

	oldTagImageID, err := p.config.ReferenceStore.Get(ref)
	if err == nil && oldTagImageID == imageID {
		return false, nil
	configChan := make(chan []byte, 1)
	errChan := make(chan error, 1)
	var cancel func()
	ctx, cancel = context.WithCancel(ctx)

	// Pull the image config
	go func() {
		configJSON, err := p.pullSchema2ImageConfig(ctx, target.Digest)
		if err != nil {
			errChan <- err
			cancel()
			return
		}
		configChan <- configJSON
	}()

	var descriptors []xfer.DownloadDescriptor

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for _, d := range mfst.References() {
		layerDescriptor := &v2LayerDescriptor{
			digest:         d.Digest,
			repo:           p.repo,
			blobSumService: p.blobSumService,
		}

	if canonical, ok := ref.(reference.Canonical); ok {
		if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil {
			return false, err
		}
	} else if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil {
		return false, err
		descriptors = append(descriptors, layerDescriptor)
	}

	return true, nil
	var (
		configJSON         []byte       // raw serialized image config
		unmarshalledConfig image.Image  // deserialized image config
		downloadRootFS     image.RootFS // rootFS to use for registering layers.
	)
	if runtime.GOOS == "windows" {
		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
		if err != nil {
			return "", "", err
		}
		if unmarshalledConfig.RootFS == nil {
			return "", "", errors.New("image config has no rootfs section")
		}
		downloadRootFS = *unmarshalledConfig.RootFS
		downloadRootFS.DiffIDs = []layer.DiffID{}
	} else {
		downloadRootFS = *image.NewRootFS()
	}

	rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
	if err != nil {
		if configJSON != nil {
			// Already received the config
			return "", "", err
		}
		select {
		case err = <-errChan:
			return "", "", err
		default:
			cancel()
			select {
			case <-configChan:
			case <-errChan:
			}
			return "", "", err
		}
	}
	defer release()

	if configJSON == nil {
		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
		if err != nil {
			return "", "", err
		}
	}

	// The DiffIDs returned in rootFS MUST match those in the config.
	// Otherwise the image config could be referencing layers that aren't
	// included in the manifest.
	if len(rootFS.DiffIDs) != len(unmarshalledConfig.RootFS.DiffIDs) {
		return "", "", errRootFSMismatch
	}

	for i := range rootFS.DiffIDs {
		if rootFS.DiffIDs[i] != unmarshalledConfig.RootFS.DiffIDs[i] {
			return "", "", errRootFSMismatch
		}
	}

	imageID, err = p.config.ImageStore.Create(configJSON)
	if err != nil {
		return "", "", err
	}

	return imageID, manifestDigest, nil
}

func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) {
	select {
	case configJSON := <-configChan:
		var unmarshalledConfig image.Image
		if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil {
			return nil, image.Image{}, err
		}
		return configJSON, unmarshalledConfig, nil
	case err := <-errChan:
		return nil, image.Image{}, err
		// Don't need a case for ctx.Done in the select because cancellation
		// will trigger an error in p.pullSchema2ImageConfig.
	}
}

// pullManifestList handles "manifest lists" which point to various
// platform-specifc manifests.
func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (imageID image.ID, manifestListDigest digest.Digest, err error) {
	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
	if err != nil {
		return "", "", err
	}

	var manifestDigest digest.Digest
	for _, manifestDescriptor := range mfstList.Manifests {
		// TODO(aaronl): The manifest list spec supports optional
		// "features" and "variant" fields. These are not yet used.
		// Once they are, their values should be interpreted here.
		if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS {
			manifestDigest = manifestDescriptor.Digest
			break
		}
	}

	if manifestDigest == "" {
		return "", "", errors.New("no supported platform found in manifest list")
	}

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return "", "", err
	}

	manifest, err := manSvc.Get(ctx, manifestDigest)
	if err != nil {
		return "", "", err
	}

	manifestRef, err := reference.WithDigest(ref, manifestDigest)
	if err != nil {
		return "", "", err
	}

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		imageID, _, err = p.pullSchema1(ctx, manifestRef, v)
		if err != nil {
			return "", "", err
		}
	case *schema2.DeserializedManifest:
		imageID, _, err = p.pullSchema2(ctx, manifestRef, v)
		if err != nil {
			return "", "", err
		}
	default:
		return "", "", errors.New("unsupported manifest format")
	}

	return imageID, manifestListDigest, err
}

func (p *v2Puller) pullSchema2ImageConfig(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
	blobs := p.repo.Blobs(ctx)
	configJSON, err = blobs.Get(ctx, dgst)
	if err != nil {
		return nil, err
	}

	// Verify image config digest
	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		return nil, err
	}
	if _, err := verifier.Write(configJSON); err != nil {
		return nil, err
	}
	if !verifier.Verified() {
		err := fmt.Errorf("image config verification failed for digest %s", dgst)
		logrus.Error(err)
		return nil, err
	}

	return configJSON, nil
}

// schema2ManifestDigest computes the manifest digest, and, if pulling by
// digest, ensures that it matches the requested digest.
func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
	_, canonical, err := mfst.Payload()
	if err != nil {
		return "", err
	}

	// If pull by digest, then verify the manifest digest.
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		verifier, err := digest.NewDigestVerifier(digested.Digest())
		if err != nil {
			return "", err
		}
		if _, err := verifier.Write(canonical); err != nil {
			return "", err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return "", err
		}
		return digested.Digest(), nil
	}

	return digest.FromBytes(canonical), nil
}

// allowV1Fallback checks if the error is a possible reason to fallback to v1
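
schema2ManifestDigest and pullSchema2ImageConfig above both rely on two primitives from github.com/docker/distribution/digest: digest.FromBytes to compute a canonical digest and digest.NewDigestVerifier to check received bytes against an expected digest. A minimal sketch of that verification step under those assumptions; the helper name is hypothetical:

package sketch

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

// verifyPayload checks raw manifest (or image config) bytes against an
// expected digest, following the same pattern as the pull code above.
func verifyPayload(payload []byte, expected digest.Digest) error {
	verifier, err := digest.NewDigestVerifier(expected)
	if err != nil {
		return err
	}
	if _, err := verifier.Write(payload); err != nil {
		return err
	}
	if !verifier.Verified() {
		return fmt.Errorf("verification failed for digest %s", expected)
	}
	return nil
}

When no digest is pinned by the reference, the pull code instead computes one with digest.FromBytes(payload), as in the last line of schema2ManifestDigest.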
@@ -353,7 +617,7 @@ func allowV1Fallback(err error) error {
	return err
}

func verifyManifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
	// If pull by digest, then verify the manifest digest. NOTE: It is
	// important to do this first, before any other content validation. If the
	// digest cannot be verified, don't even bother with those other things.

@@ -362,13 +626,7 @@ func verifyManifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
		if err != nil {
			return nil, err
		}
		payload, err := signedManifest.Payload()
		if err != nil {
			// If this failed, the signatures section was corrupted
			// or missing. Treat the entire manifest as the payload.
			payload = signedManifest.Raw
		}
		if _, err := verifier.Write(payload); err != nil {
		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
			return nil, err
		}
		if !verifier.Verified() {

@@ -376,15 +634,8 @@ func verifyManifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
			logrus.Error(err)
			return nil, err
		}

		var verifiedManifest schema1.Manifest
		if err = json.Unmarshal(payload, &verifiedManifest); err != nil {
			return nil, err
	}
		m = &verifiedManifest
	} else {
	m = &signedManifest.Manifest
	}

	if m.SchemaVersion != 1 {
		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String())

@@ -123,7 +123,7 @@ func TestValidateManifest(t *testing.T) {
		t.Fatal("error unmarshaling manifest:", err)
	}

	verifiedManifest, err := verifyManifest(&goodSignedManifest, expectedDigest)
	verifiedManifest, err := verifySchema1Manifest(&goodSignedManifest, expectedDigest)
	if err != nil {
		t.Fatal("validateManifest failed:", err)
	}

@@ -145,7 +145,7 @@ func TestValidateManifest(t *testing.T) {
		t.Fatal("error unmarshaling manifest:", err)
	}

	verifiedManifest, err = verifyManifest(&extraDataSignedManifest, expectedDigest)
	verifiedManifest, err = verifySchema1Manifest(&extraDataSignedManifest, expectedDigest)
	if err != nil {
		t.Fatal("validateManifest failed:", err)
	}

@@ -167,7 +167,7 @@ func TestValidateManifest(t *testing.T) {
		t.Fatal("error unmarshaling manifest:", err)
	}

	verifiedManifest, err = verifyManifest(&badSignedManifest, expectedDigest)
	verifiedManifest, err = verifySchema1Manifest(&badSignedManifest, expectedDigest)
	if err == nil || !strings.HasPrefix(err.Error(), "image verification failed for digest") {
		t.Fatal("expected validateManifest to fail with digest error")
	}

@@ -7,7 +7,6 @@ import (
	"io"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution/digest"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"

@@ -77,7 +76,6 @@ func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *reg
			endpoint:       endpoint,
			repoInfo:       repoInfo,
			config:         imagePushConfig,
			layersPushed:   pushMap{layersPushed: make(map[digest.Digest]bool)},
		}, nil
	case registry.APIVersion1:
		return &v1Pusher{

@@ -1,22 +1,20 @@
package distribution

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/registry/client"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"

@@ -43,75 +41,75 @@ type v2Pusher struct {
	config         *ImagePushConfig
	repo           distribution.Repository

	// pushState is state built by the Download functions.
	pushState pushState
}

type pushState struct {
	sync.Mutex
	// remoteLayers is the set of layers known to exist on the remote side.
	// This avoids redundant queries when pushing multiple tags that
	// involve the same layers. It is also used to fill in digest and size
	// information when building the manifest.
	remoteLayers map[layer.DiffID]distribution.Descriptor
	// confirmedV2 is set to true if we confirm we're talking to a v2
	// registry. This is used to limit fallbacks to the v1 protocol.
	confirmedV2 bool

	// layersPushed is the set of layers known to exist on the remote side.
	// This avoids redundant queries when pushing multiple tags that
	// involve the same layers.
	layersPushed pushMap
}

type pushMap struct {
	sync.Mutex
	layersPushed map[digest.Digest]bool
}

func (p *v2Pusher) Push(ctx context.Context) (err error) {
	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
	p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor)

	p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
	if err != nil {
		logrus.Debugf("Error getting v2 registry: %v", err)
		return fallbackError{err: err, confirmedV2: p.confirmedV2}
		return fallbackError{err: err, confirmedV2: p.pushState.confirmedV2}
	}

	if err = p.pushV2Repository(ctx); err != nil {
		if registry.ContinueOnError(err) {
			return fallbackError{err: err, confirmedV2: p.confirmedV2}
			return fallbackError{err: err, confirmedV2: p.pushState.confirmedV2}
		}
	}
	return err
}

func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) {
	var associations []reference.Association
	if _, isTagged := p.ref.(reference.NamedTagged); isTagged {
	if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged {
		imageID, err := p.config.ReferenceStore.Get(p.ref)
		if err != nil {
			return fmt.Errorf("tag does not exist: %s", p.ref.String())
		}

		associations = []reference.Association{
			{
				Ref:     p.ref,
				ImageID: imageID,
			},
		}
	} else {
		// Pull all tags
		associations = p.config.ReferenceStore.ReferencesByName(p.ref)
	}
	if err != nil {
		return fmt.Errorf("error getting tags for %s: %s", p.repoInfo.Name(), err)
	}
	if len(associations) == 0 {
		return fmt.Errorf("no tags to push for %s", p.repoInfo.Name())
		return p.pushV2Tag(ctx, namedTagged, imageID)
	}

	for _, association := range associations {
		if err := p.pushV2Tag(ctx, association); err != nil {
	if !reference.IsNameOnly(p.ref) {
		return errors.New("cannot push a digest reference")
	}

	// Pull all tags
	pushed := 0
	for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) {
		if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged {
			pushed++
			if err := p.pushV2Tag(ctx, namedTagged, association.ImageID); err != nil {
				return err
			}
		}
	}

	if pushed == 0 {
		return fmt.Errorf("no tags to push for %s", p.repoInfo.Name())
	}

	return nil
}

func (p *v2Pusher) pushV2Tag(ctx context.Context, association reference.Association) error {
	ref := association.Ref
func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, imageID image.ID) error {
	logrus.Debugf("Pushing repository: %s", ref.String())

	img, err := p.config.ImageStore.Get(association.ImageID)
	img, err := p.config.ImageStore.Get(imageID)
	if err != nil {
		return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err)
	}

@@ -134,18 +132,7 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, association reference.Association) error {
	descriptorTemplate := v2PushDescriptor{
		blobSumService: p.blobSumService,
		repo:           p.repo,
		layersPushed:   &p.layersPushed,
		confirmedV2:    &p.confirmedV2,
	}

	// Push empty layer if necessary
	for _, h := range img.History {
		if h.EmptyLayer {
			descriptor := descriptorTemplate
			descriptor.layer = layer.EmptyLayer
			descriptors = []xfer.UploadDescriptor{&descriptor}
			break
		}
		pushState:      &p.pushState,
	}

	// Loop bounds condition is to avoid pushing the base layer on Windows.

@@ -157,52 +144,75 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, association reference.Association) error {
		l = l.Parent()
	}

	fsLayers, err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput)
	if err != nil {
	if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil {
		return err
	}

	var tag string
	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		tag = tagged.Tag()
	}
	m, err := CreateV2Manifest(p.repo.Name(), tag, img, fsLayers)
	// Try schema2 first
	builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), img.RawJSON())
	manifest, err := manifestFromBuilder(ctx, builder, descriptors)
	if err != nil {
		return err
	}

	logrus.Infof("Signed manifest for %s using daemon's key: %s", ref.String(), p.config.TrustKey.KeyID())
	signed, err := schema1.Sign(m, p.config.TrustKey)
	if err != nil {
		return err
	}

	manifestDigest, manifestSize, err := digestFromManifest(signed, ref)
	if err != nil {
		return err
	}
	if manifestDigest != "" {
		if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
			progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", tagged.Tag(), manifestDigest, manifestSize)
			// Signal digest to the trust client so it can sign the
			// push, if appropriate.
			progress.Aux(p.config.ProgressOutput, PushResult{Tag: tagged.Tag(), Digest: manifestDigest, Size: manifestSize})
		}
	}

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return err
	}
	return manSvc.Put(signed)

	putOptions := []distribution.ManifestServiceOption{client.WithTag(ref.Tag())}
	if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
		logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err)

		builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, p.repo.Name(), ref.Tag(), img.RawJSON())
		manifest, err = manifestFromBuilder(ctx, builder, descriptors)
		if err != nil {
			return err
		}

		if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
			return err
		}
	}

	var canonicalManifest []byte

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		canonicalManifest = v.Canonical
	case *schema2.DeserializedManifest:
		_, canonicalManifest, err = v.Payload()
		if err != nil {
			return err
		}
	}

	manifestDigest := digest.FromBytes(canonicalManifest)
	progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest))
	// Signal digest to the trust client so it can sign the
	// push, if appropriate.
	progress.Aux(p.config.ProgressOutput, PushResult{Tag: ref.Tag(), Digest: manifestDigest, Size: len(canonicalManifest)})

	return nil
}

func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) {
	// descriptors is in reverse order; iterate backwards to get references
	// appended in the right order.
	for i := len(descriptors) - 1; i >= 0; i-- {
		if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil {
			return nil, err
		}
	}

	return builder.Build(ctx)
}

type v2PushDescriptor struct {
	layer          layer.Layer
	blobSumService *metadata.BlobSumService
	repo           distribution.Repository
	layersPushed   *pushMap
	confirmedV2    *bool
	pushState      *pushState
}

func (pd *v2PushDescriptor) Key() string {

@@ -217,25 +227,38 @@ func (pd *v2PushDescriptor) DiffID() layer.DiffID {
	return pd.layer.DiffID()
}

func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error) {
func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) error {
	diffID := pd.DiffID()

	logrus.Debugf("Pushing layer: %s", diffID)
	pd.pushState.Lock()
	if _, ok := pd.pushState.remoteLayers[diffID]; ok {
		// it is already known that the push is not needed and
		// therefore doing a stat is unnecessary
		pd.pushState.Unlock()
		progress.Update(progressOutput, pd.ID(), "Layer already exists")
		return nil
	}
	pd.pushState.Unlock()

	// Do we have any blobsums associated with this layer's DiffID?
	possibleBlobsums, err := pd.blobSumService.GetBlobSums(diffID)
	if err == nil {
		dgst, exists, err := blobSumAlreadyExists(ctx, possibleBlobsums, pd.repo, pd.layersPushed)
		descriptor, exists, err := blobSumAlreadyExists(ctx, possibleBlobsums, pd.repo, pd.pushState)
		if err != nil {
			progress.Update(progressOutput, pd.ID(), "Image push failed")
			return "", retryOnError(err)
			return retryOnError(err)
		}
		if exists {
			progress.Update(progressOutput, pd.ID(), "Layer already exists")
			return dgst, nil
			pd.pushState.Lock()
			pd.pushState.remoteLayers[diffID] = descriptor
			pd.pushState.Unlock()
			return nil
		}
	}

	logrus.Debugf("Pushing layer: %s", diffID)

	// if digest was empty or not saved, or if blob does not exist on the remote repository,
	// then push the blob.
	bs := pd.repo.Blobs(ctx)

@@ -243,13 +266,13 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error) {
	// Send the layer
	layerUpload, err := bs.Create(ctx)
	if err != nil {
		return "", retryOnError(err)
		return retryOnError(err)
	}
	defer layerUpload.Close()

	arch, err := pd.layer.TarStream()
	if err != nil {
		return "", xfer.DoNotRetry{Err: err}
		return xfer.DoNotRetry{Err: err}
	}

	// don't care if this fails; best effort

@@ -265,183 +288,62 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error) {
	nn, err := layerUpload.ReadFrom(tee)
	compressedReader.Close()
	if err != nil {
		return "", retryOnError(err)
		return retryOnError(err)
	}

	pushDigest := digester.Digest()
	if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
		return "", retryOnError(err)
		return retryOnError(err)
	}

	// If Commit succeded, that's an indication that the remote registry
	// speaks the v2 protocol.
	*pd.confirmedV2 = true

	logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
	progress.Update(progressOutput, pd.ID(), "Pushed")

	// Cache mapping from this layer's DiffID to the blobsum
	if err := pd.blobSumService.Add(diffID, pushDigest); err != nil {
		return "", xfer.DoNotRetry{Err: err}
		return xfer.DoNotRetry{Err: err}
	}

	pd.layersPushed.Lock()
	pd.layersPushed.layersPushed[pushDigest] = true
	pd.layersPushed.Unlock()
	pd.pushState.Lock()

	return pushDigest, nil
	// If Commit succeded, that's an indication that the remote registry
	// speaks the v2 protocol.
	pd.pushState.confirmedV2 = true

	pd.pushState.remoteLayers[diffID] = distribution.Descriptor{
		Digest:    pushDigest,
		MediaType: schema2.MediaTypeLayer,
		Size:      nn,
	}

	pd.pushState.Unlock()

	return nil
}

func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor {
	// Not necessary to lock pushStatus because this is always
	// called after all the mutation in pushStatus.
	// By the time this function is called, every layer will have
	// an entry in remoteLayers.
	return pd.pushState.remoteLayers[pd.DiffID()]
}

// blobSumAlreadyExists checks if the registry already know about any of the
// blobsums passed in the "blobsums" slice. If it finds one that the registry
// knows about, it returns the known digest and "true".
func blobSumAlreadyExists(ctx context.Context, blobsums []digest.Digest, repo distribution.Repository, layersPushed *pushMap) (digest.Digest, bool, error) {
	layersPushed.Lock()
func blobSumAlreadyExists(ctx context.Context, blobsums []digest.Digest, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) {
	for _, dgst := range blobsums {
		if layersPushed.layersPushed[dgst] {
			// it is already known that the push is not needed and
			// therefore doing a stat is unnecessary
			layersPushed.Unlock()
			return dgst, true, nil
		}
	}
	layersPushed.Unlock()

	for _, dgst := range blobsums {
		_, err := repo.Blobs(ctx).Stat(ctx, dgst)
		descriptor, err := repo.Blobs(ctx).Stat(ctx, dgst)
		switch err {
		case nil:
			return dgst, true, nil
			descriptor.MediaType = schema2.MediaTypeLayer
			return descriptor, true, nil
		case distribution.ErrBlobUnknown:
			// nop
		default:
			return "", false, err
			return distribution.Descriptor{}, false, err
		}
	}
	return "", false, nil
}

// CreateV2Manifest creates a V2 manifest from an image config and set of
// FSLayer digests.
// FIXME: This should be moved to the distribution repo, since it will also
// be useful for converting new manifests to the old format.
func CreateV2Manifest(name, tag string, img *image.Image, fsLayers map[layer.DiffID]digest.Digest) (*schema1.Manifest, error) {
	if len(img.History) == 0 {
		return nil, errors.New("empty history when trying to create V2 manifest")
	}

	// Generate IDs for each layer
	// For non-top-level layers, create fake V1Compatibility strings that
	// fit the format and don't collide with anything else, but don't
	// result in runnable images on their own.
	type v1Compatibility struct {
		ID              string    `json:"id"`
		Parent          string    `json:"parent,omitempty"`
		Comment         string    `json:"comment,omitempty"`
		Created         time.Time `json:"created"`
		ContainerConfig struct {
			Cmd []string
		} `json:"container_config,omitempty"`
		ThrowAway bool `json:"throwaway,omitempty"`
	}

	fsLayerList := make([]schema1.FSLayer, len(img.History))
	history := make([]schema1.History, len(img.History))

	parent := ""
	layerCounter := 0
	for i, h := range img.History {
		if i == len(img.History)-1 {
			break
		}

		var diffID layer.DiffID
		if h.EmptyLayer {
			diffID = layer.EmptyLayer.DiffID()
		} else {
			if len(img.RootFS.DiffIDs) <= layerCounter {
				return nil, errors.New("too many non-empty layers in History section")
			}
			diffID = img.RootFS.DiffIDs[layerCounter]
			layerCounter++
		}

		fsLayer, present := fsLayers[diffID]
		if !present {
			return nil, fmt.Errorf("missing layer in CreateV2Manifest: %s", diffID.String())
		}
		dgst, err := digest.FromBytes([]byte(fsLayer.Hex() + " " + parent))
		if err != nil {
			return nil, err
		}
		v1ID := dgst.Hex()

		v1Compatibility := v1Compatibility{
			ID:      v1ID,
			Parent:  parent,
			Comment: h.Comment,
			Created: h.Created,
		}
		v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy}
		if h.EmptyLayer {
			v1Compatibility.ThrowAway = true
		}
		jsonBytes, err := json.Marshal(&v1Compatibility)
		if err != nil {
			return nil, err
		}

		reversedIndex := len(img.History) - i - 1
		history[reversedIndex].V1Compatibility = string(jsonBytes)
		fsLayerList[reversedIndex] = schema1.FSLayer{BlobSum: fsLayer}

		parent = v1ID
	}

	latestHistory := img.History[len(img.History)-1]

	var diffID layer.DiffID
	if latestHistory.EmptyLayer {
		diffID = layer.EmptyLayer.DiffID()
	} else {
		if len(img.RootFS.DiffIDs) <= layerCounter {
			return nil, errors.New("too many non-empty layers in History section")
		}
		diffID = img.RootFS.DiffIDs[layerCounter]
	}
	fsLayer, present := fsLayers[diffID]
	if !present {
		return nil, fmt.Errorf("missing layer in CreateV2Manifest: %s", diffID.String())
	}

	dgst, err := digest.FromBytes([]byte(fsLayer.Hex() + " " + parent + " " + string(img.RawJSON())))
	if err != nil {
		return nil, err
	}
	fsLayerList[0] = schema1.FSLayer{BlobSum: fsLayer}

	// Top-level v1compatibility string should be a modified version of the
	// image config.
	transformedConfig, err := v1.MakeV1ConfigFromConfig(img, dgst.Hex(), parent, latestHistory.EmptyLayer)
	if err != nil {
		return nil, err
	}

	history[0].V1Compatibility = string(transformedConfig)

	// windows-only baselayer setup
	if err := setupBaseLayer(history, *img.RootFS); err != nil {
		return nil, err
	}

	return &schema1.Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
		Name:         name,
		Tag:          tag,
		Architecture: img.Architecture,
		FSLayers:     fsLayerList,
		History:      history,
	}, nil
	return distribution.Descriptor{}, false, nil
}
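
blobSumAlreadyExists above probes the remote blob store with Stat and treats distribution.ErrBlobUnknown as "keep looking" rather than a failure. A compact sketch of that probe, assuming the same distribution.Repository interface used in the diff; the function name is hypothetical:

package sketch

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"golang.org/x/net/context"
)

// findKnownBlob returns the descriptor of the first candidate digest the
// registry already has, mirroring the Stat loop in blobSumAlreadyExists.
func findKnownBlob(ctx context.Context, repo distribution.Repository, candidates []digest.Digest) (distribution.Descriptor, bool, error) {
	for _, dgst := range candidates {
		desc, err := repo.Blobs(ctx).Stat(ctx, dgst)
		switch err {
		case nil:
			// The registry already has this blob; reuse its descriptor.
			return desc, true, nil
		case distribution.ErrBlobUnknown:
			// Not present under this digest; try the next candidate.
		default:
			return distribution.Descriptor{}, false, err
		}
	}
	return distribution.Descriptor{}, false, nil
}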
@@ -1,176 +0,0 @@
package distribution

import (
	"reflect"
	"testing"

	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
)

func TestCreateV2Manifest(t *testing.T) {
	imgJSON := `{
    "architecture": "amd64",
    "config": {
        "AttachStderr": false,
        "AttachStdin": false,
        "AttachStdout": false,
        "Cmd": [
            "/bin/sh",
            "-c",
            "echo hi"
        ],
        "Domainname": "",
        "Entrypoint": null,
        "Env": [
            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
            "derived=true",
            "asdf=true"
        ],
        "Hostname": "23304fc829f9",
        "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246",
        "Labels": {},
        "OnBuild": [],
        "OpenStdin": false,
        "StdinOnce": false,
        "Tty": false,
        "User": "",
        "Volumes": null,
        "WorkingDir": ""
    },
    "container": "e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001",
    "container_config": {
        "AttachStderr": false,
        "AttachStdin": false,
        "AttachStdout": false,
        "Cmd": [
            "/bin/sh",
            "-c",
            "#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"
        ],
        "Domainname": "",
        "Entrypoint": null,
        "Env": [
            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
            "derived=true",
            "asdf=true"
        ],
        "Hostname": "23304fc829f9",
        "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246",
        "Labels": {},
        "OnBuild": [],
        "OpenStdin": false,
        "StdinOnce": false,
        "Tty": false,
        "User": "",
        "Volumes": null,
        "WorkingDir": ""
    },
    "created": "2015-11-04T23:06:32.365666163Z",
    "docker_version": "1.9.0-dev",
    "history": [
        {
            "created": "2015-10-31T22:22:54.690851953Z",
            "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
        },
        {
            "created": "2015-10-31T22:22:55.613815829Z",
            "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]"
        },
        {
            "created": "2015-11-04T23:06:30.934316144Z",
            "created_by": "/bin/sh -c #(nop) ENV derived=true",
            "empty_layer": true
        },
        {
            "created": "2015-11-04T23:06:31.192097572Z",
            "created_by": "/bin/sh -c #(nop) ENV asdf=true",
            "empty_layer": true
        },
        {
            "created": "2015-11-04T23:06:32.083868454Z",
            "created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"
        },
        {
            "created": "2015-11-04T23:06:32.365666163Z",
            "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]",
            "empty_layer": true
        }
    ],
    "os": "linux",
    "rootfs": {
        "diff_ids": [
            "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
            "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
            "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
        ],
        "type": "layers"
    }
}`

	// To fill in rawJSON
	img, err := image.NewFromJSON([]byte(imgJSON))
	if err != nil {
		t.Fatalf("json decoding failed: %v", err)
	}

	fsLayers := map[layer.DiffID]digest.Digest{
		layer.DiffID("sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1"): digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
		layer.DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"): digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
		layer.DiffID("sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"): digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
	}

	manifest, err := CreateV2Manifest("testrepo", "testtag", img, fsLayers)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		t.Fatalf("CreateV2Manifest returned error: %v", err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if manifest.Versioned.SchemaVersion != 1 {
 | 
			
		||||
		t.Fatal("SchemaVersion != 1")
 | 
			
		||||
	}
 | 
			
		||||
	if manifest.Name != "testrepo" {
 | 
			
		||||
		t.Fatal("incorrect name in manifest")
 | 
			
		||||
	}
 | 
			
		||||
	if manifest.Tag != "testtag" {
 | 
			
		||||
		t.Fatal("incorrect tag in manifest")
 | 
			
		||||
	}
 | 
			
		||||
	if manifest.Architecture != "amd64" {
 | 
			
		||||
		t.Fatal("incorrect arch in manifest")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	expectedFSLayers := []schema1.FSLayer{
 | 
			
		||||
		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
 | 
			
		||||
		{BlobSum: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
 | 
			
		||||
		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
 | 
			
		||||
		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
 | 
			
		||||
		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
 | 
			
		||||
		{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(manifest.FSLayers) != len(expectedFSLayers) {
 | 
			
		||||
		t.Fatalf("wrong number of FSLayers: %d", len(manifest.FSLayers))
 | 
			
		||||
	}
 | 
			
		||||
	if !reflect.DeepEqual(manifest.FSLayers, expectedFSLayers) {
 | 
			
		||||
		t.Fatal("wrong FSLayers list")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	expectedV1Compatibility := []string{
 | 
			
		||||
		`{"architecture":"amd64","config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","echo hi"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"container":"e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001","container_config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"created":"2015-11-04T23:06:32.365666163Z","docker_version":"1.9.0-dev","id":"d728140d3fd23dfcac505954af0b2224b3579b177029eded62916579eb19ac64","os":"linux","parent":"0594e66a9830fa5ba73b66349eb221ea4beb6bac8d2148b90a0f371f8d67bcd5","throwaway":true}`,
		`{"id":"0594e66a9830fa5ba73b66349eb221ea4beb6bac8d2148b90a0f371f8d67bcd5","parent":"39bc0dbed47060dd8952b048e73744ae471fe50354d2c267d308292c53b83ce1","created":"2015-11-04T23:06:32.083868454Z","container_config":{"Cmd":["/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"]}}`,
		`{"id":"39bc0dbed47060dd8952b048e73744ae471fe50354d2c267d308292c53b83ce1","parent":"875d7f206c023dc979e1677567a01364074f82b61e220c9b83a4610170490381","created":"2015-11-04T23:06:31.192097572Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV asdf=true"]},"throwaway":true}`,
		`{"id":"875d7f206c023dc979e1677567a01364074f82b61e220c9b83a4610170490381","parent":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","created":"2015-11-04T23:06:30.934316144Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV derived=true"]},"throwaway":true}`,
		`{"id":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","parent":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:55.613815829Z","container_config":{"Cmd":["/bin/sh -c #(nop) CMD [\"sh\"]"]}}`,
		`{"id":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:54.690851953Z","container_config":{"Cmd":["/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"]}}`,
	}

	if len(manifest.History) != len(expectedV1Compatibility) {
		t.Fatalf("wrong number of history entries: %d", len(manifest.History))
	}
	for i := range expectedV1Compatibility {
		if manifest.History[i].V1Compatibility != expectedV1Compatibility[i] {
			t.Fatalf("wrong V1Compatibility %d. expected:\n%s\ngot:\n%s", i, expectedV1Compatibility[i], manifest.History[i].V1Compatibility)
		}
	}
}
@@ -1,12 +0,0 @@
// +build !windows

package distribution

import (
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/docker/image"
)

func setupBaseLayer(history []schema1.History, rootFS image.RootFS) error {
	return nil
}
@@ -1,28 +0,0 @@
// +build windows

package distribution

import (
	"encoding/json"

	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/docker/image"
)

func setupBaseLayer(history []schema1.History, rootFS image.RootFS) error {
	var v1Config map[string]*json.RawMessage
	if err := json.Unmarshal([]byte(history[len(history)-1].V1Compatibility), &v1Config); err != nil {
		return err
	}
	baseID, err := json.Marshal(rootFS.BaseLayerID())
	if err != nil {
		return err
	}
	v1Config["parent"] = (*json.RawMessage)(&baseID)
	configJSON, err := json.Marshal(v1Config)
	if err != nil {
		return err
	}
	history[len(history)-1].V1Compatibility = string(configJSON)
	return nil
}
@@ -8,16 +8,12 @@ import (
	"strings"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/client"
	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	"github.com/docker/engine-api/types"
	"golang.org/x/net/context"

@@ -125,20 +121,6 @@ func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, end
	return repo, foundVersion, err
}

func digestFromManifest(m *schema1.SignedManifest, name reference.Named) (digest.Digest, int, error) {
	payload, err := m.Payload()
	if err != nil {
		// If this failed, the signatures section was corrupted
		// or missing. Treat the entire manifest as the payload.
		payload = m.Raw
	}
	manifestDigest, err := digest.FromBytes(payload)
	if err != nil {
		logrus.Infof("Could not compute manifest digest for %s:%s : %v", name.Name(), m.Tag, err)
	}
	return manifestDigest, len(payload), nil
}

type existingTokenHandler struct {
	token string
}
@@ -65,12 +65,7 @@ func createChainIDFromParent(parent layer.ChainID, dgsts ...layer.DiffID) layer.
		return createChainIDFromParent(layer.ChainID(dgsts[0]), dgsts[1:]...)
	}
	// H = "H(n-1) SHA256(n)"
	dgst, err := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0])))
	if err != nil {
		// Digest calculation is not expected to throw an error,
		// any error at this point is a program error
		panic(err)
	}
	dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0])))
	return createChainIDFromParent(layer.ChainID(dgst), dgsts[1:]...)
}

@@ -92,11 +87,7 @@ func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID) (la
	if err != nil {
		return nil, err
	}
	diffID, err := digest.FromBytes(l.layerData.Bytes())
	if err != nil {
		return nil, err
	}
	l.diffID = layer.DiffID(diffID)
	l.diffID = layer.DiffID(digest.FromBytes(l.layerData.Bytes()))
	l.chainID = createChainIDFromParent(parentID, l.diffID)

	ls.layers[l.chainID] = l
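Note: the hunks above and several below adapt call sites to the newly vendored docker/distribution digest package, in which FromBytes no longer returns an error. A minimal, illustrative sketch of the new call shape (not part of this diff; it assumes the vendored github.com/docker/distribution/digest package at the revision this commit pins):

// digestsketch.go: illustrative only, not part of the commit.
package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	// Before: dgst, err := digest.FromBytes(data)
	// After:  digest computation cannot fail, so only the digest is returned.
	dgst := digest.FromBytes([]byte("example content"))
	fmt.Println(dgst) // prints something like sha256:...
}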
@@ -5,7 +5,6 @@ import (
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution/digest"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/progress"
	"golang.org/x/net/context"

@@ -30,7 +29,6 @@ type uploadTransfer struct {
	Transfer

	diffID layer.DiffID
	digest digest.Digest
	err    error
}

@@ -43,16 +41,15 @@ type UploadDescriptor interface {
	// DiffID should return the DiffID for this layer.
	DiffID() layer.DiffID
	// Upload is called to perform the Upload.
	Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error)
	Upload(ctx context.Context, progressOutput progress.Output) error
}

// Upload is a blocking function which ensures the listed layers are present on
// the remote registry. It uses the string returned by the Key method to
// deduplicate uploads.
func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) (map[layer.DiffID]digest.Digest, error) {
func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error {
	var (
		uploads          []*uploadTransfer
		digests          = make(map[layer.DiffID]digest.Digest)
		dedupDescriptors = make(map[string]struct{})
	)

@@ -74,16 +71,15 @@ func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescri
	for _, upload := range uploads {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
			return ctx.Err()
		case <-upload.Transfer.Done():
			if upload.err != nil {
				return nil, upload.err
				return upload.err
			}
			digests[upload.diffID] = upload.digest
		}
	}

	return digests, nil
	return nil
}

func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc {
@@ -109,9 +105,8 @@ func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFun

			retries := 0
			for {
				digest, err := descriptor.Upload(u.Transfer.Context(), progressOutput)
				err := descriptor.Upload(u.Transfer.Context(), progressOutput)
				if err == nil {
					u.digest = digest
					break
				}

@@ -36,12 +36,12 @@ func (u *mockUploadDescriptor) DiffID() layer.DiffID {
}

// Upload is called to perform the upload.
func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error) {
func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) error {
	if u.currentUploads != nil {
		defer atomic.AddInt32(u.currentUploads, -1)

		if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency {
			return "", errors.New("concurrency limit exceeded")
			return errors.New("concurrency limit exceeded")
		}
	}

@@ -49,7 +49,7 @@ func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progre
	for i := int64(0); i <= 10; i++ {
		select {
		case <-ctx.Done():
			return "", ctx.Err()
			return ctx.Err()
		case <-time.After(10 * time.Millisecond):
			progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10})
		}

@@ -57,12 +57,10 @@ func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progre

	if u.simulateRetries != 0 {
		u.simulateRetries--
		return "", errors.New("simulating retry")
		return errors.New("simulating retry")
	}

	// For the mock implementation, use SHA256(DiffID) as the returned
	// digest.
	return digest.FromBytes([]byte(u.diffID.String()))
	return nil
}

func uploadDescriptors(currentUploads *int32) []UploadDescriptor {
@@ -101,26 +99,13 @@ func TestSuccessfulUpload(t *testing.T) {
	var currentUploads int32
	descriptors := uploadDescriptors(&currentUploads)

	digests, err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan))
	err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan))
	if err != nil {
		t.Fatalf("upload error: %v", err)
	}

	close(progressChan)
	<-progressDone

	if len(digests) != len(expectedDigests) {
		t.Fatal("wrong number of keys in digests map")
	}

	for key, val := range expectedDigests {
		if digests[key] != val {
			t.Fatalf("mismatch in digest array for key %v (expected %v, got %v)", key, val, digests[key])
		}
		if receivedProgress[key.String()] != 10 {
			t.Fatalf("missing or wrong progress output for %v", key)
		}
	}
}

func TestCancelledUpload(t *testing.T) {

@@ -143,7 +128,7 @@ func TestCancelledUpload(t *testing.T) {
	}()

	descriptors := uploadDescriptors(nil)
	_, err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan))
	err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan))
	if err != context.Canceled {
		t.Fatal("expected upload to be cancelled")
	}
@@ -44,7 +44,7 @@ clone git github.com/boltdb/bolt v1.1.0
clone git github.com/miekg/dns d27455715200c7d3e321a1e5cadb27c9ee0b0f02

# get graph and distribution packages
clone git github.com/docker/distribution 568bf038af6d65b376165d02886b1c7fcaef1f61
clone git github.com/docker/distribution a7ae88da459b98b481a245e5b1750134724ac67d
clone git github.com/vbatts/tar-split v0.9.11

# get desired notary commit, might also need to be updated in Dockerfile
12  image/fs.go

@@ -101,11 +101,7 @@ func (s *fs) get(id ID) ([]byte, error) {
	}

	// todo: maybe optional
	validated, err := digest.FromBytes(content)
	if err != nil {
		return nil, err
	}
	if ID(validated) != id {
	if ID(digest.FromBytes(content)) != id {
		return nil, fmt.Errorf("failed to verify image: %v", id)
	}

@@ -121,11 +117,7 @@ func (s *fs) Set(data []byte) (ID, error) {
		return "", fmt.Errorf("Invalid empty data")
	}

	dgst, err := digest.FromBytes(data)
	if err != nil {
		return "", err
	}
	id := ID(dgst)
	id := ID(digest.FromBytes(data))
	filePath := s.contentFile(id)
	tempFilePath := s.contentFile(id) + ".tmp"
	if err := ioutil.WriteFile(tempFilePath, data, 0600); err != nil {
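As the image/fs.go hunks above show, an image ID is simply the SHA-256 digest of the stored bytes, and get() re-verifies content against its ID on read. A standalone sketch of that check (illustrative only; assumes the vendored github.com/docker/distribution/digest package):

// verifysketch.go: illustrative only, not part of the commit.
package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

// verify re-computes the digest of content and compares it to the expected ID,
// mirroring the check in (*fs).get.
func verify(id digest.Digest, content []byte) error {
	if digest.FromBytes(content) != id {
		return fmt.Errorf("failed to verify image: %v", id)
	}
	return nil
}

func main() {
	data := []byte("config bytes")
	id := digest.FromBytes(data)
	fmt.Println(verify(id, data)) // <nil>
}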
@@ -67,10 +67,7 @@ func TestFSInvalidSet(t *testing.T) {
		t.Fatal(err)
	}

	id, err := digest.FromBytes([]byte("foobar"))
	if err != nil {
		t.Fatal(err)
	}
	id := digest.FromBytes([]byte("foobar"))
	err = os.Mkdir(filepath.Join(tmpdir, contentDirName, string(id.Algorithm()), id.Hex()), 0700)
	if err != nil {
		t.Fatal(err)

@@ -160,11 +157,7 @@ func testMetadataGetSet(t *testing.T, store StoreBackend) {
		t.Fatal("Expected error for getting metadata for unknown key")
	}

	id3, err := digest.FromBytes([]byte("baz"))
	if err != nil {
		t.Fatal(err)
	}

	id3 := digest.FromBytes([]byte("baz"))
	err = store.SetMetadata(ID(id3), "tkey", []byte("tval"))
	if err == nil {
		t.Fatal("Expected error for setting metadata for unknown ID.")

@@ -27,7 +27,7 @@ func (r *RootFS) BaseLayerID() string {

// ChainID returns the ChainID for the top layer in RootFS.
func (r *RootFS) ChainID() layer.ChainID {
	baseDiffID, _ := digest.FromBytes([]byte(r.BaseLayerID())) // can never error
	baseDiffID := digest.FromBytes([]byte(r.BaseLayerID()))
	return layer.CreateChainID(append([]layer.DiffID{layer.DiffID(baseDiffID)}, r.DiffIDs...))
}

@@ -63,7 +63,7 @@ func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest
	}
	logrus.Debugf("CreateV1ID %s", configJSON)

	return digest.FromBytes(configJSON)
	return digest.FromBytes(configJSON), nil
}

// MakeConfigFromV1Config creates an image config from the legacy V1 config format.
@@ -48,7 +48,7 @@ type DockerRegistrySuite struct {

func (s *DockerRegistrySuite) SetUpTest(c *check.C) {
	testRequires(c, DaemonIsLinux)
	s.reg = setupRegistry(c)
	s.reg = setupRegistry(c, false)
	s.d = NewDaemon(c)
}

@@ -62,6 +62,34 @@ func (s *DockerRegistrySuite) TearDownTest(c *check.C) {
	s.d.Stop()
}

func init() {
	check.Suite(&DockerSchema1RegistrySuite{
		ds: &DockerSuite{},
	})
}

type DockerSchema1RegistrySuite struct {
	ds  *DockerSuite
	reg *testRegistryV2
	d   *Daemon
}

func (s *DockerSchema1RegistrySuite) SetUpTest(c *check.C) {
	testRequires(c, DaemonIsLinux)
	s.reg = setupRegistry(c, true)
	s.d = NewDaemon(c)
}

func (s *DockerSchema1RegistrySuite) TearDownTest(c *check.C) {
	if s.reg != nil {
		s.reg.Close()
	}
	if s.ds != nil {
		s.ds.TearDownTest(c)
	}
	s.d.Stop()
}

func init() {
	check.Suite(&DockerDaemonSuite{
		ds: &DockerSuite{},

@@ -97,7 +125,7 @@ type DockerTrustSuite struct {
}

func (s *DockerTrustSuite) SetUpTest(c *check.C) {
	s.reg = setupRegistry(c)
	s.reg = setupRegistry(c, false)
	s.not = setupNotary(c)
}

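The integration-cli hunks that follow repeatedly turn suite-bound tests into plain helpers (testPullByDigest, testPushBusyboxImage, and so on) and add thin wrappers so both DockerRegistrySuite and DockerSchema1RegistrySuite run the same body against their respective registries. A generic, self-contained sketch of that pattern using the standard testing package (the names here are hypothetical stand-ins, not Docker's actual code):

// delegationsketch_test.go: illustrative only, not part of the commit.
package registrytest

import "testing"

// testPull is the shared body; the flag stands in for the schema1/schema2 registry choice.
func testPull(t *testing.T, schema1 bool) {
	if schema1 {
		t.Log("exercising pull against a schema1-only registry")
	} else {
		t.Log("exercising pull against a schema2-capable registry")
	}
}

// Thin wrappers run the same body once per registry flavor.
func TestPullSchema2(t *testing.T) { testPull(t, false) }
func TestPullSchema1(t *testing.T) { testPull(t, true) }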
@@ -10,6 +10,7 @@ import (

	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/docker/pkg/integration/checker"
	"github.com/docker/docker/pkg/stringutils"
	"github.com/docker/engine-api/types"
			@ -56,7 +57,7 @@ func setupImageWithTag(c *check.C, tag string) (digest.Digest, error) {
 | 
			
		|||
	return digest.Digest(pushDigest), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) {
 | 
			
		||||
func testPullByTagDisplaysDigest(c *check.C) {
 | 
			
		||||
	testRequires(c, DaemonIsLinux)
 | 
			
		||||
	pushDigest, err := setupImage(c)
 | 
			
		||||
	c.Assert(err, checker.IsNil, check.Commentf("error setting up image"))
 | 
			
		||||
| 
						 | 
				
			
			@ -73,7 +74,15 @@ func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) {
 | 
			
		|||
	c.Assert(pushDigest.String(), checker.Equals, pullDigest)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) {
 | 
			
		||||
func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) {
 | 
			
		||||
	testPullByTagDisplaysDigest(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerSchema1RegistrySuite) TestPullByTagDisplaysDigest(c *check.C) {
 | 
			
		||||
	testPullByTagDisplaysDigest(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func testPullByDigest(c *check.C) {
 | 
			
		||||
	testRequires(c, DaemonIsLinux)
 | 
			
		||||
	pushDigest, err := setupImage(c)
 | 
			
		||||
	c.Assert(err, checker.IsNil, check.Commentf("error setting up image"))
 | 
			
		||||
| 
						 | 
				
			
			@ -91,7 +100,15 @@ func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) {
 | 
			
		|||
	c.Assert(pushDigest.String(), checker.Equals, pullDigest)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerRegistrySuite) TestPullByDigestNoFallback(c *check.C) {
 | 
			
		||||
func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) {
 | 
			
		||||
	testPullByDigest(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerSchema1RegistrySuite) TestPullByDigest(c *check.C) {
 | 
			
		||||
	testPullByDigest(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func testPullByDigestNoFallback(c *check.C) {
 | 
			
		||||
	testRequires(c, DaemonIsLinux)
 | 
			
		||||
	// pull from the registry using the <name>@<digest> reference
 | 
			
		||||
	imageReference := fmt.Sprintf("%s@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", repoName)
 | 
			
		||||
| 
						 | 
				
			
			@ -100,6 +117,14 @@ func (s *DockerRegistrySuite) TestPullByDigestNoFallback(c *check.C) {
 | 
			
		|||
	c.Assert(out, checker.Contains, "manifest unknown", check.Commentf("expected non-zero exit status and correct error message when pulling non-existing image"))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerRegistrySuite) TestPullByDigestNoFallback(c *check.C) {
 | 
			
		||||
	testPullByDigestNoFallback(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerSchema1RegistrySuite) TestPullByDigestNoFallback(c *check.C) {
 | 
			
		||||
	testPullByDigestNoFallback(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerRegistrySuite) TestCreateByDigest(c *check.C) {
 | 
			
		||||
	pushDigest, err := setupImage(c)
 | 
			
		||||
	c.Assert(err, checker.IsNil, check.Commentf("error setting up image"))
 | 
			
		||||
| 
						 | 
				
			
			@ -372,6 +397,7 @@ func (s *DockerRegistrySuite) TestDeleteImageByIDOnlyPulledByDigest(c *check.C)
 | 
			
		|||
 | 
			
		||||
// TestPullFailsWithAlteredManifest tests that a `docker pull` fails when
 | 
			
		||||
// we have modified a manifest blob and its digest cannot be verified.
 | 
			
		||||
// This is the schema2 version of the test.
 | 
			
		||||
func (s *DockerRegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) {
 | 
			
		||||
	testRequires(c, DaemonIsLinux)
 | 
			
		||||
	manifestDigest, err := setupImage(c)
 | 
			
		||||
| 
						 | 
				
			
			@ -380,6 +406,46 @@ func (s *DockerRegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) {
 | 
			
		|||
	// Load the target manifest blob.
 | 
			
		||||
	manifestBlob := s.reg.readBlobContents(c, manifestDigest)
 | 
			
		||||
 | 
			
		||||
	var imgManifest schema2.Manifest
 | 
			
		||||
	err = json.Unmarshal(manifestBlob, &imgManifest)
 | 
			
		||||
	c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob"))
 | 
			
		||||
 | 
			
		||||
	// Change a layer in the manifest.
 | 
			
		||||
	imgManifest.Layers[0].Digest = digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")
 | 
			
		||||
 | 
			
		||||
	// Move the existing data file aside, so that we can replace it with a
 | 
			
		||||
	// malicious blob of data. NOTE: we defer the returned undo func.
 | 
			
		||||
	undo := s.reg.tempMoveBlobData(c, manifestDigest)
 | 
			
		||||
	defer undo()
 | 
			
		||||
 | 
			
		||||
	alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", "   ")
 | 
			
		||||
	c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON"))
 | 
			
		||||
 | 
			
		||||
	s.reg.writeBlobContents(c, manifestDigest, alteredManifestBlob)
 | 
			
		||||
 | 
			
		||||
	// Now try pulling that image by digest. We should get an error about
 | 
			
		||||
	// digest verification for the manifest digest.
 | 
			
		||||
 | 
			
		||||
	// Pull from the registry using the <name>@<digest> reference.
 | 
			
		||||
	imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest)
 | 
			
		||||
	out, exitStatus, _ := dockerCmdWithError("pull", imageReference)
 | 
			
		||||
	c.Assert(exitStatus, checker.Not(check.Equals), 0)
 | 
			
		||||
 | 
			
		||||
	expectedErrorMsg := fmt.Sprintf("manifest verification failed for digest %s", manifestDigest)
 | 
			
		||||
	c.Assert(out, checker.Contains, expectedErrorMsg)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestPullFailsWithAlteredManifest tests that a `docker pull` fails when
 | 
			
		||||
// we have modified a manifest blob and its digest cannot be verified.
 | 
			
		||||
// This is the schema1 version of the test.
 | 
			
		||||
func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) {
 | 
			
		||||
	testRequires(c, DaemonIsLinux)
 | 
			
		||||
	manifestDigest, err := setupImage(c)
 | 
			
		||||
	c.Assert(err, checker.IsNil, check.Commentf("error setting up image"))
 | 
			
		||||
 | 
			
		||||
	// Load the target manifest blob.
 | 
			
		||||
	manifestBlob := s.reg.readBlobContents(c, manifestDigest)
 | 
			
		||||
 | 
			
		||||
	var imgManifest schema1.Manifest
 | 
			
		||||
	err = json.Unmarshal(manifestBlob, &imgManifest)
 | 
			
		||||
	c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob"))
 | 
			
		||||
| 
						 | 
				
			
			@ -413,6 +479,7 @@ func (s *DockerRegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) {
 | 
			
		|||
 | 
			
		||||
// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when
 | 
			
		||||
// we have modified a layer blob and its digest cannot be verified.
 | 
			
		||||
// This is the schema2 version of the test.
 | 
			
		||||
func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) {
 | 
			
		||||
	testRequires(c, DaemonIsLinux)
 | 
			
		||||
	manifestDigest, err := setupImage(c)
 | 
			
		||||
| 
						 | 
				
			
			@ -421,6 +488,49 @@ func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) {
 | 
			
		|||
	// Load the target manifest blob.
 | 
			
		||||
	manifestBlob := s.reg.readBlobContents(c, manifestDigest)
 | 
			
		||||
 | 
			
		||||
	var imgManifest schema2.Manifest
 | 
			
		||||
	err = json.Unmarshal(manifestBlob, &imgManifest)
 | 
			
		||||
	c.Assert(err, checker.IsNil)
 | 
			
		||||
 | 
			
		||||
	// Next, get the digest of one of the layers from the manifest.
 | 
			
		||||
	targetLayerDigest := imgManifest.Layers[0].Digest
 | 
			
		||||
 | 
			
		||||
	// Move the existing data file aside, so that we can replace it with a
 | 
			
		||||
	// malicious blob of data. NOTE: we defer the returned undo func.
 | 
			
		||||
	undo := s.reg.tempMoveBlobData(c, targetLayerDigest)
 | 
			
		||||
	defer undo()
 | 
			
		||||
 | 
			
		||||
	// Now make a fake data blob in this directory.
 | 
			
		||||
	s.reg.writeBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for."))
 | 
			
		||||
 | 
			
		||||
	// Now try pulling that image by digest. We should get an error about
 | 
			
		||||
	// digest verification for the target layer digest.
 | 
			
		||||
 | 
			
		||||
	// Remove distribution cache to force a re-pull of the blobs
 | 
			
		||||
	if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil {
 | 
			
		||||
		c.Fatalf("error clearing distribution cache: %v", err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Pull from the registry using the <name>@<digest> reference.
 | 
			
		||||
	imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest)
 | 
			
		||||
	out, exitStatus, _ := dockerCmdWithError("pull", imageReference)
 | 
			
		||||
	c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a zero exit status"))
 | 
			
		||||
 | 
			
		||||
	expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest)
 | 
			
		||||
	c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when
 | 
			
		||||
// we have modified a layer blob and its digest cannot be verified.
 | 
			
		||||
// This is the schema1 version of the test.
 | 
			
		||||
func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) {
 | 
			
		||||
	testRequires(c, DaemonIsLinux)
 | 
			
		||||
	manifestDigest, err := setupImage(c)
 | 
			
		||||
	c.Assert(err, checker.IsNil)
 | 
			
		||||
 | 
			
		||||
	// Load the target manifest blob.
 | 
			
		||||
	manifestBlob := s.reg.readBlobContents(c, manifestDigest)
 | 
			
		||||
 | 
			
		||||
	var imgManifest schema1.Manifest
 | 
			
		||||
	err = json.Unmarshal(manifestBlob, &imgManifest)
 | 
			
		||||
	c.Assert(err, checker.IsNil)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1,19 +1,29 @@
 | 
			
		|||
package main
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"encoding/json"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"io/ioutil"
 | 
			
		||||
	"os"
 | 
			
		||||
	"os/exec"
 | 
			
		||||
	"path/filepath"
 | 
			
		||||
	"runtime"
 | 
			
		||||
	"strings"
 | 
			
		||||
 | 
			
		||||
	"github.com/docker/distribution"
 | 
			
		||||
	"github.com/docker/distribution/digest"
 | 
			
		||||
	"github.com/docker/distribution/manifest"
 | 
			
		||||
	"github.com/docker/distribution/manifest/manifestlist"
 | 
			
		||||
	"github.com/docker/distribution/manifest/schema2"
 | 
			
		||||
	"github.com/docker/docker/pkg/integration/checker"
 | 
			
		||||
	"github.com/go-check/check"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// TestPullImageWithAliases pulls a specific image tag and verifies that any aliases (i.e., other
 | 
			
		||||
// testPullImageWithAliases pulls a specific image tag and verifies that any aliases (i.e., other
 | 
			
		||||
// tags for the same image) are not also pulled down.
 | 
			
		||||
//
 | 
			
		||||
// Ref: docker/docker#8141
 | 
			
		||||
func (s *DockerRegistrySuite) TestPullImageWithAliases(c *check.C) {
 | 
			
		||||
func testPullImageWithAliases(c *check.C) {
 | 
			
		||||
	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
 | 
			
		||||
 | 
			
		||||
	repos := []string{}
 | 
			
		||||
| 
						 | 
				
			
			@ -40,8 +50,16 @@ func (s *DockerRegistrySuite) TestPullImageWithAliases(c *check.C) {
 | 
			
		|||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestConcurrentPullWholeRepo pulls the same repo concurrently.
 | 
			
		||||
func (s *DockerRegistrySuite) TestConcurrentPullWholeRepo(c *check.C) {
 | 
			
		||||
func (s *DockerRegistrySuite) TestPullImageWithAliases(c *check.C) {
 | 
			
		||||
	testPullImageWithAliases(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerSchema1RegistrySuite) TestPullImageWithAliases(c *check.C) {
 | 
			
		||||
	testPullImageWithAliases(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// testConcurrentPullWholeRepo pulls the same repo concurrently.
 | 
			
		||||
func testConcurrentPullWholeRepo(c *check.C) {
 | 
			
		||||
	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
 | 
			
		||||
 | 
			
		||||
	repos := []string{}
 | 
			
		||||
| 
						 | 
				
			
			@ -89,8 +107,16 @@ func (s *DockerRegistrySuite) TestConcurrentPullWholeRepo(c *check.C) {
 | 
			
		|||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestConcurrentFailingPull tries a concurrent pull that doesn't succeed.
 | 
			
		||||
func (s *DockerRegistrySuite) TestConcurrentFailingPull(c *check.C) {
 | 
			
		||||
func (s *DockerRegistrySuite) testConcurrentPullWholeRepo(c *check.C) {
 | 
			
		||||
	testConcurrentPullWholeRepo(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerSchema1RegistrySuite) testConcurrentPullWholeRepo(c *check.C) {
 | 
			
		||||
	testConcurrentPullWholeRepo(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// testConcurrentFailingPull tries a concurrent pull that doesn't succeed.
 | 
			
		||||
func testConcurrentFailingPull(c *check.C) {
 | 
			
		||||
	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
 | 
			
		||||
 | 
			
		||||
	// Run multiple pulls concurrently
 | 
			
		||||
| 
						 | 
				
			
			@ -112,9 +138,17 @@ func (s *DockerRegistrySuite) TestConcurrentFailingPull(c *check.C) {
 | 
			
		|||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestConcurrentPullMultipleTags pulls multiple tags from the same repo
 | 
			
		||||
func (s *DockerRegistrySuite) testConcurrentFailingPull(c *check.C) {
 | 
			
		||||
	testConcurrentFailingPull(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerSchema1RegistrySuite) testConcurrentFailingPull(c *check.C) {
 | 
			
		||||
	testConcurrentFailingPull(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// testConcurrentPullMultipleTags pulls multiple tags from the same repo
 | 
			
		||||
// concurrently.
 | 
			
		||||
func (s *DockerRegistrySuite) TestConcurrentPullMultipleTags(c *check.C) {
 | 
			
		||||
func testConcurrentPullMultipleTags(c *check.C) {
 | 
			
		||||
	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
 | 
			
		||||
 | 
			
		||||
	repos := []string{}
 | 
			
		||||
| 
						 | 
				
			
			@ -161,9 +195,17 @@ func (s *DockerRegistrySuite) TestConcurrentPullMultipleTags(c *check.C) {
 | 
			
		|||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestPullIDStability verifies that pushing an image and pulling it back
 | 
			
		||||
func (s *DockerRegistrySuite) TestConcurrentPullMultipleTags(c *check.C) {
 | 
			
		||||
	testConcurrentPullMultipleTags(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerSchema1RegistrySuite) TestConcurrentPullMultipleTags(c *check.C) {
 | 
			
		||||
	testConcurrentPullMultipleTags(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// testPullIDStability verifies that pushing an image and pulling it back
 | 
			
		||||
// preserves the image ID.
 | 
			
		||||
func (s *DockerRegistrySuite) TestPullIDStability(c *check.C) {
 | 
			
		||||
func testPullIDStability(c *check.C) {
 | 
			
		||||
	derivedImage := privateRegistryURL + "/dockercli/id-stability"
 | 
			
		||||
	baseImage := "busybox"
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -229,6 +271,14 @@ func (s *DockerRegistrySuite) TestPullIDStability(c *check.C) {
 | 
			
		|||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerRegistrySuite) TestPullIDStability(c *check.C) {
 | 
			
		||||
	testPullIDStability(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerSchema1RegistrySuite) TestPullIDStability(c *check.C) {
 | 
			
		||||
	testPullIDStability(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestPullFallbackOn404 tries to pull a nonexistent manifest and confirms that
 | 
			
		||||
// the pull falls back to the v1 protocol.
 | 
			
		||||
//
 | 
			
		||||
| 
						 | 
				
			
			@ -240,3 +290,85 @@ func (s *DockerRegistrySuite) TestPullFallbackOn404(c *check.C) {
 | 
			
		|||
 | 
			
		||||
	c.Assert(out, checker.Contains, "v1 ping attempt")
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerRegistrySuite) TestPullManifestList(c *check.C) {
 | 
			
		||||
	pushDigest, err := setupImage(c)
 | 
			
		||||
	c.Assert(err, checker.IsNil, check.Commentf("error setting up image"))
 | 
			
		||||
 | 
			
		||||
	// Inject a manifest list into the registry
 | 
			
		||||
	manifestList := &manifestlist.ManifestList{
 | 
			
		||||
		Versioned: manifest.Versioned{
 | 
			
		||||
			SchemaVersion: 2,
 | 
			
		||||
			MediaType:     manifestlist.MediaTypeManifestList,
 | 
			
		||||
		},
 | 
			
		||||
		Manifests: []manifestlist.ManifestDescriptor{
 | 
			
		||||
			{
 | 
			
		||||
				Descriptor: distribution.Descriptor{
 | 
			
		||||
					Digest:    "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
 | 
			
		||||
					Size:      3253,
 | 
			
		||||
					MediaType: schema2.MediaTypeManifest,
 | 
			
		||||
				},
 | 
			
		||||
				Platform: manifestlist.PlatformSpec{
 | 
			
		||||
					Architecture: "bogus_arch",
 | 
			
		||||
					OS:           "bogus_os",
 | 
			
		||||
				},
 | 
			
		||||
			},
 | 
			
		||||
			{
 | 
			
		||||
				Descriptor: distribution.Descriptor{
 | 
			
		||||
					Digest:    pushDigest,
 | 
			
		||||
					Size:      3253,
 | 
			
		||||
					MediaType: schema2.MediaTypeManifest,
 | 
			
		||||
				},
 | 
			
		||||
				Platform: manifestlist.PlatformSpec{
 | 
			
		||||
					Architecture: runtime.GOARCH,
 | 
			
		||||
					OS:           runtime.GOOS,
 | 
			
		||||
				},
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	manifestListJSON, err := json.MarshalIndent(manifestList, "", "   ")
 | 
			
		||||
	c.Assert(err, checker.IsNil, check.Commentf("error marshalling manifest list"))
 | 
			
		||||
 | 
			
		||||
	manifestListDigest := digest.FromBytes(manifestListJSON)
 | 
			
		||||
	hexDigest := manifestListDigest.Hex()
 | 
			
		||||
 | 
			
		||||
	registryV2Path := filepath.Join(s.reg.dir, "docker", "registry", "v2")
 | 
			
		||||
 | 
			
		||||
	// Write manifest list to blob store
 | 
			
		||||
	blobDir := filepath.Join(registryV2Path, "blobs", "sha256", hexDigest[:2], hexDigest)
 | 
			
		||||
	err = os.MkdirAll(blobDir, 0755)
 | 
			
		||||
	c.Assert(err, checker.IsNil, check.Commentf("error creating blob dir"))
 | 
			
		||||
	blobPath := filepath.Join(blobDir, "data")
 | 
			
		||||
	err = ioutil.WriteFile(blobPath, []byte(manifestListJSON), 0644)
 | 
			
		||||
	c.Assert(err, checker.IsNil, check.Commentf("error writing manifest list"))
 | 
			
		||||
 | 
			
		||||
	// Add to revision store
 | 
			
		||||
	revisionDir := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "revisions", "sha256", hexDigest)
 | 
			
		||||
	err = os.Mkdir(revisionDir, 0755)
 | 
			
		||||
	c.Assert(err, checker.IsNil, check.Commentf("error creating revision dir"))
 | 
			
		||||
	revisionPath := filepath.Join(revisionDir, "link")
 | 
			
		||||
	err = ioutil.WriteFile(revisionPath, []byte(manifestListDigest.String()), 0644)
 | 
			
		||||
	c.Assert(err, checker.IsNil, check.Commentf("error writing revision link"))
 | 
			
		||||
 | 
			
		||||
	// Update tag
 | 
			
		||||
	tagPath := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "tags", "latest", "current", "link")
 | 
			
		||||
	err = ioutil.WriteFile(tagPath, []byte(manifestListDigest.String()), 0644)
 | 
			
		||||
	c.Assert(err, checker.IsNil, check.Commentf("error writing tag link"))
 | 
			
		||||
 | 
			
		||||
	// Verify that the image can be pulled through the manifest list.
 | 
			
		||||
	out, _ := dockerCmd(c, "pull", repoName)
 | 
			
		||||
 | 
			
		||||
	// The pull output includes "Digest: <digest>", so find that
 | 
			
		||||
	matches := digestRegex.FindStringSubmatch(out)
 | 
			
		||||
	c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out))
 | 
			
		||||
	pullDigest := matches[1]
 | 
			
		||||
 | 
			
		||||
	// Make sure the pushed and pull digests match
 | 
			
		||||
	c.Assert(manifestListDigest.String(), checker.Equals, pullDigest)
 | 
			
		||||
 | 
			
		||||
	// Was the image actually created?
 | 
			
		||||
	dockerCmd(c, "inspect", repoName)
 | 
			
		||||
 | 
			
		||||
	dockerCmd(c, "rmi", repoName)
 | 
			
		||||
}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -16,7 +16,7 @@ import (
 | 
			
		|||
)
 | 
			
		||||
 | 
			
		||||
// Pushing an image to a private registry.
 | 
			
		||||
func (s *DockerRegistrySuite) TestPushBusyboxImage(c *check.C) {
 | 
			
		||||
func testPushBusyboxImage(c *check.C) {
 | 
			
		||||
	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
 | 
			
		||||
	// tag the image to upload it to the private registry
 | 
			
		||||
	dockerCmd(c, "tag", "busybox", repoName)
 | 
			
		||||
| 
						 | 
				
			
			@ -24,13 +24,21 @@ func (s *DockerRegistrySuite) TestPushBusyboxImage(c *check.C) {
 | 
			
		|||
	dockerCmd(c, "push", repoName)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerRegistrySuite) TestPushBusyboxImage(c *check.C) {
 | 
			
		||||
	testPushBusyboxImage(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerSchema1RegistrySuite) TestPushBusyboxImage(c *check.C) {
 | 
			
		||||
	testPushBusyboxImage(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// pushing an image without a prefix should throw an error
 | 
			
		||||
func (s *DockerSuite) TestPushUnprefixedRepo(c *check.C) {
 | 
			
		||||
	out, _, err := dockerCmdWithError("push", "busybox")
 | 
			
		||||
	c.Assert(err, check.NotNil, check.Commentf("pushing an unprefixed repo didn't result in a non-zero exit status: %s", out))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerRegistrySuite) TestPushUntagged(c *check.C) {
 | 
			
		||||
func testPushUntagged(c *check.C) {
 | 
			
		||||
	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
 | 
			
		||||
	expected := "Repository does not exist"
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -39,7 +47,15 @@ func (s *DockerRegistrySuite) TestPushUntagged(c *check.C) {
 | 
			
		|||
	c.Assert(out, checker.Contains, expected, check.Commentf("pushing the image failed"))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerRegistrySuite) TestPushBadTag(c *check.C) {
 | 
			
		||||
func (s *DockerRegistrySuite) TestPushUntagged(c *check.C) {
 | 
			
		||||
	testPushUntagged(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerSchema1RegistrySuite) TestPushUntagged(c *check.C) {
 | 
			
		||||
	testPushUntagged(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func testPushBadTag(c *check.C) {
 | 
			
		||||
	repoName := fmt.Sprintf("%v/dockercli/busybox:latest", privateRegistryURL)
 | 
			
		||||
	expected := "does not exist"
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -48,7 +64,15 @@ func (s *DockerRegistrySuite) TestPushBadTag(c *check.C) {
 | 
			
		|||
	c.Assert(out, checker.Contains, expected, check.Commentf("pushing the image failed"))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerRegistrySuite) TestPushMultipleTags(c *check.C) {
 | 
			
		||||
func (s *DockerRegistrySuite) TestPushBadTag(c *check.C) {
 | 
			
		||||
	testPushBadTag(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerSchema1RegistrySuite) TestPushBadTag(c *check.C) {
 | 
			
		||||
	testPushBadTag(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func testPushMultipleTags(c *check.C) {
 | 
			
		||||
	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
 | 
			
		||||
	repoTag1 := fmt.Sprintf("%v/dockercli/busybox:t1", privateRegistryURL)
 | 
			
		||||
	repoTag2 := fmt.Sprintf("%v/dockercli/busybox:t2", privateRegistryURL)
 | 
			
		||||
| 
						 | 
				
			
			@ -85,7 +109,15 @@ func (s *DockerRegistrySuite) TestPushMultipleTags(c *check.C) {
 | 
			
		|||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerRegistrySuite) TestPushEmptyLayer(c *check.C) {
 | 
			
		||||
func (s *DockerRegistrySuite) TestPushMultipleTags(c *check.C) {
 | 
			
		||||
	testPushMultipleTags(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerSchema1RegistrySuite) TestPushMultipleTags(c *check.C) {
 | 
			
		||||
	testPushMultipleTags(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func testPushEmptyLayer(c *check.C) {
 | 
			
		||||
	repoName := fmt.Sprintf("%v/dockercli/emptylayer", privateRegistryURL)
 | 
			
		||||
	emptyTarball, err := ioutil.TempFile("", "empty_tarball")
 | 
			
		||||
	c.Assert(err, check.IsNil, check.Commentf("Unable to create test file"))
 | 
			
		||||
| 
						 | 
				
			
			@ -107,6 +139,14 @@ func (s *DockerRegistrySuite) TestPushEmptyLayer(c *check.C) {
 | 
			
		|||
	c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerRegistrySuite) TestPushEmptyLayer(c *check.C) {
 | 
			
		||||
	testPushEmptyLayer(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerSchema1RegistrySuite) TestPushEmptyLayer(c *check.C) {
 | 
			
		||||
	testPushEmptyLayer(c)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (s *DockerTrustSuite) TestTrustedPush(c *check.C) {
 | 
			
		||||
	repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL)
 | 
			
		||||
	// tag the image and upload it to the private registry
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
@@ -1554,9 +1554,9 @@ func daemonTime(c *check.C) time.Time {
	return dt
}

func setupRegistry(c *check.C) *testRegistryV2 {
func setupRegistry(c *check.C, schema1 bool) *testRegistryV2 {
	testRequires(c, RegistryHosting)
	reg, err := newTestRegistryV2(c)
	reg, err := newTestRegistryV2(c, schema1)
	c.Assert(err, check.IsNil)

	// Wait for registry to be ready to serve requests.
@@ -12,14 +12,17 @@ import (
	"github.com/go-check/check"
)

const v2binary = "registry-v2"
const (
	v2binary        = "registry-v2"
	v2binarySchema1 = "registry-v2-schema1"
)

type testRegistryV2 struct {
	cmd *exec.Cmd
	dir string
}

func newTestRegistryV2(c *check.C) (*testRegistryV2, error) {
func newTestRegistryV2(c *check.C, schema1 bool) (*testRegistryV2, error) {
	template := `version: 0.1
loglevel: debug
storage:

@@ -41,7 +44,11 @@ http:
		return nil, err
	}

	cmd := exec.Command(v2binary, confPath)
	binary := v2binary
	if schema1 {
		binary = v2binarySchema1
	}
	cmd := exec.Command(binary, confPath)
	if err := cmd.Start(); err != nil {
		os.RemoveAll(tmp)
		if os.IsNotExist(err) {
@@ -15,12 +15,8 @@ import (

func randomLayerID(seed int64) ChainID {
	r := rand.New(rand.NewSource(seed))
	dgst, err := digest.FromBytes([]byte(fmt.Sprintf("%d", r.Int63())))
	if err != nil {
		panic(err)
	}

	return ChainID(dgst)
	return ChainID(digest.FromBytes([]byte(fmt.Sprintf("%d", r.Int63()))))
}

func newFileMetadataStore(t *testing.T) (*fileMetadataStore, string, func()) {

@@ -233,12 +233,7 @@ func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID {
		return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...)
	}
	// H = "H(n-1) SHA256(n)"
	dgst, err := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0])))
	if err != nil {
		// Digest calculation is not expected to throw an error,
		// any error at this point is a program error
		panic(err)
	}
	dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0])))
	return createChainIDFromParent(ChainID(dgst), dgsts[1:]...)
}

			@ -548,10 +548,7 @@ func TestTarStreamStability(t *testing.T) {
 | 
			
		|||
}
 | 
			
		||||
 | 
			
		||||
func assertLayerDiff(t *testing.T, expected []byte, layer Layer) {
 | 
			
		||||
	expectedDigest, err := digest.FromBytes(expected)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		t.Fatal(err)
 | 
			
		||||
	}
 | 
			
		||||
	expectedDigest := digest.FromBytes(expected)
 | 
			
		||||
 | 
			
		||||
	if digest.Digest(layer.DiffID()) != expectedDigest {
 | 
			
		||||
		t.Fatalf("Mismatched diff id for %s, got %s, expected %s", layer.ChainID(), layer.DiffID(), expected)
 | 
			
		||||
| 
						 | 
				
			
			@ -573,10 +570,7 @@ func assertLayerDiff(t *testing.T, expected []byte, layer Layer) {
 | 
			
		|||
		t.Fatalf("Mismatched tar stream size for %s, got %d, expected %d", layer.ChainID(), len(actual), len(expected))
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	actualDigest, err := digest.FromBytes(actual)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		t.Fatal(err)
 | 
			
		||||
	}
 | 
			
		||||
	actualDigest := digest.FromBytes(actual)
 | 
			
		||||
 | 
			
		||||
	if actualDigest != expectedDigest {
 | 
			
		||||
		logByteDiff(t, actual, expected)
 | 
			
		||||
			@ -37,10 +37,7 @@ func GetLayerPath(s Store, layer ChainID) (string, error) {
 | 
			
		|||
 | 
			
		||||
func (ls *layerStore) RegisterDiffID(graphID string, size int64) (Layer, error) {
 | 
			
		||||
	var err error // this is used for cleanup in existingLayer case
 | 
			
		||||
	diffID, err := digest.FromBytes([]byte(graphID))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	diffID := digest.FromBytes([]byte(graphID))
 | 
			
		||||
 | 
			
		||||
	// Create new roLayer
 | 
			
		||||
	layer := &roLayer{
 | 
			
		||||
			@ -5,3 +5,10 @@ Brian Bland <brian.bland@docker.com>    Brian Bland <r4nd0m1n4t0r@gmail.com>
 | 
			
		|||
Josh Hawn <josh.hawn@docker.com>        Josh Hawn <jlhawn@berkeley.edu>
 | 
			
		||||
Richard Scothern <richard.scothern@docker.com> Richard <richard.scothern@gmail.com>
 | 
			
		||||
Richard Scothern <richard.scothern@docker.com> Richard Scothern <richard.scothern@gmail.com>
 | 
			
		||||
Andrew Meredith <andymeredith@gmail.com> Andrew Meredith <kendru@users.noreply.github.com>
 | 
			
		||||
harche <p.harshal@gmail.com> harche <harche@users.noreply.github.com>
 | 
			
		||||
Jessie Frazelle <jessie@docker.com>  <jfrazelle@users.noreply.github.com>
 | 
			
		||||
Sharif Nassar <sharif@mrwacky.com> Sharif Nassar <mrwacky42@users.noreply.github.com>
 | 
			
		||||
Sven Dowideit <SvenDowideit@home.org.au> Sven Dowideit <SvenDowideit@users.noreply.github.com>
 | 
			
		||||
Vincent Giersch <vincent.giersch@ovh.net> Vincent Giersch <vincent@giersch.fr>
 | 
			
		||||
davidli <wenquan.li@hp.com> davidli <wenquan.li@hpe.com>
 | 
			
		||||
| 
						 | 
				
			
			@ -5,13 +5,16 @@ Adrian Mouat <adrian.mouat@gmail.com>
 | 
			
		|||
Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
 | 
			
		||||
Alex Chan <alex.chan@metaswitch.com>
 | 
			
		||||
Alex Elman <aelman@indeed.com>
 | 
			
		||||
amitshukla <ashukla73@hotmail.com>
 | 
			
		||||
Amy Lindburg <amy.lindburg@docker.com>
 | 
			
		||||
Andrew Meredith <andymeredith@gmail.com>
 | 
			
		||||
Andrey Kostov <kostov.andrey@gmail.com>
 | 
			
		||||
Andy Goldstein <agoldste@redhat.com>
 | 
			
		||||
Anton Tiurin <noxiouz@yandex.ru>
 | 
			
		||||
Antonio Mercado <amercado@thinknode.com>
 | 
			
		||||
Arnaud Porterie <arnaud.porterie@docker.com>
 | 
			
		||||
Arthur Baars <arthur@semmle.com>
 | 
			
		||||
Avi Miller <avi.miller@oracle.com>
 | 
			
		||||
Ayose Cazorla <ayosec@gmail.com>
 | 
			
		||||
BadZen <dave.trombley@gmail.com>
 | 
			
		||||
Ben Firshman <ben@firshman.co.uk>
 | 
			
		||||
| 
						 | 
				
			
			@ -32,9 +35,10 @@ Derek McGowan <derek@mcgstyle.net>
 | 
			
		|||
Diogo Mónica <diogo.monica@gmail.com>
 | 
			
		||||
Donald Huang <don.hcd@gmail.com>
 | 
			
		||||
Doug Davis <dug@us.ibm.com>
 | 
			
		||||
farmerworking <farmerworking@gmail.com>
 | 
			
		||||
Florentin Raud <florentin.raud@gmail.com>
 | 
			
		||||
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
 | 
			
		||||
harche <harche@users.noreply.github.com>
 | 
			
		||||
harche <p.harshal@gmail.com>
 | 
			
		||||
Henri Gomez <henri.gomez@gmail.com>
 | 
			
		||||
Hu Keping <hukeping@huawei.com>
 | 
			
		||||
Hua Wang <wanghua.humble@gmail.com>
 | 
			
		||||
| 
						 | 
				
			
			@ -42,9 +46,10 @@ Ian Babrou <ibobrik@gmail.com>
 | 
			
		|||
Jack Griffin <jackpg14@gmail.com>
 | 
			
		||||
Jason Freidman <jason.freidman@gmail.com>
 | 
			
		||||
Jeff Nickoloff <jeff@allingeek.com>
 | 
			
		||||
Jessie Frazelle <jfrazelle@users.noreply.github.com>
 | 
			
		||||
Jessie Frazelle <jessie@docker.com>
 | 
			
		||||
Jianqing Wang <tsing@jianqing.org>
 | 
			
		||||
Jon Poler <jonathan.poler@apcera.com>
 | 
			
		||||
Jonathan Boulle <jonathanboulle@gmail.com>
 | 
			
		||||
Jordan Liggitt <jliggitt@redhat.com>
 | 
			
		||||
Josh Hawn <josh.hawn@docker.com>
 | 
			
		||||
Julien Fernandez <julien.fernandez@gmail.com>
 | 
			
		||||
| 
						 | 
				
			
			@ -59,6 +64,7 @@ Matt Moore <mattmoor@google.com>
 | 
			
		|||
Matt Robenolt <matt@ydekproductions.com>
 | 
			
		||||
Michael Prokop <mika@grml.org>
 | 
			
		||||
Miquel Sabaté <msabate@suse.com>
 | 
			
		||||
Morgan Bauer <mbauer@us.ibm.com>
 | 
			
		||||
moxiegirl <mary@docker.com>
 | 
			
		||||
Nathan Sullivan <nathan@nightsys.net>
 | 
			
		||||
nevermosby <robolwq@qq.com>
 | 
			
		||||
| 
						 | 
				
			
			@ -70,8 +76,8 @@ Olivier Jacques <olivier.jacques@hp.com>
 | 
			
		|||
Patrick Devine <patrick.devine@docker.com>
 | 
			
		||||
Philip Misiowiec <philip@atlashealth.com>
 | 
			
		||||
Richard Scothern <richard.scothern@docker.com>
 | 
			
		||||
Rusty Conover <rusty@luckydinosaur.com>
 | 
			
		||||
Sebastiaan van Stijn <github@gone.nl>
 | 
			
		||||
Sharif Nassar <mrwacky42@users.noreply.github.com>
 | 
			
		||||
Sharif Nassar <sharif@mrwacky.com>
 | 
			
		||||
Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
 | 
			
		||||
Shreyas Karnik <karnik.shreyas@gmail.com>
 | 
			
		||||
| 
						 | 
				
			
			@ -81,15 +87,16 @@ Stephen J Day <stephen.day@docker.com>
 | 
			
		|||
Sungho Moon <sungho.moon@navercorp.com>
 | 
			
		||||
Sven Dowideit <SvenDowideit@home.org.au>
 | 
			
		||||
Sylvain Baubeau <sbaubeau@redhat.com>
 | 
			
		||||
Ted Reed <ted.reed@gmail.com>
 | 
			
		||||
tgic <farmer1992@gmail.com>
 | 
			
		||||
Thomas Sjögren <konstruktoid@users.noreply.github.com>
 | 
			
		||||
Tianon Gravi <admwiggin@gmail.com>
 | 
			
		||||
Tibor Vass <teabee89@gmail.com>
 | 
			
		||||
Tonis Tiigi <tonistiigi@gmail.com>
 | 
			
		||||
Troels Thomsen <troels@thomsen.io>
 | 
			
		||||
Vincent Batts <vbatts@redhat.com>
 | 
			
		||||
Vincent Demeester <vincent@sbr.pm>
 | 
			
		||||
Vincent Giersch <vincent.giersch@ovh.net>
 | 
			
		||||
Vincent Giersch <vincent@giersch.fr>
 | 
			
		||||
W. Trevor King <wking@tremily.us>
 | 
			
		||||
xg.song <xg.song@venusource.com>
 | 
			
		||||
xiekeyang <xiekeyang@huawei.com>
 | 
			
		||||
			@ -1,4 +1,4 @@
 | 
			
		|||
FROM golang:1.4
 | 
			
		||||
FROM golang:1.5.2
 | 
			
		||||
 | 
			
		||||
RUN apt-get update && \
 | 
			
		||||
    apt-get install -y librados-dev apache2-utils && \
 | 
			
		||||
			@ -1,8 +1,58 @@
 | 
			
		|||
Solomon Hykes <solomon@docker.com> (@shykes)
 | 
			
		||||
Olivier Gambier <olivier@docker.com> (@dmp42)
 | 
			
		||||
Stephen Day <stephen.day@docker.com> (@stevvooe)
 | 
			
		||||
Derek McGowan <derek@mcgstyle.net> (@dmcgowan)
 | 
			
		||||
Richard Scothern <richard.scothern@gmail.com> (@richardscothern)
 | 
			
		||||
Aaron Lehmann <aaron.lehmann@docker.com> (@aaronlehmann)
 | 
			
		||||
# Distribution maintainers file
 | 
			
		||||
#
 | 
			
		||||
# This file describes who runs the docker/distribution project and how.
 | 
			
		||||
# This is a living document - if you see something out of date or missing, speak up!
 | 
			
		||||
#
 | 
			
		||||
# It is structured to be consumable by both humans and programs.
 | 
			
		||||
# To extract its contents programmatically, use any TOML-compliant parser.
 | 
			
		||||
#
 | 
			
		||||
# This file is compiled into the MAINTAINERS file in docker/opensource.
 | 
			
		||||
#
 | 
			
		||||
[Org]
 | 
			
		||||
	[Org."Core maintainers"]
 | 
			
		||||
		people = [
 | 
			
		||||
			"aaronlehmann",
 | 
			
		||||
			"dmcgowan",
 | 
			
		||||
			"dmp42",
 | 
			
		||||
			"richardscothern",
 | 
			
		||||
			"shykes",
 | 
			
		||||
			"stevvooe",
 | 
			
		||||
		]
 | 
			
		||||
 | 
			
		||||
[people]
 | 
			
		||||
 | 
			
		||||
# A reference list of all people associated with the project.
 | 
			
		||||
# All other sections should refer to people by their canonical key
 | 
			
		||||
# in the people section.
 | 
			
		||||
 | 
			
		||||
	# ADD YOURSELF HERE IN ALPHABETICAL ORDER
 | 
			
		||||
 | 
			
		||||
	[people.aaronlehmann]
 | 
			
		||||
	Name = "Aaron Lehmann"
 | 
			
		||||
	Email = "aaron.lehmann@docker.com"
 | 
			
		||||
	GitHub = "aaronlehmann"
 | 
			
		||||
 | 
			
		||||
	[people.dmcgowan]
 | 
			
		||||
	Name = "Derek McGowan"
 | 
			
		||||
	Email = "derek@mcgstyle.net"
 | 
			
		||||
	GitHub = "dmcgowan"
 | 
			
		||||
 | 
			
		||||
	[people.dmp42]
 | 
			
		||||
	Name = "Olivier Gambier"
 | 
			
		||||
	Email = "olivier@docker.com"
 | 
			
		||||
	GitHub = "dmp42"
 | 
			
		||||
 | 
			
		||||
	[people.richardscothern]
 | 
			
		||||
	Name = "Richard Scothern"
 | 
			
		||||
	Email = "richard.scothern@gmail.com"
 | 
			
		||||
	GitHub = "richardscothern"
 | 
			
		||||
 | 
			
		||||
	[people.shykes]
 | 
			
		||||
	Name = "Solomon Hykes"
 | 
			
		||||
	Email = "solomon@docker.com"
 | 
			
		||||
	GitHub = "shykes"
 | 
			
		||||
 | 
			
		||||
	[people.stevvooe]
 | 
			
		||||
	Name = "Stephen Day"
 | 
			
		||||
	Email = "stephen.day@docker.com"
 | 
			
		||||
	GitHub = "stevvooe"
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -17,9 +17,9 @@ This repository contains the following components:
 | 
			
		|||
|**Component**       |Description                                                                                                                                                                                         |
 | 
			
		||||
|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
 | 
			
		||||
| **registry**       | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+.                                                                                                  |
 | 
			
		||||
| **libraries**      | A rich set of libraries for interacting with distribution components. Please see [godoc](http://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
| **libraries**      | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
 | 
			
		||||
| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec)                                                                                                                        |
 | 
			
		||||
| **documentation**  | Docker's full documentation set is available at [docs.docker.com](http://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry.                                                                                                                                          |
 | 
			
		||||
| **documentation**  | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry.                                                                                                                                          |
 | 
			
		||||
 | 
			
		||||
### How does this integrate with Docker engine?
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -58,7 +58,7 @@ For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
 | 
			
		|||
### Who needs to deploy a registry?
 | 
			
		||||
 | 
			
		||||
By default, Docker users pull images from Docker's public registry instance.
 | 
			
		||||
[Installing Docker](http://docs.docker.com/installation) gives users this
 | 
			
		||||
[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
 | 
			
		||||
ability. Users can also push images to a repository on Docker's public registry,
 | 
			
		||||
if they have a [Docker Hub](https://hub.docker.com/) account. 
 | 
			
		||||
 | 
			
		||||
			@ -61,6 +61,15 @@ type Descriptor struct {
 | 
			
		|||
	// depend on the simplicity of this type.
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Descriptor returns the descriptor, to make it satisfy the Describable
 | 
			
		||||
// interface. Note that implementations of Describable are generally objects
 | 
			
		||||
// which can be described, not simply descriptors; this exception is in place
 | 
			
		||||
// to make it more convenient to pass actual descriptors to functions that
 | 
			
		||||
// expect Describable objects.
 | 
			
		||||
func (d Descriptor) Descriptor() Descriptor {
 | 
			
		||||
	return d
 | 
			
		||||
}
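Editor's note: an illustrative sketch (not part of this commit) of what this exception buys callers; the helper name digestOf is made up.

package example

import "github.com/docker/distribution"

// digestOf accepts anything Describable. Because Descriptor itself now has a
// Descriptor() method, a plain descriptor value can be passed in directly
// instead of being wrapped first.
func digestOf(d distribution.Describable) string {
	return string(d.Descriptor().Digest)
}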
 | 
			
		||||
 | 
			
		||||
// BlobStatter makes blob descriptors available by digest. The service may
 | 
			
		||||
// provide a descriptor of a different digest if the provided digest is not
 | 
			
		||||
// canonical.
 | 
			
		||||
			@ -6,6 +6,8 @@ machine:
 | 
			
		|||
  # Install ceph to test rados driver & create pool
 | 
			
		||||
    - sudo -i ~/distribution/contrib/ceph/ci-setup.sh
 | 
			
		||||
    - ceph osd pool create docker-distribution 1
 | 
			
		||||
  # Install codecov for coverage
 | 
			
		||||
    - pip install --user codecov
 | 
			
		||||
 | 
			
		||||
  post:
 | 
			
		||||
  # go
 | 
			
		||||
| 
						 | 
				
			
			@ -46,9 +48,6 @@ dependencies:
 | 
			
		|||
      gvm use stable &&
 | 
			
		||||
      go get github.com/axw/gocov/gocov github.com/golang/lint/golint
 | 
			
		||||
 | 
			
		||||
  # Disabling goveralls for now
 | 
			
		||||
  # go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint
 | 
			
		||||
 | 
			
		||||
test:
 | 
			
		||||
  pre:
 | 
			
		||||
  # Output the go versions we are going to test
 | 
			
		||||
| 
						 | 
				
			
			@ -73,25 +72,17 @@ test:
 | 
			
		|||
        pwd: $BASE_STABLE
 | 
			
		||||
 | 
			
		||||
  override:
 | 
			
		||||
 | 
			
		||||
  # Test stable, and report
 | 
			
		||||
  # Preset the goverall report file
 | 
			
		||||
  # - echo "$CIRCLE_PAIN" > ~/goverage.report
 | 
			
		||||
 | 
			
		||||
     - gvm use stable; go list ./... | xargs -L 1 -I{} rm -f $GOPATH/src/{}/coverage.out:
 | 
			
		||||
         pwd: $BASE_STABLE
 | 
			
		||||
 | 
			
		||||
     - gvm use stable; go list -tags "$DOCKER_BUILDTAGS" ./... | xargs -L 1 -I{} godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/{}/coverage.out {}:
 | 
			
		||||
     - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE':
 | 
			
		||||
         timeout: 600
 | 
			
		||||
         pwd: $BASE_STABLE
 | 
			
		||||
 | 
			
		||||
  post:
 | 
			
		||||
  # Aggregate and report to coveralls
 | 
			
		||||
    - gvm use stable; go list -tags "$DOCKER_BUILDTAGS" ./... | xargs -L 1 -I{} cat "$GOPATH/src/{}/coverage.out" | grep -v "$CIRCLE_PAIN" >> ~/goverage.report:
 | 
			
		||||
  # Report to codecov
 | 
			
		||||
    - bash <(curl -s https://codecov.io/bash):
 | 
			
		||||
        pwd: $BASE_STABLE
 | 
			
		||||
 | 
			
		||||
  ## Notes
 | 
			
		||||
  # Disabled coveralls reporting: build breaking sending coverage data to coveralls
 | 
			
		||||
  # Disabled the -race detector due to massive memory usage.
 | 
			
		||||
  # Do we want these as well?
 | 
			
		||||
  # - go get code.google.com/p/go.tools/cmd/goimports
 | 
			
		||||
7 vendor/src/github.com/docker/distribution/coverpkg.sh vendored Executable file
			@ -0,0 +1,7 @@
 | 
			
		|||
#!/usr/bin/env bash
 | 
			
		||||
# Given a subpackage and the containing package, figures out which packages
 | 
			
		||||
# need to be passed to `go test -coverpkg`:  this includes all of the
 | 
			
		||||
# subpackage's dependencies within the containing package, as well as the
 | 
			
		||||
# subpackage itself.
 | 
			
		||||
DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2})"
 | 
			
		||||
echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ','
 | 
			
		||||
| 
						 | 
				
			
			@ -1,21 +1,14 @@
 | 
			
		|||
package digest
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"bytes"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"hash"
 | 
			
		||||
	"io"
 | 
			
		||||
	"io/ioutil"
 | 
			
		||||
	"regexp"
 | 
			
		||||
	"strings"
 | 
			
		||||
 | 
			
		||||
	"github.com/docker/docker/pkg/tarsum"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
const (
 | 
			
		||||
	// DigestTarSumV1EmptyTar is the digest for the empty tar file.
 | 
			
		||||
	DigestTarSumV1EmptyTar = "tarsum.v1+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
 | 
			
		||||
 | 
			
		||||
	// DigestSha256EmptyTar is the canonical sha256 digest of empty data
 | 
			
		||||
	DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
 | 
			
		||||
)
 | 
			
		||||
| 
						 | 
				
			
			@ -29,18 +22,21 @@ const (
 | 
			
		|||
//
 | 
			
		||||
// 	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
 | 
			
		||||
//
 | 
			
		||||
// More important for this code base, this type is compatible with tarsum
 | 
			
		||||
// digests. For example, the following would be a valid Digest:
 | 
			
		||||
//
 | 
			
		||||
// 	tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b
 | 
			
		||||
//
 | 
			
		||||
// This allows to abstract the digest behind this type and work only in those
 | 
			
		||||
// terms.
 | 
			
		||||
type Digest string
 | 
			
		||||
 | 
			
		||||
// NewDigest returns a Digest from alg and a hash.Hash object.
 | 
			
		||||
func NewDigest(alg Algorithm, h hash.Hash) Digest {
 | 
			
		||||
	return Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil)))
 | 
			
		||||
	return NewDigestFromBytes(alg, h.Sum(nil))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewDigestFromBytes returns a new digest from the byte contents of p.
 | 
			
		||||
// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
 | 
			
		||||
// functions. This is also useful for rebuilding digests from binary
 | 
			
		||||
// serializations.
 | 
			
		||||
func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
 | 
			
		||||
	return Digest(fmt.Sprintf("%s:%x", alg, p))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewDigestFromHex returns a Digest from alg and the hex encoded digest.
 | 
			
		||||
| 
						 | 
				
			
			@ -79,41 +75,15 @@ func FromReader(rd io.Reader) (Digest, error) {
 | 
			
		|||
	return Canonical.FromReader(rd)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// FromTarArchive produces a tarsum digest from reader rd.
 | 
			
		||||
func FromTarArchive(rd io.Reader) (Digest, error) {
 | 
			
		||||
	ts, err := tarsum.NewTarSum(rd, true, tarsum.Version1)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return "", err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
 | 
			
		||||
		return "", err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	d, err := ParseDigest(ts.Sum(nil))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return "", err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return d, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// FromBytes digests the input and returns a Digest.
 | 
			
		||||
func FromBytes(p []byte) (Digest, error) {
 | 
			
		||||
	return FromReader(bytes.NewReader(p))
 | 
			
		||||
func FromBytes(p []byte) Digest {
 | 
			
		||||
	return Canonical.FromBytes(p)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Validate checks that the contents of d is a valid digest, returning an
 | 
			
		||||
// error if not.
 | 
			
		||||
func (d Digest) Validate() error {
 | 
			
		||||
	s := string(d)
 | 
			
		||||
	// Common case will be tarsum
 | 
			
		||||
	_, err := ParseTarSum(s)
 | 
			
		||||
	if err == nil {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Continue on for general parser
 | 
			
		||||
 | 
			
		||||
	if !DigestRegexpAnchored.MatchString(s) {
 | 
			
		||||
		return ErrDigestInvalidFormat
 | 
			
		||||
			@ -2,6 +2,7 @@ package digest
 | 
			
		|||
 | 
			
		||||
import (
 | 
			
		||||
	"crypto"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"hash"
 | 
			
		||||
	"io"
 | 
			
		||||
)
 | 
			
		||||
| 
						 | 
				
			
			@ -16,7 +17,6 @@ const (
 | 
			
		|||
	SHA256 Algorithm = "sha256" // sha256 with hex encoding
 | 
			
		||||
	SHA384 Algorithm = "sha384" // sha384 with hex encoding
 | 
			
		||||
	SHA512 Algorithm = "sha512" // sha512 with hex encoding
 | 
			
		||||
	TarsumV1SHA256 Algorithm = "tarsum+v1+sha256" // supported tarsum version, verification only
 | 
			
		||||
 | 
			
		||||
	// Canonical is the primary digest algorithm used with the distribution
 | 
			
		||||
	// project. Other digests may be used but this one is the primary storage
 | 
			
		||||
| 
						 | 
				
			
			@ -85,11 +85,18 @@ func (a Algorithm) New() Digester {
 | 
			
		|||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Hash returns a new hash as used by the algorithm. If not available, nil is
 | 
			
		||||
// returned. Make sure to check Available before calling.
 | 
			
		||||
// Hash returns a new hash as used by the algorithm. If not available, the
 | 
			
		||||
// method will panic. Check Algorithm.Available() before calling.
 | 
			
		||||
func (a Algorithm) Hash() hash.Hash {
 | 
			
		||||
	if !a.Available() {
 | 
			
		||||
		return nil
 | 
			
		||||
		// NOTE(stevvooe): A missing hash is usually a programming error that
 | 
			
		||||
		// must be resolved at compile time. We don't import in the digest
 | 
			
		||||
		// package to allow users to choose their hash implementation (such as
 | 
			
		||||
		// when using stevvooe/resumable or a hardware accelerated package).
 | 
			
		||||
		//
 | 
			
		||||
		// Applications that may want to resolve the hash at runtime should
 | 
			
		||||
		// call Algorithm.Available before calling Algorithm.Hash().
 | 
			
		||||
		panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return algorithms[a].New()
 | 
			
		||||
| 
						 | 
				
			
			@ -106,6 +113,22 @@ func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
 | 
			
		|||
	return digester.Digest(), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// FromBytes digests the input and returns a Digest.
 | 
			
		||||
func (a Algorithm) FromBytes(p []byte) Digest {
 | 
			
		||||
	digester := a.New()
 | 
			
		||||
 | 
			
		||||
	if _, err := digester.Hash().Write(p); err != nil {
 | 
			
		||||
		// Writes to a Hash should never fail. None of the existing
 | 
			
		||||
		// hash implementations in the stdlib or hashes vendored
 | 
			
		||||
		// here can return errors from Write. Having a panic in this
 | 
			
		||||
		// condition instead of having FromBytes return an error value
 | 
			
		||||
		// avoids unnecessary error handling paths in all callers.
 | 
			
		||||
		panic("write to hash function returned error: " + err.Error())
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return digester.Digest()
 | 
			
		||||
}
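Editor's note: a minimal usage sketch (not part of this commit), assuming only the vendored import path shown; because the digest package does not pull in a hash implementation itself, the caller registers SHA-256 via a blank import.

package main

import (
	_ "crypto/sha256" // registers SHA-256 so the Canonical algorithm is Available

	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	d := digest.FromBytes([]byte("hello world"))              // package-level helper, no error return anymore
	same := digest.Canonical.FromBytes([]byte("hello world")) // equivalent, via the Algorithm method
	fmt.Println(d.Algorithm(), d.Hex(), d == same)            // sha256 <hex digest> true
}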
 | 
			
		||||
 | 
			
		||||
// TODO(stevvooe): Allow resolution of verifiers using the digest type and
 | 
			
		||||
// this registration system.
 | 
			
		||||
 | 
			
		||||
			@ -1,7 +1,7 @@
 | 
			
		|||
// Package digest provides a generalized type to opaquely represent message
 | 
			
		||||
// digests and their operations within the registry. The Digest type is
 | 
			
		||||
// designed to serve as a flexible identifier in a content-addressable system.
 | 
			
		||||
// More importantly, it provides tools and wrappers to work with tarsums and
 | 
			
		||||
// More importantly, it provides tools and wrappers to work with
 | 
			
		||||
// hash.Hash-based digests with little effort.
 | 
			
		||||
//
 | 
			
		||||
// Basics
 | 
			
		||||
| 
						 | 
				
			
			@ -16,17 +16,7 @@
 | 
			
		|||
// 	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
 | 
			
		||||
//
 | 
			
		||||
// In this case, the string "sha256" is the algorithm and the hex bytes are
 | 
			
		||||
// the "digest". A tarsum example will be more illustrative of the use case
 | 
			
		||||
// involved in the registry:
 | 
			
		||||
//
 | 
			
		||||
// 	tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b
 | 
			
		||||
//
 | 
			
		||||
// For this, we consider the algorithm to be "tarsum+sha256". Prudent
 | 
			
		||||
// applications will favor the ParseDigest function to verify the format over
 | 
			
		||||
// using simple type casts. However, a normal string can be cast as a digest
 | 
			
		||||
// with a simple type conversion:
 | 
			
		||||
//
 | 
			
		||||
// 	Digest("tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b")
 | 
			
		||||
// the "digest".
 | 
			
		||||
//
 | 
			
		||||
// Because the Digest type is simply a string, once a valid Digest is
 | 
			
		||||
// obtained, comparisons are cheap, quick and simple to express with the
 | 
			
		||||
			@ -1,70 +0,0 @@
 | 
			
		|||
package digest
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
 | 
			
		||||
	"regexp"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// TarsumRegexp defines a regular expression to match tarsum identifiers.
 | 
			
		||||
var TarsumRegexp = regexp.MustCompile("tarsum(?:.[a-z0-9]+)?\\+[a-zA-Z0-9]+:[A-Fa-f0-9]+")
 | 
			
		||||
 | 
			
		||||
// TarsumRegexpCapturing defines a regular expression to match tarsum identifiers with
 | 
			
		||||
// capture groups corresponding to each component.
 | 
			
		||||
var TarsumRegexpCapturing = regexp.MustCompile("(tarsum)(.([a-z0-9]+))?\\+([a-zA-Z0-9]+):([A-Fa-f0-9]+)")
 | 
			
		||||
 | 
			
		||||
// TarSumInfo contains information about a parsed tarsum.
 | 
			
		||||
type TarSumInfo struct {
 | 
			
		||||
	// Version contains the version of the tarsum.
 | 
			
		||||
	Version string
 | 
			
		||||
 | 
			
		||||
	// Algorithm contains the algorithm for the final digest
 | 
			
		||||
	Algorithm string
 | 
			
		||||
 | 
			
		||||
	// Digest contains the hex-encoded digest.
 | 
			
		||||
	Digest string
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// InvalidTarSumError provides informations about a TarSum that cannot be parsed
 | 
			
		||||
// by ParseTarSum.
 | 
			
		||||
type InvalidTarSumError string
 | 
			
		||||
 | 
			
		||||
func (e InvalidTarSumError) Error() string {
 | 
			
		||||
	return fmt.Sprintf("invalid tarsum: %q", string(e))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ParseTarSum parses a tarsum string into its components of interest. For
 | 
			
		||||
// example, this method may receive the tarsum in the following format:
 | 
			
		||||
//
 | 
			
		||||
//		tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e
 | 
			
		||||
//
 | 
			
		||||
// The function will return the following:
 | 
			
		||||
//
 | 
			
		||||
//		TarSumInfo{
 | 
			
		||||
//			Version: "v1",
 | 
			
		||||
//			Algorithm: "sha256",
 | 
			
		||||
//			Digest: "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e",
 | 
			
		||||
//		}
 | 
			
		||||
//
 | 
			
		||||
func ParseTarSum(tarSum string) (tsi TarSumInfo, err error) {
 | 
			
		||||
	components := TarsumRegexpCapturing.FindStringSubmatch(tarSum)
 | 
			
		||||
 | 
			
		||||
	if len(components) != 1+TarsumRegexpCapturing.NumSubexp() {
 | 
			
		||||
		return TarSumInfo{}, InvalidTarSumError(tarSum)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return TarSumInfo{
 | 
			
		||||
		Version:   components[3],
 | 
			
		||||
		Algorithm: components[4],
 | 
			
		||||
		Digest:    components[5],
 | 
			
		||||
	}, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// String returns the valid, string representation of the tarsum info.
 | 
			
		||||
func (tsi TarSumInfo) String() string {
 | 
			
		||||
	if tsi.Version == "" {
 | 
			
		||||
		return fmt.Sprintf("tarsum+%s:%s", tsi.Algorithm, tsi.Digest)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return fmt.Sprintf("tarsum.%s+%s:%s", tsi.Version, tsi.Algorithm, tsi.Digest)
 | 
			
		||||
}
 | 
			
		||||
| 
						 | 
				
			
			@ -3,9 +3,6 @@ package digest
 | 
			
		|||
import (
 | 
			
		||||
	"hash"
 | 
			
		||||
	"io"
 | 
			
		||||
	"io/ioutil"
 | 
			
		||||
 | 
			
		||||
	"github.com/docker/docker/pkg/tarsum"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Verifier presents a general verification interface to be used with message
 | 
			
		||||
| 
						 | 
				
			
			@ -27,70 +24,10 @@ func NewDigestVerifier(d Digest) (Verifier, error) {
 | 
			
		|||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	alg := d.Algorithm()
 | 
			
		||||
	switch alg {
 | 
			
		||||
	case "sha256", "sha384", "sha512":
 | 
			
		||||
	return hashVerifier{
 | 
			
		||||
			hash:   alg.Hash(),
 | 
			
		||||
		hash:   d.Algorithm().Hash(),
 | 
			
		||||
		digest: d,
 | 
			
		||||
	}, nil
 | 
			
		||||
	default:
 | 
			
		||||
		// Assume we have a tarsum.
 | 
			
		||||
		version, err := tarsum.GetVersionFromTarsum(string(d))
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		pr, pw := io.Pipe()
 | 
			
		||||
 | 
			
		||||
		// TODO(stevvooe): We may actually want to ban the earlier versions of
 | 
			
		||||
		// tarsum. That decision may not be the place of the verifier.
 | 
			
		||||
 | 
			
		||||
		ts, err := tarsum.NewTarSum(pr, true, version)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		// TODO(sday): Ick! A goroutine per digest verification? We'll have to
 | 
			
		||||
		// get the tarsum library to export an io.Writer variant.
 | 
			
		||||
		go func() {
 | 
			
		||||
			if _, err := io.Copy(ioutil.Discard, ts); err != nil {
 | 
			
		||||
				pr.CloseWithError(err)
 | 
			
		||||
			} else {
 | 
			
		||||
				pr.Close()
 | 
			
		||||
			}
 | 
			
		||||
		}()
 | 
			
		||||
 | 
			
		||||
		return &tarsumVerifier{
 | 
			
		||||
			digest: d,
 | 
			
		||||
			ts:     ts,
 | 
			
		||||
			pr:     pr,
 | 
			
		||||
			pw:     pw,
 | 
			
		||||
		}, nil
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewLengthVerifier returns a verifier that returns true when the number of
 | 
			
		||||
// read bytes equals the expected parameter.
 | 
			
		||||
func NewLengthVerifier(expected int64) Verifier {
 | 
			
		||||
	return &lengthVerifier{
 | 
			
		||||
		expected: expected,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type lengthVerifier struct {
 | 
			
		||||
	expected int64 // expected bytes read
 | 
			
		||||
	len      int64 // bytes read
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (lv *lengthVerifier) Write(p []byte) (n int, err error) {
 | 
			
		||||
	n = len(p)
 | 
			
		||||
	lv.len += int64(n)
 | 
			
		||||
	return n, err
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (lv *lengthVerifier) Verified() bool {
 | 
			
		||||
	return lv.expected == lv.len
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type hashVerifier struct {
 | 
			
		||||
| 
						 | 
				
			
			@ -105,18 +42,3 @@ func (hv hashVerifier) Write(p []byte) (n int, err error) {
 | 
			
		|||
func (hv hashVerifier) Verified() bool {
 | 
			
		||||
	return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type tarsumVerifier struct {
 | 
			
		||||
	digest Digest
 | 
			
		||||
	ts     tarsum.TarSum
 | 
			
		||||
	pr     *io.PipeReader
 | 
			
		||||
	pw     *io.PipeWriter
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (tv *tarsumVerifier) Write(p []byte) (n int, err error) {
 | 
			
		||||
	return tv.pw.Write(p)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (tv *tarsumVerifier) Verified() bool {
 | 
			
		||||
	return tv.digest == Digest(tv.ts.Sum(nil))
 | 
			
		||||
}
 | 
			
		||||
			@ -16,6 +16,15 @@ var ErrManifestNotModified = errors.New("manifest not modified")
 | 
			
		|||
// performed
 | 
			
		||||
var ErrUnsupported = errors.New("operation unsupported")
 | 
			
		||||
 | 
			
		||||
// ErrTagUnknown is returned if the given tag is not known by the tag service
 | 
			
		||||
type ErrTagUnknown struct {
 | 
			
		||||
	Tag string
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (err ErrTagUnknown) Error() string {
 | 
			
		||||
	return fmt.Sprintf("unknown tag=%s", err.Tag)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ErrRepositoryUnknown is returned if the named repository is not known by
 | 
			
		||||
// the registry.
 | 
			
		||||
type ErrRepositoryUnknown struct {
 | 
			
		||||
147 vendor/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go vendored Normal file
			@ -0,0 +1,147 @@
 | 
			
		|||
package manifestlist
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"encoding/json"
 | 
			
		||||
	"errors"
 | 
			
		||||
	"fmt"
 | 
			
		||||
 | 
			
		||||
	"github.com/docker/distribution"
 | 
			
		||||
	"github.com/docker/distribution/digest"
 | 
			
		||||
	"github.com/docker/distribution/manifest"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// MediaTypeManifestList specifies the mediaType for manifest lists.
 | 
			
		||||
const MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json"
 | 
			
		||||
 | 
			
		||||
// SchemaVersion provides a pre-initialized version structure for this
 | 
			
		||||
// package's version of the manifest.
 | 
			
		||||
var SchemaVersion = manifest.Versioned{
 | 
			
		||||
	SchemaVersion: 2,
 | 
			
		||||
	MediaType:     MediaTypeManifestList,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func init() {
 | 
			
		||||
	manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
 | 
			
		||||
		m := new(DeserializedManifestList)
 | 
			
		||||
		err := m.UnmarshalJSON(b)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, distribution.Descriptor{}, err
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		dgst := digest.FromBytes(b)
 | 
			
		||||
		return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err
 | 
			
		||||
	}
 | 
			
		||||
	err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		panic(fmt.Sprintf("Unable to register manifest: %s", err))
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// PlatformSpec specifies a platform where a particular image manifest is
 | 
			
		||||
// applicable.
 | 
			
		||||
type PlatformSpec struct {
 | 
			
		||||
	// Architecture field specifies the CPU architecture, for example
 | 
			
		||||
	// `amd64` or `ppc64`.
 | 
			
		||||
	Architecture string `json:"architecture"`
 | 
			
		||||
 | 
			
		||||
	// OS specifies the operating system, for example `linux` or `windows`.
 | 
			
		||||
	OS string `json:"os"`
 | 
			
		||||
 | 
			
		||||
	// Variant is an optional field specifying a variant of the CPU, for
 | 
			
		||||
	// example `ppc64le` to specify a little-endian version of a PowerPC CPU.
 | 
			
		||||
	Variant string `json:"variant,omitempty"`
 | 
			
		||||
 | 
			
		||||
	// Features is an optional field specifying an array of strings, each
 | 
			
		||||
	// listing a required CPU feature (for example `sse4` or `aes`).
 | 
			
		||||
	Features []string `json:"features,omitempty"`
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// A ManifestDescriptor references a platform-specific manifest.
 | 
			
		||||
type ManifestDescriptor struct {
 | 
			
		||||
	distribution.Descriptor
 | 
			
		||||
 | 
			
		||||
	// Platform specifies which platform the manifest pointed to by the
 | 
			
		||||
	// descriptor runs on.
 | 
			
		||||
	Platform PlatformSpec `json:"platform"`
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ManifestList references manifests for various platforms.
 | 
			
		||||
type ManifestList struct {
 | 
			
		||||
	manifest.Versioned
 | 
			
		||||
 | 
			
		||||
	// Manifests references the platform-specific image manifests.
 | 
			
		||||
	Manifests []ManifestDescriptor `json:"manifests"`
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// References returns the distribution descriptors for the referenced image
 | 
			
		||||
// manifests.
 | 
			
		||||
func (m ManifestList) References() []distribution.Descriptor {
 | 
			
		||||
	dependencies := make([]distribution.Descriptor, len(m.Manifests))
 | 
			
		||||
	for i := range m.Manifests {
 | 
			
		||||
		dependencies[i] = m.Manifests[i].Descriptor
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return dependencies
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// DeserializedManifestList wraps ManifestList with a copy of the original
 | 
			
		||||
// JSON.
 | 
			
		||||
type DeserializedManifestList struct {
 | 
			
		||||
	ManifestList
 | 
			
		||||
 | 
			
		||||
	// canonical is the canonical byte representation of the Manifest.
 | 
			
		||||
	canonical []byte
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// FromDescriptors takes a slice of descriptors, and returns a
 | 
			
		||||
// DeserializedManifestList which contains the resulting manifest list
 | 
			
		||||
// and its JSON representation.
 | 
			
		||||
func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) {
 | 
			
		||||
	m := ManifestList{
 | 
			
		||||
		Versioned: SchemaVersion,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors))
 | 
			
		||||
	copy(m.Manifests, descriptors)
 | 
			
		||||
 | 
			
		||||
	deserialized := DeserializedManifestList{
 | 
			
		||||
		ManifestList: m,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	var err error
 | 
			
		||||
	deserialized.canonical, err = json.MarshalIndent(&m, "", "   ")
 | 
			
		||||
	return &deserialized, err
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// UnmarshalJSON populates a new ManifestList struct from JSON data.
 | 
			
		||||
func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error {
 | 
			
		||||
	m.canonical = make([]byte, len(b), len(b))
 | 
			
		||||
	// store manifest list in canonical
 | 
			
		||||
	copy(m.canonical, b)
 | 
			
		||||
 | 
			
		||||
	// Unmarshal canonical JSON into ManifestList object
 | 
			
		||||
	var manifestList ManifestList
 | 
			
		||||
	if err := json.Unmarshal(m.canonical, &manifestList); err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	m.ManifestList = manifestList
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// MarshalJSON returns the contents of canonical. If canonical is empty,
 | 
			
		||||
// marshals the inner contents.
 | 
			
		||||
func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) {
 | 
			
		||||
	if len(m.canonical) > 0 {
 | 
			
		||||
		return m.canonical, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil, errors.New("JSON representation not initialized in DeserializedManifestList")
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Payload returns the raw content of the manifest list. The contents can be
 | 
			
		||||
// used to calculate the content identifier.
 | 
			
		||||
func (m DeserializedManifestList) Payload() (string, []byte, error) {
 | 
			
		||||
	return m.MediaType, m.canonical, nil
 | 
			
		||||
}
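Editor's note: an illustrative sketch (not part of this commit) of assembling a manifest list from two platform-specific manifest descriptors; the descriptor values are assumed to be supplied by the caller, and newList is a made-up helper name.

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/manifestlist"
)

// newList wraps per-platform manifest descriptors in a manifest list and
// returns the canonical JSON payload that would be pushed to a registry.
func newList(amd64, arm distribution.Descriptor) ([]byte, error) {
	descriptors := []manifestlist.ManifestDescriptor{
		{Descriptor: amd64, Platform: manifestlist.PlatformSpec{Architecture: "amd64", OS: "linux"}},
		{Descriptor: arm, Platform: manifestlist.PlatformSpec{Architecture: "arm", OS: "linux", Variant: "v7"}},
	}

	ml, err := manifestlist.FromDescriptors(descriptors)
	if err != nil {
		return nil, err
	}

	_, payload, err := ml.Payload()
	return payload, err
}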
278 vendor/src/github.com/docker/distribution/manifest/schema1/config_builder.go vendored Normal file
			@ -0,0 +1,278 @@
 | 
			
		|||
package schema1
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"crypto/sha512"
 | 
			
		||||
	"encoding/json"
 | 
			
		||||
	"errors"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/docker/distribution"
 | 
			
		||||
	"github.com/docker/distribution/context"
 | 
			
		||||
	"github.com/docker/libtrust"
 | 
			
		||||
 | 
			
		||||
	"github.com/docker/distribution/digest"
 | 
			
		||||
	"github.com/docker/distribution/manifest"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
type diffID digest.Digest
 | 
			
		||||
 | 
			
		||||
// gzippedEmptyTar is a gzip-compressed version of an empty tar file
 | 
			
		||||
// (1024 NULL bytes)
 | 
			
		||||
var gzippedEmptyTar = []byte{
 | 
			
		||||
	31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
 | 
			
		||||
	0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// digestSHA256GzippedEmptyTar is the canonical sha256 digest of
 | 
			
		||||
// gzippedEmptyTar
 | 
			
		||||
const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
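Editor's note: as a quick sanity check (not part of this commit), the constant can be recomputed from the byte slice above using the reworked FromBytes helper; verifyEmptyTarDigest is a made-up name.

package example

import (
	_ "crypto/sha256" // register SHA-256 for digest.Canonical

	"fmt"

	"github.com/docker/distribution/digest"
)

// verifyEmptyTarDigest recomputes the digest of the gzipped empty tar and
// compares it against the hard-coded constant; the two are expected to match.
func verifyEmptyTarDigest(gzippedEmptyTar []byte, want digest.Digest) error {
	if got := digest.FromBytes(gzippedEmptyTar); got != want {
		return fmt.Errorf("got %s, want %s", got, want)
	}
	return nil
}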
 | 
			
		||||
 | 
			
		||||
// configManifestBuilder is a type for constructing manifests from an image
 | 
			
		||||
// configuration and generic descriptors.
 | 
			
		||||
type configManifestBuilder struct {
 | 
			
		||||
	// bs is a BlobService used to create empty layer tars in the
 | 
			
		||||
	// blob store if necessary.
 | 
			
		||||
	bs distribution.BlobService
 | 
			
		||||
	// pk is the libtrust private key used to sign the final manifest.
 | 
			
		||||
	pk libtrust.PrivateKey
 | 
			
		||||
	// configJSON is configuration supplied when the ManifestBuilder was
 | 
			
		||||
	// created.
 | 
			
		||||
	configJSON []byte
 | 
			
		||||
	// name is the name provided to NewConfigManifestBuilder
 | 
			
		||||
	name string
 | 
			
		||||
	// tag is the tag provided to NewConfigManifestBuilder
 | 
			
		||||
	tag string
 | 
			
		||||
	// descriptors is the set of descriptors referencing the layers.
 | 
			
		||||
	descriptors []distribution.Descriptor
 | 
			
		||||
	// emptyTarDigest is set to a valid digest if an empty tar has been
 | 
			
		||||
	// put in the blob store; otherwise it is empty.
 | 
			
		||||
	emptyTarDigest digest.Digest
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewConfigManifestBuilder is used to build new manifests for the current
 | 
			
		||||
// schema version from an image configuration and a set of descriptors.
 | 
			
		||||
// It takes a BlobService so that it can add an empty tar to the blob store
 | 
			
		||||
// if the resulting manifest needs empty layers.
 | 
			
		||||
func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, name, tag string, configJSON []byte) distribution.ManifestBuilder {
 | 
			
		||||
	return &configManifestBuilder{
 | 
			
		||||
		bs:         bs,
 | 
			
		||||
		pk:         pk,
 | 
			
		||||
		configJSON: configJSON,
 | 
			
		||||
		name:       name,
 | 
			
		||||
		tag:        tag,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Build produces a final manifest from the given references
 | 
			
		||||
func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) {
 | 
			
		||||
	type imageRootFS struct {
 | 
			
		||||
		Type      string   `json:"type"`
 | 
			
		||||
		DiffIDs   []diffID `json:"diff_ids,omitempty"`
 | 
			
		||||
		BaseLayer string   `json:"base_layer,omitempty"`
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	type imageHistory struct {
 | 
			
		||||
		Created    time.Time `json:"created"`
 | 
			
		||||
		Author     string    `json:"author,omitempty"`
 | 
			
		||||
		CreatedBy  string    `json:"created_by,omitempty"`
 | 
			
		||||
		Comment    string    `json:"comment,omitempty"`
 | 
			
		||||
		EmptyLayer bool      `json:"empty_layer,omitempty"`
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	type imageConfig struct {
 | 
			
		||||
		RootFS       *imageRootFS   `json:"rootfs,omitempty"`
 | 
			
		||||
		History      []imageHistory `json:"history,omitempty"`
 | 
			
		||||
		Architecture string         `json:"architecture,omitempty"`
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	var img imageConfig
 | 
			
		||||
 | 
			
		||||
	if err := json.Unmarshal(mb.configJSON, &img); err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(img.History) == 0 {
 | 
			
		||||
		return nil, errors.New("empty history when trying to create schema1 manifest")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(img.RootFS.DiffIDs) != len(mb.descriptors) {
 | 
			
		||||
		return nil, errors.New("number of descriptors and number of layers in rootfs must match")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Generate IDs for each layer
 | 
			
		||||
	// For non-top-level layers, create fake V1Compatibility strings that
 | 
			
		||||
	// fit the format and don't collide with anything else, but don't
 | 
			
		||||
	// result in runnable images on their own.
 | 
			
		||||
	type v1Compatibility struct {
 | 
			
		||||
		ID              string    `json:"id"`
 | 
			
		||||
		Parent          string    `json:"parent,omitempty"`
 | 
			
		||||
		Comment         string    `json:"comment,omitempty"`
 | 
			
		||||
		Created         time.Time `json:"created"`
 | 
			
		||||
		ContainerConfig struct {
 | 
			
		||||
			Cmd []string
 | 
			
		||||
		} `json:"container_config,omitempty"`
 | 
			
		||||
		ThrowAway bool `json:"throwaway,omitempty"`
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	fsLayerList := make([]FSLayer, len(img.History))
 | 
			
		||||
	history := make([]History, len(img.History))
 | 
			
		||||
 | 
			
		||||
	parent := ""
 | 
			
		||||
	layerCounter := 0
 | 
			
		||||
	for i, h := range img.History[:len(img.History)-1] {
 | 
			
		||||
		var blobsum digest.Digest
 | 
			
		||||
		if h.EmptyLayer {
 | 
			
		||||
			if blobsum, err = mb.emptyTar(ctx); err != nil {
 | 
			
		||||
				return nil, err
 | 
			
		||||
			}
 | 
			
		||||
		} else {
 | 
			
		||||
			if len(img.RootFS.DiffIDs) <= layerCounter {
 | 
			
		||||
				return nil, errors.New("too many non-empty layers in History section")
 | 
			
		||||
			}
 | 
			
		||||
			blobsum = mb.descriptors[layerCounter].Digest
 | 
			
		||||
			layerCounter++
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex()
 | 
			
		||||
 | 
			
		||||
		if i == 0 && img.RootFS.BaseLayer != "" {
 | 
			
		||||
			// windows-only baselayer setup
 | 
			
		||||
			baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer))
 | 
			
		||||
			parent = fmt.Sprintf("%x", baseID[:32])
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		v1Compatibility := v1Compatibility{
 | 
			
		||||
			ID:      v1ID,
 | 
			
		||||
			Parent:  parent,
 | 
			
		||||
			Comment: h.Comment,
 | 
			
		||||
			Created: h.Created,
 | 
			
		||||
		}
 | 
			
		||||
		v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy}
 | 
			
		||||
		if h.EmptyLayer {
 | 
			
		||||
			v1Compatibility.ThrowAway = true
 | 
			
		||||
		}
 | 
			
		||||
		jsonBytes, err := json.Marshal(&v1Compatibility)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		reversedIndex := len(img.History) - i - 1
 | 
			
		||||
		history[reversedIndex].V1Compatibility = string(jsonBytes)
 | 
			
		||||
		fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum}
 | 
			
		||||
 | 
			
		||||
		parent = v1ID
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	latestHistory := img.History[len(img.History)-1]
 | 
			
		||||
 | 
			
		||||
	var blobsum digest.Digest
 | 
			
		||||
	if latestHistory.EmptyLayer {
 | 
			
		||||
		if blobsum, err = mb.emptyTar(ctx); err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
	} else {
 | 
			
		||||
		if len(img.RootFS.DiffIDs) <= layerCounter {
 | 
			
		||||
			return nil, errors.New("too many non-empty layers in History section")
 | 
			
		||||
		}
 | 
			
		||||
		blobsum = mb.descriptors[layerCounter].Digest
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	fsLayerList[0] = FSLayer{BlobSum: blobsum}
 | 
			
		||||
	dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON)))
 | 
			
		||||
 | 
			
		||||
	// Top-level v1compatibility string should be a modified version of the
 | 
			
		||||
	// image config.
 | 
			
		||||
	transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	history[0].V1Compatibility = string(transformedConfig)
 | 
			
		||||
 | 
			
		||||
	mfst := Manifest{
 | 
			
		||||
		Versioned: manifest.Versioned{
 | 
			
		||||
			SchemaVersion: 1,
 | 
			
		||||
		},
 | 
			
		||||
		Name:         mb.name,
 | 
			
		||||
		Tag:          mb.tag,
 | 
			
		||||
		Architecture: img.Architecture,
 | 
			
		||||
		FSLayers:     fsLayerList,
 | 
			
		||||
		History:      history,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return Sign(&mfst, mb.pk)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// emptyTar pushes a compressed empty tar to the blob store if one doesn't
 | 
			
		||||
// already exist, and returns its blobsum.
 | 
			
		||||
func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) {
 | 
			
		||||
	if mb.emptyTarDigest != "" {
 | 
			
		||||
		// Already put an empty tar
 | 
			
		||||
		return mb.emptyTarDigest, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar)
 | 
			
		||||
	switch err {
 | 
			
		||||
	case nil:
 | 
			
		||||
		mb.emptyTarDigest = descriptor.Digest
 | 
			
		||||
		return descriptor.Digest, nil
 | 
			
		||||
	case distribution.ErrBlobUnknown:
 | 
			
		||||
		// nop
 | 
			
		||||
	default:
 | 
			
		||||
		return "", err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Add gzipped empty tar to the blob store
 | 
			
		||||
	descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return "", err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	mb.emptyTarDigest = descriptor.Digest
 | 
			
		||||
 | 
			
		||||
	return descriptor.Digest, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// AppendReference adds a reference to the current ManifestBuilder
 | 
			
		||||
func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error {
 | 
			
		||||
	// todo: verification here?
 | 
			
		||||
	mb.descriptors = append(mb.descriptors, d.Descriptor())
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// References returns the current references added to this builder
 | 
			
		||||
func (mb *configManifestBuilder) References() []distribution.Descriptor {
 | 
			
		||||
	return mb.descriptors
 | 
			
		||||
}
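Editor's note: a hypothetical sketch (not part of this commit) of the intended call sequence for this builder; the blob service, signing key, image configuration and layer descriptors are assumed to come from the caller, the repository name and tag are placeholders, and Background() from the distribution context package is assumed to be available.

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/libtrust"
)

// buildSchema1 creates the builder, appends one reference per layer (a plain
// distribution.Descriptor satisfies Describable via its Descriptor method),
// and then produces the signed schema1 manifest.
func buildSchema1(bs distribution.BlobService, pk libtrust.PrivateKey, configJSON []byte, layers []distribution.Descriptor) (distribution.Manifest, error) {
	builder := schema1.NewConfigManifestBuilder(bs, pk, "library/example", "latest", configJSON)
	for _, d := range layers {
		if err := builder.AppendReference(d); err != nil {
			return nil, err
		}
	}
	return builder.Build(context.Background())
}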
 | 
			
		||||
 | 
			
		||||
// MakeV1ConfigFromConfig creates a legacy V1 image config from image config JSON
 | 
			
		||||
func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
 | 
			
		||||
	// Top-level v1compatibility string should be a modified version of the
 | 
			
		||||
	// image config.
 | 
			
		||||
	var configAsMap map[string]*json.RawMessage
 | 
			
		||||
	if err := json.Unmarshal(configJSON, &configAsMap); err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Delete fields that didn't exist in old manifest
 | 
			
		||||
	delete(configAsMap, "rootfs")
 | 
			
		||||
	delete(configAsMap, "history")
 | 
			
		||||
	configAsMap["id"] = rawJSON(v1ID)
 | 
			
		||||
	if parentV1ID != "" {
 | 
			
		||||
		configAsMap["parent"] = rawJSON(parentV1ID)
 | 
			
		||||
	}
 | 
			
		||||
	if throwaway {
 | 
			
		||||
		configAsMap["throwaway"] = rawJSON(true)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return json.Marshal(configAsMap)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func rawJSON(value interface{}) *json.RawMessage {
 | 
			
		||||
	jsonval, err := json.Marshal(value)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
	return (*json.RawMessage)(&jsonval)
 | 
			
		||||
}
			@ -2,20 +2,22 @@ package schema1
 | 
			
		|||
 | 
			
		||||
import (
 | 
			
		||||
	"encoding/json"
 | 
			
		||||
	"fmt"
 | 
			
		||||
 | 
			
		||||
	"github.com/docker/distribution"
 | 
			
		||||
	"github.com/docker/distribution/digest"
 | 
			
		||||
	"github.com/docker/distribution/manifest"
 | 
			
		||||
	"github.com/docker/libtrust"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// TODO(stevvooe): When we rev the manifest format, the contents of this
 | 
			
		||||
// package should be moved to manifest/v1.
 | 
			
		||||
 | 
			
		||||
const (
 | 
			
		||||
	// ManifestMediaType specifies the mediaType for the current version. Note
 | 
			
		||||
	// that for schema version 1, the media is optionally
 | 
			
		||||
	// "application/json".
 | 
			
		||||
	ManifestMediaType = "application/vnd.docker.distribution.manifest.v1+json"
 | 
			
		||||
	// MediaTypeManifest specifies the mediaType for the current version. Note
 | 
			
		||||
	// that for schema version 1, the media is optionally "application/json".
 | 
			
		||||
	MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json"
 | 
			
		||||
	// MediaTypeSignedManifest specifies the mediatype for current SignedManifest version
 | 
			
		||||
	MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
 | 
			
		||||
	// MediaTypeManifestLayer specifies the media type for manifest layers
 | 
			
		||||
	MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var (
 | 
			
		||||
| 
						 | 
				
			
			@ -26,6 +28,47 @@ var (
 | 
			
		|||
	}
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
func init() {
 | 
			
		||||
	schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
 | 
			
		||||
		sm := new(SignedManifest)
 | 
			
		||||
		err := sm.UnmarshalJSON(b)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, distribution.Descriptor{}, err
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		desc := distribution.Descriptor{
 | 
			
		||||
			Digest:    digest.FromBytes(sm.Canonical),
 | 
			
		||||
			Size:      int64(len(sm.Canonical)),
 | 
			
		||||
			MediaType: MediaTypeManifest,
 | 
			
		||||
		}
 | 
			
		||||
		return sm, desc, err
 | 
			
		||||
	}
 | 
			
		||||
	err := distribution.RegisterManifestSchema(MediaTypeManifest, schema1Func)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		panic(fmt.Sprintf("Unable to register manifest: %s", err))
 | 
			
		||||
	}
 | 
			
		||||
	err = distribution.RegisterManifestSchema("", schema1Func)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		panic(fmt.Sprintf("Unable to register manifest: %s", err))
 | 
			
		||||
	}
 | 
			
		||||
	err = distribution.RegisterManifestSchema("application/json; charset=utf-8", schema1Func)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		panic(fmt.Sprintf("Unable to register manifest: %s", err))
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// FSLayer is a container struct for BlobSums defined in an image manifest
 | 
			
		||||
type FSLayer struct {
 | 
			
		||||
	// BlobSum is the tarsum of the referenced filesystem image layer
 | 
			
		||||
	BlobSum digest.Digest `json:"blobSum"`
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// History stores unstructured v1 compatibility information
 | 
			
		||||
type History struct {
 | 
			
		||||
	// V1Compatibility is the raw v1 compatibility information
 | 
			
		||||
	V1Compatibility string `json:"v1Compatibility"`
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Manifest provides the base accessible fields for working with V2 image
 | 
			
		||||
// format in the registry.
 | 
			
		||||
type Manifest struct {
 | 
			
		||||
| 
						 | 
				
			
			@ -49,59 +92,64 @@ type Manifest struct {
 | 
			
		|||
}
 | 
			
		||||
 | 
			
		||||
// SignedManifest provides an envelope for a signed image manifest, including
 | 
			
		||||
// the format sensitive raw bytes. It contains fields to
 | 
			
		||||
// the format sensitive raw bytes.
 | 
			
		||||
type SignedManifest struct {
 | 
			
		||||
	Manifest
 | 
			
		||||
 | 
			
		||||
	// Raw is the byte representation of the ImageManifest, used for signature
 | 
			
		||||
	// verification. The value of Raw must be used directly during
 | 
			
		||||
	// serialization, or the signature check will fail. The manifest byte
 | 
			
		||||
	// Canonical is the canonical byte representation of the ImageManifest,
 | 
			
		||||
	// without any attached signatures. The manifest byte
 | 
			
		||||
	// representation cannot change or it will have to be re-signed.
 | 
			
		||||
	Raw []byte `json:"-"`
 | 
			
		||||
	Canonical []byte `json:"-"`
 | 
			
		||||
 | 
			
		||||
	// all contains the byte representation of the Manifest including signatures
 | 
			
		||||
	// and is retuend by Payload()
 | 
			
		||||
	all []byte
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// UnmarshalJSON populates a new ImageManifest struct from JSON data.
 | 
			
		||||
// UnmarshalJSON populates a new SignedManifest struct from JSON data.
 | 
			
		||||
func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
 | 
			
		||||
	sm.Raw = make([]byte, len(b), len(b))
 | 
			
		||||
	copy(sm.Raw, b)
 | 
			
		||||
	sm.all = make([]byte, len(b), len(b))
 | 
			
		||||
	// store manifest and signatures in all
 | 
			
		||||
	copy(sm.all, b)
 | 
			
		||||
 | 
			
		||||
	p, err := sm.Payload()
 | 
			
		||||
	jsig, err := libtrust.ParsePrettySignature(b, "signatures")
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Resolve the payload in the manifest.
 | 
			
		||||
	bytes, err := jsig.Payload()
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// sm.Canonical stores the canonical manifest JSON
 | 
			
		||||
	sm.Canonical = make([]byte, len(bytes), len(bytes))
 | 
			
		||||
	copy(sm.Canonical, bytes)
 | 
			
		||||
 | 
			
		||||
	// Unmarshal canonical JSON into Manifest object
 | 
			
		||||
	var manifest Manifest
 | 
			
		||||
	if err := json.Unmarshal(p, &manifest); err != nil {
 | 
			
		||||
	if err := json.Unmarshal(sm.Canonical, &manifest); err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	sm.Manifest = manifest
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Payload returns the raw, signed content of the signed manifest. The
 | 
			
		||||
// contents can be used to calculate the content identifier.
 | 
			
		||||
func (sm *SignedManifest) Payload() ([]byte, error) {
 | 
			
		||||
	jsig, err := libtrust.ParsePrettySignature(sm.Raw, "signatures")
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
// References returnes the descriptors of this manifests references
 | 
			
		||||
func (sm SignedManifest) References() []distribution.Descriptor {
 | 
			
		||||
	dependencies := make([]distribution.Descriptor, len(sm.FSLayers))
 | 
			
		||||
	for i, fsLayer := range sm.FSLayers {
 | 
			
		||||
		dependencies[i] = distribution.Descriptor{
 | 
			
		||||
			MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar",
 | 
			
		||||
			Digest:    fsLayer.BlobSum,
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Resolve the payload in the manifest.
 | 
			
		||||
	return jsig.Payload()
 | 
			
		||||
}
 | 
			
		||||
	return dependencies
 | 
			
		||||
 | 
			
		||||
// Signatures returns the signatures as provided by
 | 
			
		||||
// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws
 | 
			
		||||
// signatures.
 | 
			
		||||
func (sm *SignedManifest) Signatures() ([][]byte, error) {
 | 
			
		||||
	jsig, err := libtrust.ParsePrettySignature(sm.Raw, "signatures")
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Resolve the payload in the manifest.
 | 
			
		||||
	return jsig.Signatures()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner
 | 
			
		||||
| 
						 | 
				
			
			@ -109,22 +157,28 @@ func (sm *SignedManifest) Signatures() ([][]byte, error) {
 | 
			
		|||
// use Raw directly, since the the content produced by json.Marshal will be
 | 
			
		||||
// compacted and will fail signature checks.
 | 
			
		||||
func (sm *SignedManifest) MarshalJSON() ([]byte, error) {
 | 
			
		||||
	if len(sm.Raw) > 0 {
 | 
			
		||||
		return sm.Raw, nil
 | 
			
		||||
	if len(sm.all) > 0 {
 | 
			
		||||
		return sm.all, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// If the raw data is not available, just dump the inner content.
 | 
			
		||||
	return json.Marshal(&sm.Manifest)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// FSLayer is a container struct for BlobSums defined in an image manifest
 | 
			
		||||
type FSLayer struct {
 | 
			
		||||
	// BlobSum is the tarsum of the referenced filesystem image layer
 | 
			
		||||
	BlobSum digest.Digest `json:"blobSum"`
 | 
			
		||||
// Payload returns the signed content of the signed manifest.
 | 
			
		||||
func (sm SignedManifest) Payload() (string, []byte, error) {
 | 
			
		||||
	return MediaTypeManifest, sm.all, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// History stores unstructured v1 compatibility information
 | 
			
		||||
type History struct {
 | 
			
		||||
	// V1Compatibility is the raw v1 compatibility information
 | 
			
		||||
	V1Compatibility string `json:"v1Compatibility"`
 | 
			
		||||
// Signatures returns the signatures as provided by
 | 
			
		||||
// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws
 | 
			
		||||
// signatures.
 | 
			
		||||
func (sm *SignedManifest) Signatures() ([][]byte, error) {
 | 
			
		||||
	jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures")
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Resolve the payload in the manifest.
 | 
			
		||||
	return jsig.Signatures()
 | 
			
		||||
}
 | 
			
		||||
| 
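As a usage note, the descriptor for a schema1 manifest can be rebuilt exactly the way the registered UnmarshalFunc above does it. A small sketch:

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/schema1"
)

// describeSchema1 mirrors the init() registration above: digest and size come
// from the canonical (unsigned) bytes, and the media type is the schema1
// manifest type.
func describeSchema1(sm *schema1.SignedManifest) distribution.Descriptor {
	return distribution.Descriptor{
		Digest:    digest.FromBytes(sm.Canonical),
		Size:      int64(len(sm.Canonical)),
		MediaType: schema1.MediaTypeManifest,
	}
}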
92	vendor/src/github.com/docker/distribution/manifest/schema1/reference_builder.go	vendored	Normal file
@@ -0,0 +1,92 @@
package schema1

import (
	"fmt"

	"errors"
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest"
	"github.com/docker/libtrust"
)

// referenceManifestBuilder is a type for constructing manifests from schema1
// dependencies.
type referenceManifestBuilder struct {
	Manifest
	pk libtrust.PrivateKey
}

// NewReferenceManifestBuilder is used to build new manifests for the current
// schema version using schema1 dependencies.
func NewReferenceManifestBuilder(pk libtrust.PrivateKey, name, tag, architecture string) distribution.ManifestBuilder {
	return &referenceManifestBuilder{
		Manifest: Manifest{
			Versioned: manifest.Versioned{
				SchemaVersion: 1,
			},
			Name:         name,
			Tag:          tag,
			Architecture: architecture,
		},
		pk: pk,
	}
}

func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) {
	m := mb.Manifest
	if len(m.FSLayers) == 0 {
		return nil, errors.New("cannot build manifest with zero layers or history")
	}

	m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers))
	m.History = make([]History, len(mb.Manifest.History))
	copy(m.FSLayers, mb.Manifest.FSLayers)
	copy(m.History, mb.Manifest.History)

	return Sign(&m, mb.pk)
}

// AppendReference adds a reference to the current ManifestBuilder
func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error {
	r, ok := d.(Reference)
	if !ok {
		return fmt.Errorf("Unable to add non-reference type to v1 builder")
	}

	// Entries need to be prepended
	mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...)
	mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...)
	return nil

}

// References returns the current references added to this builder
func (mb *referenceManifestBuilder) References() []distribution.Descriptor {
	refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers))
	for i := range mb.Manifest.FSLayers {
		layerDigest := mb.Manifest.FSLayers[i].BlobSum
		history := mb.Manifest.History[i]
		ref := Reference{layerDigest, 0, history}
		refs[i] = ref.Descriptor()
	}
	return refs
}

// Reference describes a manifest v2, schema version 1 dependency.
// An FSLayer associated with a history entry.
type Reference struct {
	Digest  digest.Digest
	Size    int64 // if we know it, set it for the descriptor.
	History History
}

// Descriptor describes a reference
func (r Reference) Descriptor() distribution.Descriptor {
	return distribution.Descriptor{
		MediaType: MediaTypeManifestLayer,
		Digest:    r.Digest,
		Size:      r.Size,
	}
}
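A hedged sketch of the builder in use. The layer digest and v1Compatibility payload are placeholders; a real caller would supply values from the image being pushed.

package main

import (
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/libtrust"
)

func main() {
	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}

	builder := schema1.NewReferenceManifestBuilder(pk, "library/example", "latest", "amd64")

	// Placeholder layer reference: digest is the well-known empty-string sha256.
	ref := schema1.Reference{
		Digest:  digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"),
		Size:    0,
		History: schema1.History{V1Compatibility: `{"id":"placeholder"}`},
	}
	if err := builder.AppendReference(ref); err != nil {
		panic(err)
	}

	// Build signs the assembled manifest with pk and returns a
	// distribution.Manifest (a *schema1.SignedManifest underneath).
	if _, err := builder.Build(context.Background()); err != nil {
		panic(err)
	}
}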
@@ -32,7 +32,8 @@ func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) {

	return &SignedManifest{
		Manifest:  *m,
		Raw:      pretty,
		all:       pretty,
		Canonical: p,
	}, nil
}

@@ -61,6 +62,7 @@ func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certifica

	return &SignedManifest{
		Manifest:  *m,
		Raw:      pretty,
		all:       pretty,
		Canonical: p,
	}, nil
}
@@ -10,7 +10,7 @@ import (
// Verify verifies the signature of the signed manifest returning the public
// keys used during signing.
func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) {
	js, err := libtrust.ParsePrettySignature(sm.Raw, "signatures")
	js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
	if err != nil {
		logrus.WithField("err", err).Debugf("(*SignedManifest).Verify")
		return nil, err

@@ -23,7 +23,7 @@ func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) {
// certificate pool returning the list of verified chains. Signatures without
// an x509 chain are not checked.
func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) {
	js, err := libtrust.ParsePrettySignature(sm.Raw, "signatures")
	js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
	if err != nil {
		return nil, err
	}
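A minimal sketch of signature checking with the updated SignedManifest internals. The zero-key rejection is an illustrative policy for the example, not something this package enforces.

package example

import (
	"fmt"

	"github.com/docker/distribution/manifest/schema1"
)

// checkSignatures rejects a manifest whose jws signatures do not verify, or
// which carries no verifiable signature at all.
func checkSignatures(sm *schema1.SignedManifest) error {
	keys, err := schema1.Verify(sm)
	if err != nil {
		return err
	}
	if len(keys) == 0 {
		return fmt.Errorf("manifest carries no verifiable signatures")
	}
	return nil
}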
							
								
								
									
74	vendor/src/github.com/docker/distribution/manifest/schema2/builder.go	vendored	Normal file
@@ -0,0 +1,74 @@
package schema2

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// builder is a type for constructing manifests.
type builder struct {
	// bs is a BlobService used to publish the configuration blob.
	bs distribution.BlobService

	// configJSON references
	configJSON []byte

	// layers is a list of layer descriptors that gets built by successive
	// calls to AppendReference.
	layers []distribution.Descriptor
}

// NewManifestBuilder is used to build new manifests for the current schema
// version. It takes a BlobService so it can publish the configuration blob
// as part of the Build process.
func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder {
	mb := &builder{
		bs:         bs,
		configJSON: make([]byte, len(configJSON)),
	}
	copy(mb.configJSON, configJSON)

	return mb
}

// Build produces a final manifest from the given references.
func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) {
	m := Manifest{
		Versioned: SchemaVersion,
		Layers:    make([]distribution.Descriptor, len(mb.layers)),
	}
	copy(m.Layers, mb.layers)

	configDigest := digest.FromBytes(mb.configJSON)

	var err error
	m.Config, err = mb.bs.Stat(ctx, configDigest)
	switch err {
	case nil:
		return FromStruct(m)
	case distribution.ErrBlobUnknown:
		// nop
	default:
		return nil, err
	}

	// Add config to the blob store
	m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON)
	if err != nil {
		return nil, err
	}

	return FromStruct(m)
}

// AppendReference adds a reference to the current ManifestBuilder.
func (mb *builder) AppendReference(d distribution.Describable) error {
	mb.layers = append(mb.layers, d.Descriptor())
	return nil
}

// References returns the current references added to this builder.
func (mb *builder) References() []distribution.Descriptor {
	return mb.layers
}
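A sketch of assembling a schema2 manifest with this builder. The BlobService, config JSON, and layer descriptors are assumed to come from the caller (for example a push session), and describedLayer is a small adapter invented for the example.

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest/schema2"
)

// describedLayer lets a plain Descriptor be passed where a
// distribution.Describable is expected; it exists only for this illustration.
type describedLayer struct{ d distribution.Descriptor }

func (dl describedLayer) Descriptor() distribution.Descriptor { return dl.d }

// buildSchema2 appends layer descriptors in base-to-head order, then lets
// Build stat or upload the config blob and return a DeserializedManifest.
func buildSchema2(ctx context.Context, bs distribution.BlobService, configJSON []byte, layers []distribution.Descriptor) (distribution.Manifest, error) {
	builder := schema2.NewManifestBuilder(bs, configJSON)
	for _, layer := range layers {
		if err := builder.AppendReference(describedLayer{d: layer}); err != nil {
			return nil, err
		}
	}
	return builder.Build(ctx)
}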
							
								
								
									
125	vendor/src/github.com/docker/distribution/manifest/schema2/manifest.go	vendored	Normal file
@@ -0,0 +1,125 @@
package schema2

import (
	"encoding/json"
	"errors"
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest"
)

const (
	// MediaTypeManifest specifies the mediaType for the current version.
	MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json"

	// MediaTypeConfig specifies the mediaType for the image configuration.
	MediaTypeConfig = "application/vnd.docker.container.image.v1+json"

	// MediaTypeLayer is the mediaType used for layers referenced by the
	// manifest.
	MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
)

var (
	// SchemaVersion provides a pre-initialized version structure for this
	// package's version of the manifest.
	SchemaVersion = manifest.Versioned{
		SchemaVersion: 2,
		MediaType:     MediaTypeManifest,
	}
)

func init() {
	schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
		m := new(DeserializedManifest)
		err := m.UnmarshalJSON(b)
		if err != nil {
			return nil, distribution.Descriptor{}, err
		}

		dgst := digest.FromBytes(b)
		return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err
	}
	err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func)
	if err != nil {
		panic(fmt.Sprintf("Unable to register manifest: %s", err))
	}
}

// Manifest defines a schema2 manifest.
type Manifest struct {
	manifest.Versioned

	// Config references the image configuration as a blob.
	Config distribution.Descriptor `json:"config"`

	// Layers lists descriptors for the layers referenced by the
	// configuration.
	Layers []distribution.Descriptor `json:"layers"`
}

// References returns the descriptors of this manifest's references.
func (m Manifest) References() []distribution.Descriptor {
	return m.Layers

}

// Target returns the target of this signed manifest.
func (m Manifest) Target() distribution.Descriptor {
	return m.Config
}

// DeserializedManifest wraps Manifest with a copy of the original JSON.
// It satisfies the distribution.Manifest interface.
type DeserializedManifest struct {
	Manifest

	// canonical is the canonical byte representation of the Manifest.
	canonical []byte
}

// FromStruct takes a Manifest structure, marshals it to JSON, and returns a
// DeserializedManifest which contains the manifest and its JSON representation.
func FromStruct(m Manifest) (*DeserializedManifest, error) {
	var deserialized DeserializedManifest
	deserialized.Manifest = m

	var err error
	deserialized.canonical, err = json.MarshalIndent(&m, "", "   ")
	return &deserialized, err
}

// UnmarshalJSON populates a new Manifest struct from JSON data.
func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
	m.canonical = make([]byte, len(b), len(b))
	// store manifest in canonical
	copy(m.canonical, b)

	// Unmarshal canonical JSON into Manifest object
	var manifest Manifest
	if err := json.Unmarshal(m.canonical, &manifest); err != nil {
		return err
	}

	m.Manifest = manifest

	return nil
}

// MarshalJSON returns the contents of canonical. If canonical is empty,
// marshals the inner contents.
func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
	if len(m.canonical) > 0 {
		return m.canonical, nil
	}

	return nil, errors.New("JSON representation not initialized in DeserializedManifest")
}

// Payload returns the raw content of the manifest. The contents can be used to
// calculate the content identifier.
func (m DeserializedManifest) Payload() (string, []byte, error) {
	return m.MediaType, m.canonical, nil
}
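A short sketch of the DeserializedManifest round trip implied by this file. It relies only on FromStruct, Payload, and the UnmarshalFunc registered in init above.

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/schema2"
)

// roundTrip shows that Payload hands back the canonical bytes captured by
// FromStruct, and that UnmarshalManifest can reverse the process because the
// schema2 UnmarshalFunc is registered when this package is imported.
func roundTrip(m schema2.Manifest) (distribution.Descriptor, error) {
	dm, err := schema2.FromStruct(m)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	mediaType, canonical, err := dm.Payload()
	if err != nil {
		return distribution.Descriptor{}, err
	}

	// The digest of the canonical bytes is what the registry reports for
	// this manifest.
	_, desc, err := distribution.UnmarshalManifest(mediaType, canonical)
	return desc, err
}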
@@ -1,9 +1,12 @@
package manifest

// Versioned provides a struct with just the manifest schemaVersion. Incoming
// Versioned provides a struct with the manifest schemaVersion and mediaType. Incoming
// content with unknown schema version can be decoded against this struct to
// check the version.
type Versioned struct {
	// SchemaVersion is the image manifest schema that this image follows
	SchemaVersion int `json:"schemaVersion"`

	// MediaType is the media type of this schema.
	MediaType string `json:"mediaType,omitempty"`
}
100	vendor/src/github.com/docker/distribution/manifests.go	vendored	Normal file
@@ -0,0 +1,100 @@
package distribution

import (
	"fmt"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// Manifest represents a registry object specifying a set of
// references and an optional target
type Manifest interface {
	// References returns a list of objects which make up this manifest.
	// The references are strictly ordered from base to head. A reference
	// is anything which can be represented by a distribution.Descriptor
	References() []Descriptor

	// Payload provides the serialized format of the manifest, in addition to
	// the mediatype.
	Payload() (mediatype string, payload []byte, err error)
}

// ManifestBuilder creates a manifest allowing one to include dependencies.
// Instances can be obtained from a version-specific manifest package. Manifest
// specific data is passed into the function which creates the builder.
type ManifestBuilder interface {
	// Build creates the manifest from this builder.
	Build(ctx context.Context) (Manifest, error)

	// References returns a list of objects which have been added to this
	// builder. The dependencies are returned in the order they were added,
	// which should be from base to head.
	References() []Descriptor

	// AppendReference includes the given object in the manifest after any
	// existing dependencies. If the add fails, such as when adding an
	// unsupported dependency, an error may be returned.
	AppendReference(dependency Describable) error
}

// ManifestService describes operations on image manifests.
type ManifestService interface {
	// Exists returns true if the manifest exists.
	Exists(ctx context.Context, dgst digest.Digest) (bool, error)

	// Get retrieves the manifest specified by the given digest
	Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)

	// Put creates or updates the given manifest returning the manifest digest
	Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)

	// Delete removes the manifest specified by the given digest. Deleting
	// a manifest that doesn't exist will return ErrManifestNotFound
	Delete(ctx context.Context, dgst digest.Digest) error

	// Enumerate fills 'manifests' with the manifests in this service up
	// to the size of 'manifests' and returns 'n' for the number of entries
	// which were filled. 'last' contains an offset in the manifest set
	// and can be used to resume iteration.
	//Enumerate(ctx context.Context, manifests []Manifest, last Manifest) (n int, err error)
}

// Describable is an interface for descriptors
type Describable interface {
	Descriptor() Descriptor
}

// ManifestMediaTypes returns the supported media types for manifests.
func ManifestMediaTypes() (mediaTypes []string) {
	for t := range mappings {
		mediaTypes = append(mediaTypes, t)
	}
	return
}

// UnmarshalFunc implements manifest unmarshalling for a given MediaType
type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)

var mappings = make(map[string]UnmarshalFunc, 0)

// UnmarshalManifest looks up manifest unmarshal functions based on
// MediaType
func UnmarshalManifest(mediatype string, p []byte) (Manifest, Descriptor, error) {
	unmarshalFunc, ok := mappings[mediatype]
	if !ok {
		return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype: %s", mediatype)
	}

	return unmarshalFunc(p)
}

// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This
// should be called from specific manifest packages.
func RegisterManifestSchema(mediatype string, u UnmarshalFunc) error {
	if _, ok := mappings[mediatype]; ok {
		return fmt.Errorf("manifest mediatype registration would overwrite existing: %s", mediatype)
	}
	mappings[mediatype] = u
	return nil
}
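A sketch of the dispatch this registry enables: blank-importing the schema packages registers their UnmarshalFuncs, so a response body can be decoded based on its Content-Type. The helper below is illustrative only.

package main

import (
	"fmt"

	"github.com/docker/distribution"
	_ "github.com/docker/distribution/manifest/schema1"
	_ "github.com/docker/distribution/manifest/schema2"
)

// decode turns a raw manifest body into the right Manifest implementation for
// its media type and reports the derived descriptor.
func decode(contentType string, body []byte) (distribution.Manifest, error) {
	m, desc, err := distribution.UnmarshalManifest(contentType, body)
	if err != nil {
		return nil, err
	}
	fmt.Printf("decoded %s manifest, digest %s, %d bytes\n", desc.MediaType, desc.Digest, desc.Size)
	return m, nil
}

func main() {
	// Lists every media type registered via RegisterManifestSchema.
	fmt.Println("registered media types:", distribution.ManifestMediaTypes())
}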
@@ -4,22 +4,16 @@
// Grammar
//
// 	reference                       := repository [ ":" tag ] [ "@" digest ]
//
//	// repository.go
//	repository                      := hostname ['/' component]+
//	hostname                        := hostcomponent [':' port-number]
//	component                       := subcomponent [separator subcomponent]*
//	subcomponent                    := alpha-numeric ['-'* alpha-numeric]*
//	hostcomponent                   := [hostpart '.']* hostpart
// 	alpha-numeric                   := /[a-z0-9]+/
//	separator                       := /([_.]|__)/
//	name                            := [hostname '/'] component ['/' component]*
//	hostname                        := hostcomponent ['.' hostcomponent]* [':' port-number]
//	hostcomponent                   := /([a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])/
//	port-number                     := /[0-9]+/
//	hostpart                        := /([a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])/
//	component                       := alpha-numeric [separator alpha-numeric]*
// 	alpha-numeric                   := /[a-z0-9]+/
//	separator                       := /[_.]|__|[-]*/
//
//	// tag.go
//	tag                             := /[\w][\w.-]{0,127}/
//
//	// from the digest package
//	digest                          := digest-algorithm ":" digest-hex
//	digest-algorithm                := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
//	digest-algorithm-separator      := /[+.-_]/

@@ -52,8 +46,7 @@ var (
	// ErrNameEmpty is returned for empty, invalid repository names.
	ErrNameEmpty = errors.New("repository name must have at least one component")

	// ErrNameTooLong is returned when a repository name is longer than
	// RepositoryNameTotalLengthMax
	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
)
@@ -3,47 +3,122 @@ package reference
import "regexp"

var (
	// nameSubComponentRegexp defines the part of the name which must be
	// begin and end with an alphanumeric character. These characters can
	// be separated by any number of dashes.
	nameSubComponentRegexp = regexp.MustCompile(`[a-z0-9]+(?:[-]+[a-z0-9]+)*`)
	// alphaNumericRegexp defines the alpha numeric atom, typically a
	// component of names. This only allows lower case characters and digits.
	alphaNumericRegexp = match(`[a-z0-9]+`)

	// nameComponentRegexp restricts registry path component names to
	// start with at least one letter or number, with following parts able to
	// be separated by one period, underscore or double underscore.
	nameComponentRegexp = regexp.MustCompile(nameSubComponentRegexp.String() + `(?:(?:[._]|__)` + nameSubComponentRegexp.String() + `)*`)
	// separatorRegexp defines the separators allowed to be embedded in name
	// components. This allows one period, one or two underscore and multiple
	// dashes.
	separatorRegexp = match(`(?:[._]|__|[-]*)`)

	nameRegexp = regexp.MustCompile(`(?:` + nameComponentRegexp.String() + `/)*` + nameComponentRegexp.String())
	// nameComponentRegexp restricts registry path component names to start
	// with at least one letter or number, with following parts able to be
	// separated by one period, one or two underscore and multiple dashes.
	nameComponentRegexp = expression(
		alphaNumericRegexp,
		optional(repeated(separatorRegexp, alphaNumericRegexp)))

	hostnameComponentRegexp = regexp.MustCompile(`(?:[a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])`)
	// hostnameComponentRegexp restricts the registry hostname component of a
	// repository name to start with a component as defined by hostnameRegexp
	// and followed by an optional port.
	hostnameComponentRegexp = match(`(?:[a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])`)

	// hostnameComponentRegexp restricts the registry hostname component of a repository name to
	// start with a component as defined by hostnameRegexp and followed by an optional port.
	hostnameRegexp = regexp.MustCompile(`(?:` + hostnameComponentRegexp.String() + `\.)*` + hostnameComponentRegexp.String() + `(?::[0-9]+)?`)
	// hostnameRegexp defines the structure of potential hostname components
	// that may be part of image names. This is purposely a subset of what is
	// allowed by DNS to ensure backwards compatibility with Docker image
	// names.
	hostnameRegexp = expression(
		hostnameComponentRegexp,
		optional(repeated(literal(`.`), hostnameComponentRegexp)),
		optional(literal(`:`), match(`[0-9]+`)))

	// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
	TagRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`)
	TagRegexp = match(`[\w][\w.-]{0,127}`)

	// anchoredTagRegexp matches valid tag names, anchored at the start and
	// end of the matched string.
	anchoredTagRegexp = regexp.MustCompile(`^` + TagRegexp.String() + `$`)
	anchoredTagRegexp = anchored(TagRegexp)

	// DigestRegexp matches valid digests.
	DigestRegexp = regexp.MustCompile(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
	DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)

	// anchoredDigestRegexp matches valid digests, anchored at the start and
	// end of the matched string.
	anchoredDigestRegexp = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
	anchoredDigestRegexp = anchored(DigestRegexp)

	// NameRegexp is the format for the name component of references. The
	// regexp has capturing groups for the hostname and name part omitting
	// the separating forward slash from either.
	NameRegexp = regexp.MustCompile(`(?:` + hostnameRegexp.String() + `/)?` + nameRegexp.String())
	NameRegexp = expression(
		optional(hostnameRegexp, literal(`/`)),
		nameComponentRegexp,
		optional(repeated(literal(`/`), nameComponentRegexp)))

	// ReferenceRegexp is the full supported format of a reference. The
	// regexp has capturing groups for name, tag, and digest components.
	ReferenceRegexp = regexp.MustCompile(`^((?:` + hostnameRegexp.String() + `/)?` + nameRegexp.String() + `)(?:[:](` + TagRegexp.String() + `))?(?:[@](` + DigestRegexp.String() + `))?$`)
	// anchoredNameRegexp is used to parse a name value, capturing the
	// hostname and trailing components.
	anchoredNameRegexp = anchored(
		optional(capture(hostnameRegexp), literal(`/`)),
		capture(nameComponentRegexp,
			optional(repeated(literal(`/`), nameComponentRegexp))))

	// anchoredNameRegexp is used to parse a name value, capturing hostname
	anchoredNameRegexp = regexp.MustCompile(`^(?:(` + hostnameRegexp.String() + `)/)?(` + nameRegexp.String() + `)$`)
	// ReferenceRegexp is the full supported format of a reference. The regexp
	// is anchored and has capturing groups for name, tag, and digest
	// components.
	ReferenceRegexp = anchored(capture(NameRegexp),
		optional(literal(":"), capture(TagRegexp)),
		optional(literal("@"), capture(DigestRegexp)))
)

// match compiles the string to a regular expression.
var match = regexp.MustCompile

// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) *regexp.Regexp {
	re := match(regexp.QuoteMeta(s))

	if _, complete := re.LiteralPrefix(); !complete {
		panic("must be a literal")
	}

	return re
}

// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
	var s string
	for _, re := range res {
		s += re.String()
	}

	return match(s)
}

// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
	return match(group(expression(res...)).String() + `?`)
}

// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
	return match(group(expression(res...)).String() + `+`)
}

// group wraps the regexp in a non-capturing group.
func group(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`(?:` + expression(res...).String() + `)`)
}

// capture wraps the expression in a capturing group.
func capture(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`(` + expression(res...).String() + `)`)
}

// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`^` + expression(res...).String() + `$`)
}
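A small sketch of the rebuilt ReferenceRegexp in action. The reference string is made up, and the capture groups are name, tag, and digest, in that order after the full match.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	ref := "registry.example.com:5000/library/ubuntu:14.04"

	matches := reference.ReferenceRegexp.FindStringSubmatch(ref)
	if matches == nil {
		fmt.Println("not a valid reference")
		return
	}
	fmt.Println("name:", matches[1]) // registry.example.com:5000/library/ubuntu
	fmt.Println("tag:", matches[2])  // 14.04
	// matches[3] would hold the digest; it is empty for this reference.
}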
@@ -2,8 +2,6 @@ package distribution

import (
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/schema1"
)

// Scope defines the set of items that match a namespace.

@@ -44,7 +42,9 @@ type Namespace interface {
}

// ManifestServiceOption is a function argument for Manifest Service methods
type ManifestServiceOption func(ManifestService) error
type ManifestServiceOption interface {
	Apply(ManifestService) error
}

// Repository is a named collection of manifests and layers.
type Repository interface {

@@ -62,59 +62,10 @@ type Repository interface {
	// be a BlobService for use with clients. This will allow such
	// implementations to avoid implementing ServeBlob.

	// Signatures returns a reference to this repository's signatures service.
	Signatures() SignatureService
	// Tags returns a reference to this repository's tag service
	Tags(ctx context.Context) TagService
}

// TODO(stevvooe): Must add close methods to all these. May want to change the
// way instances are created to better reflect internal dependency
// relationships.

// ManifestService provides operations on image manifests.
type ManifestService interface {
	// Exists returns true if the manifest exists.
	Exists(dgst digest.Digest) (bool, error)

	// Get retrieves the manifest identified by the digest, if it exists.
	Get(dgst digest.Digest) (*schema1.SignedManifest, error)

	// Delete removes the manifest, if it exists.
	Delete(dgst digest.Digest) error

	// Put creates or updates the manifest.
	Put(manifest *schema1.SignedManifest) error

	// TODO(stevvooe): The methods after this message should be moved to a
	// discrete TagService, per active proposals.

	// Tags lists the tags under the named repository.
	Tags() ([]string, error)

	// ExistsByTag returns true if the manifest exists.
	ExistsByTag(tag string) (bool, error)

	// GetByTag retrieves the named manifest, if it exists.
	GetByTag(tag string, options ...ManifestServiceOption) (*schema1.SignedManifest, error)

	// TODO(stevvooe): There are several changes that need to be done to this
	// interface:
	//
	//	1. Allow explicit tagging with Tag(digest digest.Digest, tag string)
	//	2. Support reading tags with a re-entrant reader to avoid large
	//       allocations in the registry.
	//	3. Long-term: Provide All() method that lets one scroll through all of
	//       the manifest entries.
	//	4. Long-term: break out concept of signing from manifests. This is
	//       really a part of the distribution sprint.
	//	5. Long-term: Manifest should be an interface. This code shouldn't
	//       really be concerned with the storage format.
}

// SignatureService provides operations on signatures.
type SignatureService interface {
	// Get retrieves all of the signature blobs for the specified digest.
	Get(dgst digest.Digest) ([][]byte, error)

	// Put stores the signature for the provided digest.
	Put(dgst digest.Digest, signatures ...[]byte) error
}
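Since ManifestServiceOption is now an interface, an option is any type with an Apply method. The noopOption below is invented purely to show the shape; real options are defined by the client and storage packages.

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// noopOption is a hypothetical option: the service implementation calls Apply
// and decides what to do with it; returning nil simply accepts the service.
type noopOption struct{}

func (noopOption) Apply(ms distribution.ManifestService) error {
	return nil
}

// getWithOption passes the option through to the context-aware Get method.
func getWithOption(ctx context.Context, ms distribution.ManifestService, dgst digest.Digest) (distribution.Manifest, error) {
	return ms.Get(ctx, dgst, noopOption{})
}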
@@ -25,7 +25,8 @@ func (ec ErrorCode) ErrorCode() ErrorCode {

// Error returns the ID/Value
func (ec ErrorCode) Error() string {
	return ec.Descriptor().Value
	// NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
	return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
}

// Descriptor returns the descriptor for the error code.

@@ -104,9 +105,7 @@ func (e Error) ErrorCode() ErrorCode {

// Error returns a human readable representation of the error.
func (e Error) Error() string {
	return fmt.Sprintf("%s: %s",
		strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)),
		e.Message)
	return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
}

// WithDetail will return a new Error, based on the current one, but with
@@ -495,7 +495,7 @@ var routeDescriptors = []RouteDescriptor{
		Methods: []MethodDescriptor{
			{
				Method:      "GET",
				Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest.",
				Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.",
				Requests: []RequestDescriptor{
					{
						Headers: []ParameterDescriptor{
@@ -204,7 +204,9 @@ func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) {
		routeURL.Path = routeURL.Path[1:]
	}

	return cr.root.ResolveReference(routeURL), nil
	url := cr.root.ResolveReference(routeURL)
	url.Scheme = cr.root.Scheme
	return url, nil
}

// appendValuesURL appends the parameters to the url.
@@ -240,7 +240,8 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenRespon
	defer resp.Body.Close()

	if !client.SuccessStatus(resp.StatusCode) {
		return nil, fmt.Errorf("token auth attempt for registry: %s request failed with status: %d %s", req.URL, resp.StatusCode, http.StatusText(resp.StatusCode))
		err := client.HandleErrorResponse(resp)
		return nil, err
	}

	decoder := json.NewDecoder(resp.Body)
@@ -33,7 +33,7 @@ func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error {
	if resp.StatusCode == http.StatusNotFound {
		return distribution.ErrBlobUploadUnknown
	}
	return handleErrorResponse(resp)
	return HandleErrorResponse(resp)
}

func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) {
@@ -47,7 +47,11 @@ func parseHTTPErrorResponse(r io.Reader) error {
	return errors
}

func handleErrorResponse(resp *http.Response) error {
// HandleErrorResponse returns the error parsed from an HTTP response for an
// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
// UnexpectedHTTPStatusError is returned for response codes outside of the
// expected range.
func HandleErrorResponse(resp *http.Response) error {
	if resp.StatusCode == 401 {
		err := parseHTTPErrorResponse(resp.Body)
		if uErr, ok := err.(*UnexpectedHTTPResponseError); ok {
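A sketch of the newly exported error helper from outside the client package; the fetch helper and URL handling are assumptions for the example.

package example

import (
	"io/ioutil"
	"net/http"

	"github.com/docker/distribution/registry/client"
)

// fetch converts any non-success registry response into a typed error
// (errcode errors for 4xx bodies, UnexpectedHTTPStatusError otherwise).
func fetch(c *http.Client, url string) ([]byte, error) {
	resp, err := c.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if !client.SuccessStatus(resp.StatusCode) {
		return nil, client.HandleErrorResponse(resp)
	}
	return ioutil.ReadAll(resp.Body)
}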
@@ -3,6 +3,7 @@ package client
import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"

@@ -14,7 +15,6 @@ import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/api/v2"
	"github.com/docker/distribution/registry/client/transport"

@@ -91,7 +91,7 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri
			returnErr = io.EOF
		}
	} else {
		return 0, handleErrorResponse(resp)
		return 0, HandleErrorResponse(resp)
	}

	return numFilled, returnErr
@@ -156,26 +156,139 @@ func (r *repository) Manifests(ctx context.Context, options ...distribution.Mani
	}, nil
}

func (r *repository) Signatures() distribution.SignatureService {
	ms, _ := r.Manifests(r.context)
	return &signatures{
		manifests: ms,
func (r *repository) Tags(ctx context.Context) distribution.TagService {
	return &tags{
		client:  r.client,
		ub:      r.ub,
		context: r.context,
		name:    r.Name(),
	}
}

type signatures struct {
	manifests distribution.ManifestService
// tags implements remote tagging operations.
type tags struct {
	client  *http.Client
	ub      *v2.URLBuilder
	context context.Context
	name    string
}

func (s *signatures) Get(dgst digest.Digest) ([][]byte, error) {
	m, err := s.manifests.Get(dgst)
// All returns all tags
func (t *tags) All(ctx context.Context) ([]string, error) {
	var tags []string

	u, err := t.ub.BuildTagsURL(t.name)
	if err != nil {
		return nil, err
		return tags, err
	}
	return m.Signatures()

	resp, err := t.client.Get(u)
	if err != nil {
		return tags, err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		b, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return tags, err
		}

		tagsResponse := struct {
			Tags []string `json:"tags"`
		}{}
		if err := json.Unmarshal(b, &tagsResponse); err != nil {
			return tags, err
		}
		tags = tagsResponse.Tags
		return tags, nil
	}
	return tags, HandleErrorResponse(resp)
}

func (s *signatures) Put(dgst digest.Digest, signatures ...[]byte) error {
func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) {
	desc := distribution.Descriptor{}
	headers := response.Header

	ctHeader := headers.Get("Content-Type")
	if ctHeader == "" {
		return distribution.Descriptor{}, errors.New("missing or empty Content-Type header")
	}
	desc.MediaType = ctHeader

	digestHeader := headers.Get("Docker-Content-Digest")
	if digestHeader == "" {
		bytes, err := ioutil.ReadAll(response.Body)
		if err != nil {
			return distribution.Descriptor{}, err
		}
		_, desc, err := distribution.UnmarshalManifest(ctHeader, bytes)
		if err != nil {
			return distribution.Descriptor{}, err
		}
		return desc, nil
	}

	dgst, err := digest.ParseDigest(digestHeader)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	desc.Digest = dgst

	lengthHeader := headers.Get("Content-Length")
	if lengthHeader == "" {
		return distribution.Descriptor{}, errors.New("missing or empty Content-Length header")
	}
	length, err := strconv.ParseInt(lengthHeader, 10, 64)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	desc.Size = length

	return desc, nil

}

// Get issues a HEAD request for a Manifest against its named endpoint in order
// to construct a descriptor for the tag.  If the registry doesn't support HEADing
// a manifest, fallback to GET.
func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
	u, err := t.ub.BuildManifestURL(t.name, tag)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	var attempts int
	resp, err := t.client.Head(u)

check:
	if err != nil {
		return distribution.Descriptor{}, err
	}

	switch {
	case resp.StatusCode >= 200 && resp.StatusCode < 400:
		return descriptorFromResponse(resp)
	case resp.StatusCode == http.StatusMethodNotAllowed:
		resp, err = t.client.Get(u)
		attempts++
		if attempts > 1 {
			return distribution.Descriptor{}, err
		}
		goto check
	default:
		return distribution.Descriptor{}, HandleErrorResponse(resp)
	}
}

func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
	panic("not implemented")
}

func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
	panic("not implemented")
}

func (t *tags) Untag(ctx context.Context, tag string) error {
	panic("not implemented")
}
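
Aside (illustrative, not part of this diff): the removed signatures service is replaced by the tags client above, reached through the repository's Tags method. A minimal sketch of a hypothetical caller, assuming a distribution.Repository value repo has already been built with this client package:

// Illustrative sketch only; not part of this change.
package clientexample

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

// describeLatest lists the repository's tags and resolves "latest" to a
// descriptor using the TagService client implemented above.
func describeLatest(ctx context.Context, repo distribution.Repository) error {
	tagService := repo.Tags(ctx)

	all, err := tagService.All(ctx) // decoded from the tags/list endpoint, as in All above
	if err != nil {
		return err
	}

	// Get issues a HEAD request for the manifest and retries once with GET
	// when the registry answers 405 Method Not Allowed.
	desc, err := tagService.Get(ctx, "latest")
	if err != nil {
		return err
	}

	fmt.Printf("tags: %v; latest -> %s (%s, %d bytes)\n", all, desc.Digest, desc.MediaType, desc.Size)
	return nil
}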

@@ -186,44 +299,8 @@ type manifests struct {
	etags  map[string]string
}

func (ms *manifests) Tags() ([]string, error) {
	u, err := ms.ub.BuildTagsURL(ms.name)
	if err != nil {
		return nil, err
	}

	resp, err := ms.client.Get(u)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		b, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return nil, err
		}

		tagsResponse := struct {
			Tags []string `json:"tags"`
		}{}
		if err := json.Unmarshal(b, &tagsResponse); err != nil {
			return nil, err
		}

		return tagsResponse.Tags, nil
	}
	return nil, handleErrorResponse(resp)
}

func (ms *manifests) Exists(dgst digest.Digest) (bool, error) {
	// Call by Tag endpoint since the API uses the same
	// URL endpoint for tags and digests.
	return ms.ExistsByTag(dgst.String())
}

func (ms *manifests) ExistsByTag(tag string) (bool, error) {
	u, err := ms.ub.BuildManifestURL(ms.name, tag)
func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
	u, err := ms.ub.BuildManifestURL(ms.name, dgst.String())
	if err != nil {
		return false, err
	}
@@ -238,49 +315,66 @@ func (ms *manifests) ExistsByTag(tag string) (bool, error) {
	} else if resp.StatusCode == http.StatusNotFound {
		return false, nil
	}
	return false, handleErrorResponse(resp)
	return false, HandleErrorResponse(resp)
}

func (ms *manifests) Get(dgst digest.Digest) (*schema1.SignedManifest, error) {
	// Call by Tag endpoint since the API uses the same
	// URL endpoint for tags and digests.
	return ms.GetByTag(dgst.String())
}

// AddEtagToTag allows a client to supply an eTag to GetByTag which will be
// AddEtagToTag allows a client to supply an eTag to Get which will be
// used for a conditional HTTP request.  If the eTag matches, a nil manifest
// and nil error will be returned. etag is automatically quoted when added to
// this map.
// and ErrManifestNotModified error will be returned. etag is automatically
// quoted when added to this map.
func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
	return func(ms distribution.ManifestService) error {
	return etagOption{tag, etag}
}

type etagOption struct{ tag, etag string }

func (o etagOption) Apply(ms distribution.ManifestService) error {
	if ms, ok := ms.(*manifests); ok {
			ms.etags[tag] = fmt.Sprintf(`"%s"`, etag)
		ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
		return nil
	}
	return fmt.Errorf("etag options is a client-only option")
	}
}

func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) {
func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {

	var tag string
	for _, option := range options {
		err := option(ms)
		if opt, ok := option.(withTagOption); ok {
			tag = opt.tag
		} else {
			err := option.Apply(ms)
			if err != nil {
				return nil, err
			}
		}
	}

	u, err := ms.ub.BuildManifestURL(ms.name, tag)
	var ref string
	if tag != "" {
		ref = tag
	} else {
		ref = dgst.String()
	}

	u, err := ms.ub.BuildManifestURL(ms.name, ref)
	if err != nil {
		return nil, err
	}

	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, err
	}

	if _, ok := ms.etags[tag]; ok {
		req.Header.Set("If-None-Match", ms.etags[tag])
	for _, t := range distribution.ManifestMediaTypes() {
		req.Header.Add("Accept", t)
	}

	if _, ok := ms.etags[ref]; ok {
		req.Header.Set("If-None-Match", ms.etags[ref])
	}

	resp, err := ms.client.Do(req)
	if err != nil {
		return nil, err
@@ -289,44 +383,89 @@ func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServic
	if resp.StatusCode == http.StatusNotModified {
		return nil, distribution.ErrManifestNotModified
	} else if SuccessStatus(resp.StatusCode) {
		var sm schema1.SignedManifest
		decoder := json.NewDecoder(resp.Body)
		mt := resp.Header.Get("Content-Type")
		body, err := ioutil.ReadAll(resp.Body)

		if err := decoder.Decode(&sm); err != nil {
		if err != nil {
			return nil, err
		}
		return &sm, nil
		m, _, err := distribution.UnmarshalManifest(mt, body)
		if err != nil {
			return nil, err
		}
	return nil, handleErrorResponse(resp)
		return m, nil
	}
	return nil, HandleErrorResponse(resp)
}
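
Aside (illustrative, not part of this diff): with the etagOption above and the WithTag option defined just below, a caller supplies the tag as an option and can seed the etag map so that an unchanged manifest costs only a 304 round trip. A minimal sketch, assuming manSvc is the ManifestService returned by the repository and etag was saved from an earlier response:

// Hypothetical helper, for illustration only.
package clientexample

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/client"
)

// getByTagCached fetches the manifest for a tag, sending If-None-Match built
// from a previously cached etag. A 304 from the registry surfaces as
// distribution.ErrManifestNotModified.
func getByTagCached(ctx context.Context, manSvc distribution.ManifestService, tag, etag string) (distribution.Manifest, error) {
	m, err := manSvc.Get(ctx, "", client.WithTag(tag), client.AddEtagToTag(tag, etag))
	if err == distribution.ErrManifestNotModified {
		return nil, err // the cached copy is still current
	}
	return m, err
}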

func (ms *manifests) Put(m *schema1.SignedManifest) error {
	manifestURL, err := ms.ub.BuildManifestURL(ms.name, m.Tag)
// WithTag allows a tag to be passed into Put which enables the client
// to build a correct URL.
func WithTag(tag string) distribution.ManifestServiceOption {
	return withTagOption{tag}
}

type withTagOption struct{ tag string }

func (o withTagOption) Apply(m distribution.ManifestService) error {
	if _, ok := m.(*manifests); ok {
		return nil
	}
	return fmt.Errorf("withTagOption is a client-only option")
}

// Put puts a manifest.  A tag can be specified using an options parameter which uses some shared state to hold the
// tag name in order to build the correct upload URL.  This state is written and read under a lock.
func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
	var tag string

	for _, option := range options {
		if opt, ok := option.(withTagOption); ok {
			tag = opt.tag
		} else {
			err := option.Apply(ms)
			if err != nil {
		return err
				return "", err
			}
		}
	}

	// todo(richardscothern): do something with options here when they become applicable

	putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(m.Raw))
	manifestURL, err := ms.ub.BuildManifestURL(ms.name, tag)
	if err != nil {
		return err
		return "", err
	}

	mediaType, p, err := m.Payload()
	if err != nil {
		return "", err
	}

	putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p))
	if err != nil {
		return "", err
	}

	putRequest.Header.Set("Content-Type", mediaType)

	resp, err := ms.client.Do(putRequest)
	if err != nil {
		return err
		return "", err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		// TODO(dmcgowan): make use of digest header
		return nil
		dgstHeader := resp.Header.Get("Docker-Content-Digest")
		dgst, err := digest.ParseDigest(dgstHeader)
		if err != nil {
			return "", err
		}
	return handleErrorResponse(resp)

		return dgst, nil
	}

	return "", HandleErrorResponse(resp)
}
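
Aside (illustrative, not part of this diff): Put now returns the canonical digest parsed from the Docker-Content-Digest response header, and the tag travels in a client-only option instead of being read from a schema1 field. A minimal sketch under those assumptions:

// Hypothetical helper, for illustration only.
package clientexample

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/registry/client"
)

// pushManifest uploads m under the given tag and returns the digest the
// registry reports for it.
func pushManifest(ctx context.Context, manSvc distribution.ManifestService, m distribution.Manifest, tag string) (digest.Digest, error) {
	return manSvc.Put(ctx, m, client.WithTag(tag))
}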

func (ms *manifests) Delete(dgst digest.Digest) error {
func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error {
	u, err := ms.ub.BuildManifestURL(ms.name, dgst.String())
	if err != nil {
		return err
@@ -345,9 +484,14 @@ func (ms *manifests) Delete(dgst digest.Digest) error {
	if SuccessStatus(resp.StatusCode) {
		return nil
	}
	return handleErrorResponse(resp)
	return HandleErrorResponse(resp)
}

// todo(richardscothern): Restore interface and implementation with merge of #1050
/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) {
	panic("not supported")
}*/

type blobs struct {
	name   string
	ub     *v2.URLBuilder
@@ -377,11 +521,7 @@ func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Des
}

func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
	desc, err := bs.Stat(ctx, dgst)
	if err != nil {
		return nil, err
	}
	reader, err := bs.Open(ctx, desc.Digest)
	reader, err := bs.Open(ctx, dgst)
	if err != nil {
		return nil, err
	}
@@ -401,7 +541,7 @@ func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.Rea
			if resp.StatusCode == http.StatusNotFound {
				return distribution.ErrBlobUnknown
			}
			return handleErrorResponse(resp)
			return HandleErrorResponse(resp)
		}), nil
}

@@ -457,7 +597,7 @@ func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) {
			location:  location,
		}, nil
	}
	return nil, handleErrorResponse(resp)
	return nil, HandleErrorResponse(resp)
}

func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
@@ -488,6 +628,10 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi

	if SuccessStatus(resp.StatusCode) {
		lengthHeader := resp.Header.Get("Content-Length")
		if lengthHeader == "" {
			return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u)
		}

		length, err := strconv.ParseInt(lengthHeader, 10, 64)
		if err != nil {
			return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err)
@@ -501,7 +645,7 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi
	} else if resp.StatusCode == http.StatusNotFound {
		return distribution.Descriptor{}, distribution.ErrBlobUnknown
	}
	return distribution.Descriptor{}, handleErrorResponse(resp)
	return distribution.Descriptor{}, HandleErrorResponse(resp)
}

func buildCatalogValues(maxEntries int, last string) url.Values {
@@ -538,7 +682,7 @@ func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
	if SuccessStatus(resp.StatusCode) {
		return nil
	}
	return handleErrorResponse(resp)
	return HandleErrorResponse(resp)
}

func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {

27  vendor/src/github.com/docker/distribution/tags.go  (vendored, new file)
@@ -0,0 +1,27 @@
package distribution

import (
	"github.com/docker/distribution/context"
)

// TagService provides access to information about tagged objects.
type TagService interface {
	// Get retrieves the descriptor identified by the tag. Some
	// implementations may differentiate between "trusted" tags and
	// "untrusted" tags. If a tag is "untrusted", the mapping will be returned
	// as an ErrTagUntrusted error, with the target descriptor.
	Get(ctx context.Context, tag string) (Descriptor, error)

	// Tag associates the tag with the provided descriptor, updating the
	// current association, if needed.
	Tag(ctx context.Context, tag string, desc Descriptor) error

	// Untag removes the given tag association
	Untag(ctx context.Context, tag string) error

	// All returns the set of tags managed by this tag service
	All(ctx context.Context) ([]string, error)

	// Lookup returns the set of tags referencing the given digest.
	Lookup(ctx context.Context, digest Descriptor) ([]string, error)
}
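
Aside (illustrative, not part of this diff): the interface above is what both the registry's tag store and the HTTP client's tags type earlier in this diff implement. A minimal map-backed sketch, hypothetical and simplified (a plain error stands in for a typed tag-unknown error), shows the expected contract:

package memtags

import (
	"fmt"
	"sync"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

// memoryTagService is an illustration-only, in-memory distribution.TagService.
type memoryTagService struct {
	mu   sync.RWMutex
	tags map[string]distribution.Descriptor
}

// NewMemoryTagService returns an empty in-memory TagService.
func NewMemoryTagService() distribution.TagService {
	return &memoryTagService{tags: make(map[string]distribution.Descriptor)}
}

func (s *memoryTagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if desc, ok := s.tags[tag]; ok {
		return desc, nil
	}
	return distribution.Descriptor{}, fmt.Errorf("unknown tag %q", tag)
}

func (s *memoryTagService) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.tags[tag] = desc
	return nil
}

func (s *memoryTagService) Untag(ctx context.Context, tag string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.tags, tag)
	return nil
}

func (s *memoryTagService) All(ctx context.Context) ([]string, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	all := make([]string, 0, len(s.tags))
	for t := range s.tags {
		all = append(all, t)
	}
	return all, nil
}

func (s *memoryTagService) Lookup(ctx context.Context, target distribution.Descriptor) ([]string, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	var matches []string
	for t, desc := range s.tags {
		if desc.Digest == target.Digest {
			matches = append(matches, t)
		}
	}
	return matches, nil
}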