1
0
Fork 0
mirror of https://github.com/moby/moby.git synced 2022-11-09 12:21:53 -05:00

Changes cross-repository blob mounting to a blob Create option

Also renames BlobSumService to V2MetadataService, BlobSum to
V2Metadata

Signed-off-by: Brian Bland <brian.bland@docker.com>
This commit is contained in:
Brian Bland 2016-01-13 19:34:27 -08:00
parent 7289c7218e
commit 6309947718
15 changed files with 359 additions and 305 deletions

View file

@ -152,7 +152,7 @@ RUN set -x \
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
ENV REGISTRY_COMMIT 93d9070c8bb28414de9ec96fd38c89614acd8435
ENV REGISTRY_COMMIT cb08de17d74bef86ce6c5abe8b240e282f5750be
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \

View file

@ -1,137 +0,0 @@
package metadata
import (
"encoding/json"
"github.com/docker/distribution/digest"
"github.com/docker/docker/layer"
)
// BlobSumService maps layer IDs to a set of known blobsums for
// the layer.
type BlobSumService struct {
	store Store // backing key/value store used for both lookup directions
}

// BlobSum contains the digest and source repository information for a layer.
type BlobSum struct {
	// Digest is the registry blob digest (blobsum) for the layer.
	Digest digest.Digest
	// SourceRepository names the repository the blobsum was observed in;
	// it may be empty for entries created without repository context.
	SourceRepository string
}

// maxBlobSums is the number of blobsums to keep per layer DiffID.
const maxBlobSums = 50
// NewBlobSumService creates a new blobsum mapping service backed by the
// given store.
func NewBlobSumService(store Store) *BlobSumService {
	return &BlobSumService{store: store}
}
// diffIDNamespace returns the store namespace holding blobsum lists keyed
// by layer DiffID.
func (blobserv *BlobSumService) diffIDNamespace() string {
	return "blobsum-storage"
}

// blobSumNamespace returns the store namespace holding DiffIDs keyed by
// blobsum digest.
func (blobserv *BlobSumService) blobSumNamespace() string {
	return "blobsum-lookup"
}

// diffIDKey renders a DiffID as "<algorithm>/<hex>" for use as a store key.
func (blobserv *BlobSumService) diffIDKey(diffID layer.DiffID) string {
	return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex()
}

// blobSumKey renders a blobsum's digest as "<algorithm>/<hex>" for use as a
// store key.
func (blobserv *BlobSumService) blobSumKey(blobsum BlobSum) string {
	return string(blobsum.Digest.Algorithm()) + "/" + blobsum.Digest.Hex()
}
// GetBlobSums finds the blobsums associated with a layer DiffID.
func (blobserv *BlobSumService) GetBlobSums(diffID layer.DiffID) ([]BlobSum, error) {
	raw, err := blobserv.store.Get(blobserv.diffIDNamespace(), blobserv.diffIDKey(diffID))
	if err != nil {
		return nil, err
	}
	var sums []BlobSum
	if err := json.Unmarshal(raw, &sums); err != nil {
		return nil, err
	}
	return sums, nil
}
// GetDiffID finds a layer DiffID from a blobsum hash.
func (blobserv *BlobSumService) GetDiffID(blobsum BlobSum) (layer.DiffID, error) {
	raw, err := blobserv.store.Get(blobserv.blobSumNamespace(), blobserv.blobSumKey(blobsum))
	if err != nil {
		return layer.DiffID(""), err
	}
	return layer.DiffID(raw), nil
}
// Add associates a blobsum with a layer DiffID. If too many blobsums are
// present, the oldest one is dropped.
func (blobserv *BlobSumService) Add(diffID layer.DiffID, blobsum BlobSum) error {
	// Best effort read: a failure just means we start from an empty list.
	existing, err := blobserv.GetBlobSums(diffID)
	if err != nil {
		existing = nil
	}
	// Keep every entry except a duplicate of the one being added, then
	// append the new entry so it becomes the most recent.
	updated := make([]BlobSum, 0, len(existing)+1)
	for _, entry := range existing {
		if entry != blobsum {
			updated = append(updated, entry)
		}
	}
	updated = append(updated, blobsum)
	// Trim from the front (oldest entries) when over the cap.
	if excess := len(updated) - maxBlobSums; excess > 0 {
		updated = updated[excess:]
	}
	jsonBytes, err := json.Marshal(updated)
	if err != nil {
		return err
	}
	if err := blobserv.store.Set(blobserv.diffIDNamespace(), blobserv.diffIDKey(diffID), jsonBytes); err != nil {
		return err
	}
	// Also record the reverse mapping: blobsum -> DiffID.
	return blobserv.store.Set(blobserv.blobSumNamespace(), blobserv.blobSumKey(blobsum), []byte(diffID))
}
// Remove disassociates a blobsum from a layer DiffID. When no blobsums
// remain for the DiffID, its whole record is deleted.
func (blobserv *BlobSumService) Remove(blobsum BlobSum) error {
	diffID, err := blobserv.GetDiffID(blobsum)
	if err != nil {
		return err
	}
	// Best effort read: a failure just means we treat the list as empty.
	existing, err := blobserv.GetBlobSums(diffID)
	if err != nil {
		existing = nil
	}
	// Keep every entry other than the one being removed.
	remaining := make([]BlobSum, 0, len(existing))
	for _, entry := range existing {
		if entry != blobsum {
			remaining = append(remaining, entry)
		}
	}
	if len(remaining) == 0 {
		return blobserv.store.Delete(blobserv.diffIDNamespace(), blobserv.diffIDKey(diffID))
	}
	jsonBytes, err := json.Marshal(remaining)
	if err != nil {
		return err
	}
	return blobserv.store.Set(blobserv.diffIDNamespace(), blobserv.diffIDKey(diffID), jsonBytes)
}

View file

@ -0,0 +1,137 @@
package metadata
import (
"encoding/json"
"github.com/docker/distribution/digest"
"github.com/docker/docker/layer"
)
// V2MetadataService maps layer IDs to a set of known metadata for
// the layer.
type V2MetadataService struct {
	store Store // backing key/value store used for both lookup directions
}

// V2Metadata contains the digest and source repository information for a layer.
type V2Metadata struct {
	// Digest is the v2 registry digest for the layer blob.
	Digest digest.Digest
	// SourceRepository names the repository the digest was observed in;
	// it may be empty for entries created without repository context.
	SourceRepository string
}

// maxMetadata is the number of metadata entries to keep per layer DiffID.
const maxMetadata = 50
// NewV2MetadataService creates a new diff ID to v2 metadata mapping service
// backed by the given store.
func NewV2MetadataService(store Store) *V2MetadataService {
	return &V2MetadataService{store: store}
}
// diffIDNamespace returns the store namespace holding metadata lists keyed
// by layer DiffID.
func (serv *V2MetadataService) diffIDNamespace() string {
	return "v2metadata-by-diffid"
}

// digestNamespace returns the store namespace holding DiffIDs keyed by
// blob digest.
func (serv *V2MetadataService) digestNamespace() string {
	return "diffid-by-digest"
}

// diffIDKey renders a DiffID as "<algorithm>/<hex>" for use as a store key.
func (serv *V2MetadataService) diffIDKey(diffID layer.DiffID) string {
	return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex()
}

// digestKey renders a digest as "<algorithm>/<hex>" for use as a store key.
func (serv *V2MetadataService) digestKey(dgst digest.Digest) string {
	return string(dgst.Algorithm()) + "/" + dgst.Hex()
}
// GetMetadata finds the metadata associated with a layer DiffID.
func (serv *V2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) {
	raw, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID))
	if err != nil {
		return nil, err
	}
	var entries []V2Metadata
	if err := json.Unmarshal(raw, &entries); err != nil {
		return nil, err
	}
	return entries, nil
}
// GetDiffID finds a layer DiffID from a digest.
func (serv *V2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) {
	raw, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst))
	if err != nil {
		return layer.DiffID(""), err
	}
	return layer.DiffID(raw), nil
}
// Add associates metadata with a layer DiffID. If too many metadata entries
// are present, the oldest one is dropped.
func (serv *V2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error {
	// Best effort read: a failure just means we start from an empty list.
	existing, err := serv.GetMetadata(diffID)
	if err != nil {
		existing = nil
	}
	// Keep every entry except a duplicate of the one being added, then
	// append the new entry so it becomes the most recent.
	updated := make([]V2Metadata, 0, len(existing)+1)
	for _, entry := range existing {
		if entry != metadata {
			updated = append(updated, entry)
		}
	}
	updated = append(updated, metadata)
	// Trim from the front (oldest entries) when over the cap.
	if excess := len(updated) - maxMetadata; excess > 0 {
		updated = updated[excess:]
	}
	jsonBytes, err := json.Marshal(updated)
	if err != nil {
		return err
	}
	if err := serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes); err != nil {
		return err
	}
	// Also record the reverse mapping: digest -> DiffID.
	return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID))
}
// Remove disassociates a metadata entry from a layer DiffID. When no entries
// remain for the DiffID, its whole record is deleted.
func (serv *V2MetadataService) Remove(metadata V2Metadata) error {
	diffID, err := serv.GetDiffID(metadata.Digest)
	if err != nil {
		return err
	}
	// Best effort read: a failure just means we treat the list as empty.
	existing, err := serv.GetMetadata(diffID)
	if err != nil {
		existing = nil
	}
	// Keep every entry other than the one being removed.
	remaining := make([]V2Metadata, 0, len(existing))
	for _, entry := range existing {
		if entry != metadata {
			remaining = append(remaining, entry)
		}
	}
	if len(remaining) == 0 {
		return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID))
	}
	jsonBytes, err := json.Marshal(remaining)
	if err != nil {
		return err
	}
	return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes)
}

View file

@ -12,7 +12,7 @@ import (
"github.com/docker/docker/layer"
)
func TestBlobSumService(t *testing.T) {
func TestV2MetadataService(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "blobsum-storage-service-test")
if err != nil {
t.Fatalf("could not create temp dir: %v", err)
@ -23,41 +23,41 @@ func TestBlobSumService(t *testing.T) {
if err != nil {
t.Fatalf("could not create metadata store: %v", err)
}
blobSumService := NewBlobSumService(metadataStore)
V2MetadataService := NewV2MetadataService(metadataStore)
tooManyBlobSums := make([]BlobSum, 100)
tooManyBlobSums := make([]V2Metadata, 100)
for i := range tooManyBlobSums {
randDigest := randomDigest()
tooManyBlobSums[i] = BlobSum{Digest: randDigest}
tooManyBlobSums[i] = V2Metadata{Digest: randDigest}
}
testVectors := []struct {
diffID layer.DiffID
blobsums []BlobSum
metadata []V2Metadata
}{
{
diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
blobsums: []BlobSum{
metadata: []V2Metadata{
{Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")},
},
},
{
diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
blobsums: []BlobSum{
metadata: []V2Metadata{
{Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")},
{Digest: digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e")},
},
},
{
diffID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"),
blobsums: tooManyBlobSums,
metadata: tooManyBlobSums,
},
}
// Set some associations
for _, vec := range testVectors {
for _, blobsum := range vec.blobsums {
err := blobSumService.Add(vec.diffID, blobsum)
for _, blobsum := range vec.metadata {
err := V2MetadataService.Add(vec.diffID, blobsum)
if err != nil {
t.Fatalf("error calling Set: %v", err)
}
@ -66,37 +66,37 @@ func TestBlobSumService(t *testing.T) {
// Check the correct values are read back
for _, vec := range testVectors {
blobsums, err := blobSumService.GetBlobSums(vec.diffID)
metadata, err := V2MetadataService.GetMetadata(vec.diffID)
if err != nil {
t.Fatalf("error calling Get: %v", err)
}
expectedBlobsums := len(vec.blobsums)
if expectedBlobsums > 50 {
expectedBlobsums = 50
expectedMetadataEntries := len(vec.metadata)
if expectedMetadataEntries > 50 {
expectedMetadataEntries = 50
}
if !reflect.DeepEqual(blobsums, vec.blobsums[len(vec.blobsums)-expectedBlobsums:len(vec.blobsums)]) {
if !reflect.DeepEqual(metadata, vec.metadata[len(vec.metadata)-expectedMetadataEntries:len(vec.metadata)]) {
t.Fatal("Get returned incorrect layer ID")
}
}
// Test GetBlobSums on a nonexistent entry
_, err = blobSumService.GetBlobSums(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
// Test GetMetadata on a nonexistent entry
_, err = V2MetadataService.GetMetadata(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
if err == nil {
t.Fatal("expected error looking up nonexistent entry")
}
// Test GetDiffID on a nonexistent entry
_, err = blobSumService.GetDiffID(BlobSum{Digest: digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917")})
_, err = V2MetadataService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
if err == nil {
t.Fatal("expected error looking up nonexistent entry")
}
// Overwrite one of the entries and read it back
err = blobSumService.Add(testVectors[1].diffID, testVectors[0].blobsums[0])
err = V2MetadataService.Add(testVectors[1].diffID, testVectors[0].metadata[0])
if err != nil {
t.Fatalf("error calling Add: %v", err)
}
diffID, err := blobSumService.GetDiffID(testVectors[0].blobsums[0])
diffID, err := V2MetadataService.GetDiffID(testVectors[0].metadata[0].Digest)
if err != nil {
t.Fatalf("error calling GetDiffID: %v", err)
}

View file

@ -61,10 +61,10 @@ func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo,
switch endpoint.Version {
case registry.APIVersion2:
return &v2Puller{
blobSumService: metadata.NewBlobSumService(imagePullConfig.MetadataStore),
endpoint: endpoint,
config: imagePullConfig,
repoInfo: repoInfo,
V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore),
endpoint: endpoint,
config: imagePullConfig,
repoInfo: repoInfo,
}, nil
case registry.APIVersion1:
return &v1Puller{

View file

@ -33,11 +33,11 @@ import (
var errRootFSMismatch = errors.New("layers from manifest don't match image configuration")
type v2Puller struct {
blobSumService *metadata.BlobSumService
endpoint registry.APIEndpoint
config *ImagePullConfig
repoInfo *registry.RepositoryInfo
repo distribution.Repository
V2MetadataService *metadata.V2MetadataService
endpoint registry.APIEndpoint
config *ImagePullConfig
repoInfo *registry.RepositoryInfo
repo distribution.Repository
// confirmedV2 is set to true if we confirm we're talking to a v2
// registry. This is used to limit fallbacks to the v1 protocol.
confirmedV2 bool
@ -110,10 +110,10 @@ func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (e
}
type v2LayerDescriptor struct {
digest digest.Digest
repoInfo *registry.RepositoryInfo
repo distribution.Repository
blobSumService *metadata.BlobSumService
digest digest.Digest
repoInfo *registry.RepositoryInfo
repo distribution.Repository
V2MetadataService *metadata.V2MetadataService
}
func (ld *v2LayerDescriptor) Key() string {
@ -125,7 +125,7 @@ func (ld *v2LayerDescriptor) ID() string {
}
func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
return ld.blobSumService.GetDiffID(metadata.BlobSum{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()})
return ld.V2MetadataService.GetDiffID(ld.digest)
}
func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
@ -197,7 +197,7 @@ func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progre
func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
// Cache mapping from this layer's DiffID to the blobsum
ld.blobSumService.Add(diffID, metadata.BlobSum{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()})
ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()})
}
func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
@ -334,10 +334,10 @@ func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverif
}
layerDescriptor := &v2LayerDescriptor{
digest: blobSum,
repoInfo: p.repoInfo,
repo: p.repo,
blobSumService: p.blobSumService,
digest: blobSum,
repoInfo: p.repoInfo,
repo: p.repo,
V2MetadataService: p.V2MetadataService,
}
descriptors = append(descriptors, layerDescriptor)
@ -400,10 +400,10 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s
// to top-most, so that the downloads slice gets ordered correctly.
for _, d := range mfst.References() {
layerDescriptor := &v2LayerDescriptor{
digest: d.Digest,
repo: p.repo,
repoInfo: p.repoInfo,
blobSumService: p.blobSumService,
digest: d.Digest,
repo: p.repo,
repoInfo: p.repoInfo,
V2MetadataService: p.V2MetadataService,
}
descriptors = append(descriptors, layerDescriptor)

View file

@ -71,11 +71,11 @@ func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *reg
switch endpoint.Version {
case registry.APIVersion2:
return &v2Pusher{
blobSumService: metadata.NewBlobSumService(imagePushConfig.MetadataStore),
ref: ref,
endpoint: endpoint,
repoInfo: repoInfo,
config: imagePushConfig,
v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore),
ref: ref,
endpoint: endpoint,
repoInfo: repoInfo,
config: imagePushConfig,
}, nil
case registry.APIVersion1:
return &v1Pusher{

View file

@ -11,6 +11,7 @@ import (
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
distreference "github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/client"
"github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/distribution/xfer"
@ -34,12 +35,12 @@ type PushResult struct {
}
type v2Pusher struct {
blobSumService *metadata.BlobSumService
ref reference.Named
endpoint registry.APIEndpoint
repoInfo *registry.RepositoryInfo
config *ImagePushConfig
repo distribution.Repository
v2MetadataService *metadata.V2MetadataService
ref reference.Named
endpoint registry.APIEndpoint
repoInfo *registry.RepositoryInfo
config *ImagePushConfig
repo distribution.Repository
// pushState is state built by the Download functions.
pushState pushState
@ -130,10 +131,10 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, ima
var descriptors []xfer.UploadDescriptor
descriptorTemplate := v2PushDescriptor{
blobSumService: p.blobSumService,
repoInfo: p.repoInfo,
repo: p.repo,
pushState: &p.pushState,
v2MetadataService: p.v2MetadataService,
repoInfo: p.repoInfo,
repo: p.repo,
pushState: &p.pushState,
}
// Loop bounds condition is to avoid pushing the base layer on Windows.
@ -210,11 +211,11 @@ func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuild
}
type v2PushDescriptor struct {
layer layer.Layer
blobSumService *metadata.BlobSumService
repoInfo reference.Named
repo distribution.Repository
pushState *pushState
layer layer.Layer
v2MetadataService *metadata.V2MetadataService
repoInfo reference.Named
repo distribution.Repository
pushState *pushState
}
func (pd *v2PushDescriptor) Key() string {
@ -242,10 +243,10 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
}
pd.pushState.Unlock()
// Do we have any blobsums associated with this layer's DiffID?
possibleBlobsums, err := pd.blobSumService.GetBlobSums(diffID)
// Do we have any metadata associated with this layer's DiffID?
v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID)
if err == nil {
descriptor, exists, err := blobSumAlreadyExists(ctx, possibleBlobsums, pd.repoInfo, pd.repo, pd.pushState)
descriptor, exists, err := layerAlreadyExists(ctx, v2Metadata, pd.repoInfo, pd.repo, pd.pushState)
if err != nil {
progress.Update(progressOutput, pd.ID(), "Image push failed")
return retryOnError(err)
@ -265,39 +266,69 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
// then push the blob.
bs := pd.repo.Blobs(ctx)
var mountFrom metadata.V2Metadata
// Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload
for _, blobsum := range possibleBlobsums {
sourceRepo, err := reference.ParseNamed(blobsum.SourceRepository)
for _, metadata := range v2Metadata {
sourceRepo, err := reference.ParseNamed(metadata.SourceRepository)
if err != nil {
continue
}
if pd.repoInfo.Hostname() == sourceRepo.Hostname() {
logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, blobsum.Digest, sourceRepo.FullName())
desc, err := bs.Mount(ctx, sourceRepo.RemoteName(), blobsum.Digest)
if err == nil {
progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", sourceRepo.RemoteName())
pd.pushState.Lock()
pd.pushState.confirmedV2 = true
pd.pushState.remoteLayers[diffID] = desc
pd.pushState.Unlock()
// Cache mapping from this layer's DiffID to the blobsum
if err := pd.blobSumService.Add(diffID, metadata.BlobSum{Digest: blobsum.Digest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
return xfer.DoNotRetry{Err: err}
}
return nil
}
// Unable to mount layer from this repository, so this source mapping is no longer valid
logrus.Debugf("unassociating layer %s (%s) with %s", diffID, blobsum.Digest, sourceRepo.FullName())
pd.blobSumService.Remove(blobsum)
logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, metadata.Digest, sourceRepo.FullName())
mountFrom = metadata
break
}
}
var createOpts []distribution.BlobCreateOption
if mountFrom.SourceRepository != "" {
namedRef, err := reference.WithName(mountFrom.SourceRepository)
if err != nil {
return err
}
// TODO (brianbland): We need to construct a reference where the Name is
// only the full remote name, so clean this up when distribution has a
// richer reference package
remoteRef, err := distreference.WithName(namedRef.RemoteName())
if err != nil {
return err
}
canonicalRef, err := distreference.WithDigest(remoteRef, mountFrom.Digest)
if err != nil {
return err
}
createOpts = append(createOpts, client.WithMountFrom(canonicalRef))
}
// Send the layer
layerUpload, err := bs.Create(ctx)
layerUpload, err := bs.Create(ctx, createOpts...)
switch err := err.(type) {
case distribution.ErrBlobMounted:
progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name())
pd.pushState.Lock()
pd.pushState.confirmedV2 = true
pd.pushState.remoteLayers[diffID] = err.Descriptor
pd.pushState.Unlock()
// Cache mapping from this layer's DiffID to the blobsum
if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: mountFrom.Digest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
return xfer.DoNotRetry{Err: err}
}
return nil
}
if mountFrom.SourceRepository != "" {
// unable to mount layer from this repository, so this source mapping is no longer valid
logrus.Debugf("unassociating layer %s (%s) with %s", diffID, mountFrom.Digest, mountFrom.SourceRepository)
pd.v2MetadataService.Remove(mountFrom)
}
if err != nil {
return retryOnError(err)
}
@ -333,7 +364,7 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
progress.Update(progressOutput, pd.ID(), "Pushed")
// Cache mapping from this layer's DiffID to the blobsum
if err := pd.blobSumService.Add(diffID, metadata.BlobSum{Digest: pushDigest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: pushDigest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
return xfer.DoNotRetry{Err: err}
}
@ -362,16 +393,16 @@ func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor {
return pd.pushState.remoteLayers[pd.DiffID()]
}
// blobSumAlreadyExists checks if the registry already know about any of the
// blobsums passed in the "blobsums" slice. If it finds one that the registry
// layerAlreadyExists checks if the registry already knows about any of the
// metadata passed in the "metadata" slice. If it finds one that the registry
// knows about, it returns the known digest and "true".
func blobSumAlreadyExists(ctx context.Context, blobsums []metadata.BlobSum, repoInfo reference.Named, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) {
for _, blobSum := range blobsums {
func layerAlreadyExists(ctx context.Context, metadata []metadata.V2Metadata, repoInfo reference.Named, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) {
for _, meta := range metadata {
// Only check blobsums that are known to this repository or have an unknown source
if blobSum.SourceRepository != "" && blobSum.SourceRepository != repoInfo.FullName() {
if meta.SourceRepository != "" && meta.SourceRepository != repoInfo.FullName() {
continue
}
descriptor, err := repo.Blobs(ctx).Stat(ctx, blobSum.Digest)
descriptor, err := repo.Blobs(ctx).Stat(ctx, meta.Digest)
switch err {
case nil:
descriptor.MediaType = schema2.MediaTypeLayer

View file

@ -44,7 +44,7 @@ clone git github.com/boltdb/bolt v1.1.0
clone git github.com/miekg/dns d27455715200c7d3e321a1e5cadb27c9ee0b0f02
# get graph and distribution packages
clone git github.com/docker/distribution 93d9070c8bb28414de9ec96fd38c89614acd8435
clone git github.com/docker/distribution cb08de17d74bef86ce6c5abe8b240e282f5750be
clone git github.com/vbatts/tar-split v0.9.11
# get desired notary commit, might also need to be updated in Dockerfile

View file

@ -166,9 +166,11 @@ func (s *DockerRegistrySuite) TestCrossRepositoryLayerPush(c *check.C) {
// ensure that layers were mounted from the first repo during push
c.Assert(strings.Contains(out2, "Mounted from dockercli/busybox"), check.Equals, true)
// ensure that we can pull the cross-repo-pushed repository
// ensure that we can pull and run the cross-repo-pushed repository
dockerCmd(c, "rmi", destRepoName)
dockerCmd(c, "pull", destRepoName)
out3, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world")
c.Assert(out3, check.Equals, "hello world")
}
func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c *check.C) {
@ -190,9 +192,11 @@ func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c
// schema1 registry should not support cross-repo layer mounts, so ensure that this does not happen
c.Assert(strings.Contains(out2, "Mounted from dockercli/busybox"), check.Equals, false)
// ensure that we can pull the second pushed repository
// ensure that we can pull and run the second pushed repository
dockerCmd(c, "rmi", destRepoName)
dockerCmd(c, "pull", destRepoName)
out3, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world")
c.Assert(out3, check.Equals, "hello world")
}
func (s *DockerTrustSuite) TestTrustedPush(c *check.C) {

View file

@ -476,8 +476,8 @@ func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metad
if err == nil { // best effort
dgst, err := digest.ParseDigest(string(checksum))
if err == nil {
blobSumService := metadata.NewBlobSumService(ms)
blobSumService.Add(layer.DiffID(), metadata.BlobSum{Digest: dgst})
V2MetadataService := metadata.NewV2MetadataService(ms)
V2MetadataService.Add(layer.DiffID(), metadata.V2Metadata{Digest: dgst})
}
}
_, err = ls.Release(layer)

View file

@ -210,19 +210,19 @@ func TestMigrateImages(t *testing.T) {
t.Fatalf("invalid register count: expected %q, got %q", expected, actual)
}
blobSumService := metadata.NewBlobSumService(ms)
blobsums, err := blobSumService.GetBlobSums(layer.EmptyLayer.DiffID())
v2MetadataService := metadata.NewV2MetadataService(ms)
receivedMetadata, err := v2MetadataService.GetMetadata(layer.EmptyLayer.DiffID())
if err != nil {
t.Fatal(err)
}
expectedBlobsums := []metadata.BlobSum{
expectedMetadata := []metadata.V2Metadata{
{Digest: digest.Digest("sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57")},
{Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
}
if !reflect.DeepEqual(expectedBlobsums, blobsums) {
t.Fatalf("invalid blobsums: expected %q, got %q", expectedBlobsums, blobsums)
if !reflect.DeepEqual(expectedMetadata, receivedMetadata) {
t.Fatalf("invalid metadata: expected %q, got %q", expectedMetadata, receivedMetadata)
}
}

View file

@ -9,6 +9,7 @@ import (
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference"
)
var (
@ -40,6 +41,18 @@ func (err ErrBlobInvalidDigest) Error() string {
err.Digest, err.Reason)
}
// ErrBlobMounted is returned when a blob is mounted from another repository
// instead of initiating an upload session.
type ErrBlobMounted struct {
	// From is the canonical reference the blob was mounted from.
	From reference.Canonical
	// Descriptor describes the mounted blob.
	Descriptor Descriptor
}

// Error implements the error interface.
func (err ErrBlobMounted) Error() string {
	return fmt.Sprintf("blob mounted from: %v to: %v", err.From, err.Descriptor)
}
// Descriptor describes targeted content. Used in conjunction with a blob
// store, a descriptor can be used to fetch, store and target any kind of
// blob. The struct also describes the wire protocol format. Fields should
@ -151,14 +164,19 @@ type BlobIngester interface {
// returned handle can be written to and later resumed using an opaque
// identifier. With this approach, one can Close and Resume a BlobWriter
// multiple times until the BlobWriter is committed or cancelled.
Create(ctx context.Context) (BlobWriter, error)
Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)
// Resume attempts to resume a write to a blob, identified by an id.
Resume(ctx context.Context, id string) (BlobWriter, error)
}
// Mount adds a blob to this service from another source repository,
// identified by a digest.
Mount(ctx context.Context, sourceRepo string, dgst digest.Digest) (Descriptor, error)
// BlobCreateOption is a general extensible function argument for blob creation
// methods. A BlobIngester may choose to honor any or none of the given
// BlobCreateOptions, which can be specific to the implementation of the
// BlobIngester receiving them.
//
// TODO (brianbland): unify this with ManifestServiceOption in the future
type BlobCreateOption interface {
	// Apply mutates an implementation-specific options value, returning an
	// error when the value is of an unexpected type.
	Apply(interface{}) error
}
// BlobWriter provides a handle for inserting data into a blob store.

View file

@ -11,7 +11,7 @@ machine:
post:
# go
- gvm install go1.5 --prefer-binary --name=stable
- gvm install go1.5.3 --prefer-binary --name=stable
environment:
# Convenient shortcuts to "common" locations

View file

@ -10,7 +10,6 @@ import (
"net/http"
"net/url"
"strconv"
"sync"
"time"
"github.com/docker/distribution"
@ -500,9 +499,6 @@ type blobs struct {
statter distribution.BlobDescriptorService
distribution.BlobDeleter
cacheLock sync.Mutex
cachedBlobUpload distribution.BlobWriter
}
func sanitizeLocation(location, base string) (string, error) {
@ -576,18 +572,54 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut
return writer.Commit(ctx, desc)
}
func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) {
bs.cacheLock.Lock()
if bs.cachedBlobUpload != nil {
upload := bs.cachedBlobUpload
bs.cachedBlobUpload = nil
bs.cacheLock.Unlock()
return upload, nil
// createOptions is a collection of blob creation modifiers relevant to general
// blob storage intended to be configured by the BlobCreateOption.Apply method.
type createOptions struct {
Mount struct {
ShouldMount bool
From reference.Canonical
}
bs.cacheLock.Unlock()
}
u, err := bs.ub.BuildBlobUploadURL(bs.name)
// optionFunc adapts a plain function to the distribution.BlobCreateOption
// interface.
type optionFunc func(interface{}) error

// Apply invokes f with the implementation-specific options value v.
func (f optionFunc) Apply(v interface{}) error {
	return f(v)
}
// WithMountFrom returns a BlobCreateOption which designates that the blob
// should be mounted from the given canonical reference rather than uploaded.
func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
	return optionFunc(func(v interface{}) error {
		// Only the client's createOptions type is understood here.
		if opts, ok := v.(*createOptions); ok {
			opts.Mount.From = ref
			opts.Mount.ShouldMount = true
			return nil
		}
		return fmt.Errorf("unexpected options type: %T", v)
	})
}
func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
var opts createOptions
for _, option := range options {
err := option.Apply(&opts)
if err != nil {
return nil, err
}
}
var values []url.Values
if opts.Mount.ShouldMount {
values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}})
}
u, err := bs.ub.BuildBlobUploadURL(bs.name, values...)
if err != nil {
return nil, err
}
@ -598,7 +630,14 @@ func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) {
}
defer resp.Body.Close()
if SuccessStatus(resp.StatusCode) {
switch resp.StatusCode {
case http.StatusCreated:
desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest())
if err != nil {
return nil, err
}
return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
case http.StatusAccepted:
// TODO(dmcgowan): Check for invalid UUID
uuid := resp.Header.Get("Docker-Upload-UUID")
location, err := sanitizeLocation(resp.Header.Get("Location"), u)
@ -613,53 +652,15 @@ func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) {
startedAt: time.Now(),
location: location,
}, nil
default:
return nil, HandleErrorResponse(resp)
}
return nil, HandleErrorResponse(resp)
}
func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
panic("not implemented")
}
func (bs *blobs) Mount(ctx context.Context, sourceRepo string, dgst digest.Digest) (distribution.Descriptor, error) {
u, err := bs.ub.BuildBlobUploadURL(bs.name, url.Values{"from": {sourceRepo}, "mount": {dgst.String()}})
if err != nil {
return distribution.Descriptor{}, err
}
resp, err := bs.client.Post(u, "", nil)
if err != nil {
return distribution.Descriptor{}, err
}
defer resp.Body.Close()
switch resp.StatusCode {
case http.StatusCreated:
return bs.Stat(ctx, dgst)
case http.StatusAccepted:
// Triggered a blob upload (legacy behavior), so cache the creation info
uuid := resp.Header.Get("Docker-Upload-UUID")
location, err := sanitizeLocation(resp.Header.Get("Location"), u)
if err != nil {
return distribution.Descriptor{}, err
}
bs.cacheLock.Lock()
bs.cachedBlobUpload = &httpBlobUpload{
statter: bs.statter,
client: bs.client,
uuid: uuid,
startedAt: time.Now(),
location: location,
}
bs.cacheLock.Unlock()
return distribution.Descriptor{}, HandleErrorResponse(resp)
default:
return distribution.Descriptor{}, HandleErrorResponse(resp)
}
}
func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error {
return bs.statter.Clear(ctx, dgst)
}