
LCOW: Re-coalesce stores

Signed-off-by: John Howard <jhoward@microsoft.com>

This re-coalesces the daemon stores that were split as part of the
original LCOW implementation.

This is part of the work discussed in https://github.com/moby/moby/issues/34617,
in particular see the document linked to in that issue.
John Howard 2017-08-24 11:48:16 -07:00
parent 6feae06003
commit ce8e529e18
66 changed files with 549 additions and 635 deletions
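
For orientation, here is a minimal, self-contained sketch (not code from the commit) of what the coalescing means for the daemon's bookkeeping: before the change the daemon kept one bundle of stores per container target platform, afterwards it keeps a single image store, layer store and distribution metadata store, and only the graph-driver name remains per operating system. The field names echo the daemon.go diff below; the store types here are stand-ins, not the real moby packages.

package main

import "fmt"

// Stand-ins for image.Store, layer.Store and dmetadata.Store in moby.
type imageStore struct{}
type layerStore struct{}
type metadataStore struct{}

// Before: one bundle of stores per container target platform.
type perPlatformStores struct {
	stores map[string]struct {
		graphDriver string
		images      *imageStore
		layers      *layerStore
		metadata    *metadataStore
	}
}

// After: single coalesced stores; only the graph driver stays per-OS.
type coalescedStores struct {
	graphDrivers map[string]string // by operating system
	images       *imageStore
	layers       *layerStore
	metadata     *metadataStore
}

func main() {
	d := coalescedStores{
		graphDrivers: map[string]string{"windows": "windowsfilter", "linux": "lcow"},
		images:       &imageStore{},
		layers:       &layerStore{},
		metadata:     &metadataStore{},
	}
	fmt.Println(d.graphDrivers)
}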

View file

@@ -17,7 +17,7 @@ import (
 // ImageComponent provides an interface for working with images
 type ImageComponent interface {
 	SquashImage(from string, to string) (string, error)
-	TagImageWithReference(image.ID, string, reference.Named) error
+	TagImageWithReference(image.ID, reference.Named) error
 }
 // Builder defines interface for running a build

View file

@@ -3,11 +3,9 @@ package build
 import (
 	"fmt"
 	"io"
-	"runtime"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/image"
-	"github.com/docker/docker/pkg/system"
 	"github.com/pkg/errors"
 )
@@ -35,12 +33,7 @@ func NewTagger(backend ImageComponent, stdout io.Writer, names []string) (*Tagge
 // TagImages creates image tags for the imageID
 func (bt *Tagger) TagImages(imageID image.ID) error {
 	for _, rt := range bt.repoAndTags {
-		// TODO @jhowardmsft LCOW support. Will need revisiting.
-		platform := runtime.GOOS
-		if system.LCOWSupported() {
-			platform = "linux"
-		}
-		if err := bt.imageComponent.TagImageWithReference(imageID, platform, rt); err != nil {
+		if err := bt.imageComponent.TagImageWithReference(imageID, rt); err != nil {
 			return err
 		}
 		fmt.Fprintf(bt.stdout, "Successfully tagged %s\n", reference.FamiliarString(rt))

View file

@@ -44,7 +44,7 @@ type Backend interface {
 	// ContainerCreateWorkdir creates the workdir
 	ContainerCreateWorkdir(containerID string) error
-	CreateImage(config []byte, parent string, platform string) (Image, error)
+	CreateImage(config []byte, parent string) (Image, error)
 	ImageCacheBuilder
 }
@@ -79,7 +79,7 @@ type Result struct {
 // ImageCacheBuilder represents a generator for stateful image cache.
 type ImageCacheBuilder interface {
 	// MakeImageCache creates a stateful image cache.
-	MakeImageCache(cacheFrom []string, platform string) ImageCache
+	MakeImageCache(cacheFrom []string) ImageCache
 }
 // ImageCache abstracts an image cache.

View file

@@ -123,7 +123,7 @@ func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (
 		PathCache: bm.pathCache,
 		IDMappings: bm.idMappings,
 	}
-	return newBuilder(ctx, builderOptions, os).build(source, dockerfile)
+	return newBuilder(ctx, builderOptions).build(source, dockerfile)
 }
 func (bm *BuildManager) initializeClientSession(ctx context.Context, cancel func(), options *types.ImageBuildOptions) (builder.Source, error) {
@@ -190,7 +190,7 @@ type Builder struct {
 }
 // newBuilder creates a new Dockerfile builder from an optional dockerfile and a Options.
-func newBuilder(clientCtx context.Context, options builderOptions, os string) *Builder {
+func newBuilder(clientCtx context.Context, options builderOptions) *Builder {
 	config := options.Options
 	if config == nil {
 		config = new(types.ImageBuildOptions)
@@ -207,7 +207,7 @@ func newBuilder(clientCtx context.Context, options builderOptions, os string) *B
 		idMappings: options.IDMappings,
 		imageSources: newImageSources(clientCtx, options),
 		pathCache: options.PathCache,
-		imageProber: newImageProber(options.Backend, config.CacheFrom, os, config.NoCache),
+		imageProber: newImageProber(options.Backend, config.CacheFrom, config.NoCache),
 		containerManager: newContainerManager(options.Backend),
 	}
@@ -367,14 +367,9 @@ func BuildFromConfig(config *container.Config, changes []string) (*container.Con
 		return nil, errdefs.InvalidParameter(err)
 	}
-	os := runtime.GOOS
-	if dockerfile.OS != "" {
-		os = dockerfile.OS
-	}
 	b := newBuilder(context.Background(), builderOptions{
 		Options: &types.ImageBuildOptions{NoCache: true},
-	}, os)
+	})
 	// ensure that the commands are valid
 	for _, n := range dockerfile.AST.Children {

View file

@@ -31,7 +31,7 @@ func newBuilderWithMockBackend() *Builder {
 			Options: &types.ImageBuildOptions{Platform: runtime.GOOS},
 			Backend: mockBackend,
 		}),
-		imageProber: newImageProber(mockBackend, nil, runtime.GOOS, false),
+		imageProber: newImageProber(mockBackend, nil, false),
 		containerManager: newContainerManager(mockBackend),
 	}
 	return b
@@ -427,10 +427,10 @@ func TestRunWithBuildArgs(t *testing.T) {
 	}
 	mockBackend := b.docker.(*MockBackend)
-	mockBackend.makeImageCacheFunc = func(_ []string, _ string) builder.ImageCache {
+	mockBackend.makeImageCacheFunc = func(_ []string) builder.ImageCache {
 		return imageCache
 	}
-	b.imageProber = newImageProber(mockBackend, nil, runtime.GOOS, false)
+	b.imageProber = newImageProber(mockBackend, nil, false)
 	mockBackend.getImageFunc = func(_ string) (builder.Image, builder.ReleaseableLayer, error) {
 		return &mockImage{
 			id: "abcdef",

View file

@@ -19,13 +19,13 @@ type imageProber struct {
 	cacheBusted bool
 }
-func newImageProber(cacheBuilder builder.ImageCacheBuilder, cacheFrom []string, platform string, noCache bool) ImageProber {
+func newImageProber(cacheBuilder builder.ImageCacheBuilder, cacheFrom []string, noCache bool) ImageProber {
 	if noCache {
 		return &nopProber{}
 	}
 	reset := func() builder.ImageCache {
-		return cacheBuilder.MakeImageCache(cacheFrom, platform)
+		return cacheBuilder.MakeImageCache(cacheFrom)
 	}
 	return &imageProber{cache: reset(), reset: reset}
 }

View file

@@ -154,7 +154,7 @@ func (b *Builder) exportImage(state *dispatchState, imageMount *imageMount, runC
 		return errors.Wrap(err, "failed to encode image config")
 	}
-	exportedImage, err := b.docker.CreateImage(config, state.imageID, parentImage.OS)
+	exportedImage, err := b.docker.CreateImage(config, state.imageID)
 	if err != nil {
 		return errors.Wrapf(err, "failed to export image")
 	}

View file

@@ -20,7 +20,7 @@ type MockBackend struct {
 	containerCreateFunc func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
 	commitFunc func(string, *backend.ContainerCommitConfig) (string, error)
 	getImageFunc func(string) (builder.Image, builder.ReleaseableLayer, error)
-	makeImageCacheFunc func(cacheFrom []string, platform string) builder.ImageCache
+	makeImageCacheFunc func(cacheFrom []string) builder.ImageCache
 }
 func (m *MockBackend) ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error {
@@ -73,14 +73,14 @@ func (m *MockBackend) GetImageAndReleasableLayer(ctx context.Context, refOrID st
 	return &mockImage{id: "theid"}, &mockLayer{}, nil
 }
-func (m *MockBackend) MakeImageCache(cacheFrom []string, platform string) builder.ImageCache {
+func (m *MockBackend) MakeImageCache(cacheFrom []string) builder.ImageCache {
 	if m.makeImageCacheFunc != nil {
-		return m.makeImageCacheFunc(cacheFrom, platform)
+		return m.makeImageCacheFunc(cacheFrom)
 	}
 	return nil
 }
-func (m *MockBackend) CreateImage(config []byte, parent string, platform string) (builder.Image, error) {
+func (m *MockBackend) CreateImage(config []byte, parent string) (builder.Image, error) {
 	return nil, nil
 }

View file

@@ -2,7 +2,6 @@ package daemon
 import (
 	"io"
-	"runtime"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/api/types"
@@ -24,6 +23,7 @@ type releaseableLayer struct {
 	layerStore layer.Store
 	roLayer layer.Layer
 	rwLayer layer.RWLayer
+	os string
 }
 func (rl *releaseableLayer) Mount() (containerfs.ContainerFS, error) {
@@ -35,7 +35,7 @@ func (rl *releaseableLayer) Mount() (containerfs.ContainerFS, error) {
 	}
 	mountID := stringid.GenerateRandomID()
-	rl.rwLayer, err = rl.layerStore.CreateRWLayer(mountID, chainID, nil)
+	rl.rwLayer, err = rl.layerStore.CreateRWLayer(mountID, chainID, rl.os, nil)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to create rwlayer")
 	}
@@ -67,12 +67,12 @@ func (rl *releaseableLayer) Commit(os string) (builder.ReleaseableLayer, error)
 	}
 	defer stream.Close()
-	newLayer, err := rl.layerStore.Register(stream, chainID, layer.OS(os))
+	newLayer, err := rl.layerStore.Register(stream, chainID, os)
 	if err != nil {
 		return nil, err
 	}
-	// TODO: An optimization would be to handle empty layers before returning
-	return &releaseableLayer{layerStore: rl.layerStore, roLayer: newLayer}, nil
+	// TODO: An optimization woudld be to handle empty layers before returning
+	return &releaseableLayer{layerStore: rl.layerStore, roLayer: newLayer, os: os}, nil
 }
 func (rl *releaseableLayer) DiffID() layer.DiffID {
@@ -128,9 +128,9 @@ func (rl *releaseableLayer) releaseROLayer() error {
 	return err
 }
-func newReleasableLayerForImage(img *image.Image, layerStore layer.Store) (builder.ReleaseableLayer, error) {
+func newReleasableLayerForImage(img *image.Image, layerStore layer.Store, os string) (builder.ReleaseableLayer, error) {
 	if img == nil || img.RootFS.ChainID() == "" {
-		return &releaseableLayer{layerStore: layerStore}, nil
+		return &releaseableLayer{layerStore: layerStore, os: os}, nil
 	}
 	// Hold a reference to the image layer so that it can't be removed before
 	// it is released
@@ -138,11 +138,11 @@ func newReleasableLayerForImage(img *image.Image, layerStore layer.Store) (build
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to get layer for image %s", img.ImageID())
 	}
-	return &releaseableLayer{layerStore: layerStore, roLayer: roLayer}, nil
+	return &releaseableLayer{layerStore: layerStore, roLayer: roLayer, os: os}, nil
 }
 // TODO: could this use the regular daemon PullImage ?
-func (daemon *Daemon) pullForBuilder(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer, platform string) (*image.Image, error) {
+func (daemon *Daemon) pullForBuilder(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer, os string) (*image.Image, error) {
 	ref, err := reference.ParseNormalizedNamed(name)
 	if err != nil {
 		return nil, err
@@ -161,7 +161,7 @@ func (daemon *Daemon) pullForBuilder(ctx context.Context, name string, authConfi
 		pullRegistryAuth = &resolvedConfig
 	}
-	if err := daemon.pullImageWithReference(ctx, ref, platform, nil, pullRegistryAuth, output); err != nil {
+	if err := daemon.pullImageWithReference(ctx, ref, os, nil, pullRegistryAuth, output); err != nil {
 		return nil, err
 	}
 	return daemon.GetImage(name)
@@ -172,7 +172,7 @@ func (daemon *Daemon) pullForBuilder(ctx context.Context, name string, authConfi
 // leaking of layers.
 func (daemon *Daemon) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ReleaseableLayer, error) {
 	if refOrID == "" {
-		layer, err := newReleasableLayerForImage(nil, daemon.stores[opts.OS].layerStore)
+		layer, err := newReleasableLayerForImage(nil, daemon.layerStore, opts.OS)
 		return nil, layer, err
 	}
@@ -183,7 +183,7 @@ func (daemon *Daemon) GetImageAndReleasableLayer(ctx context.Context, refOrID st
 		}
 		// TODO: shouldn't we error out if error is different from "not found" ?
 		if image != nil {
-			layer, err := newReleasableLayerForImage(image, daemon.stores[opts.OS].layerStore)
+			layer, err := newReleasableLayerForImage(image, daemon.layerStore, image.OperatingSystem())
 			return image, layer, err
 		}
 	}
@@ -192,29 +192,26 @@ func (daemon *Daemon) GetImageAndReleasableLayer(ctx context.Context, refOrID st
 	if err != nil {
 		return nil, nil, err
 	}
-	layer, err := newReleasableLayerForImage(image, daemon.stores[opts.OS].layerStore)
+	layer, err := newReleasableLayerForImage(image, daemon.layerStore, image.OperatingSystem())
 	return image, layer, err
 }
 // CreateImage creates a new image by adding a config and ID to the image store.
 // This is similar to LoadImage() except that it receives JSON encoded bytes of
 // an image instead of a tar archive.
-func (daemon *Daemon) CreateImage(config []byte, parent string, platform string) (builder.Image, error) {
-	if platform == "" {
-		platform = runtime.GOOS
-	}
-	id, err := daemon.stores[platform].imageStore.Create(config)
+func (daemon *Daemon) CreateImage(config []byte, parent string) (builder.Image, error) {
+	id, err := daemon.imageStore.Create(config)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to create image")
 	}
 	if parent != "" {
-		if err := daemon.stores[platform].imageStore.SetParent(id, image.ID(parent)); err != nil {
+		if err := daemon.imageStore.SetParent(id, image.ID(parent)); err != nil {
 			return nil, errors.Wrapf(err, "failed to set parent %s", parent)
 		}
 	}
-	return daemon.stores[platform].imageStore.Get(id)
+	return daemon.imageStore.Get(id)
 }
 // IDMappings returns uid/gid mappings for the builder

View file

@@ -7,12 +7,12 @@ import (
 )
 // MakeImageCache creates a stateful image cache.
-func (daemon *Daemon) MakeImageCache(sourceRefs []string, platform string) builder.ImageCache {
+func (daemon *Daemon) MakeImageCache(sourceRefs []string) builder.ImageCache {
 	if len(sourceRefs) == 0 {
-		return cache.NewLocal(daemon.stores[platform].imageStore)
+		return cache.NewLocal(daemon.imageStore)
 	}
-	cache := cache.New(daemon.stores[platform].imageStore)
+	cache := cache.New(daemon.imageStore)
 	for _, ref := range sourceRefs {
 		img, err := daemon.GetImage(ref)

View file

@@ -180,17 +180,17 @@ func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (str
 		parent = new(image.Image)
 		parent.RootFS = image.NewRootFS()
 	} else {
-		parent, err = daemon.stores[container.OS].imageStore.Get(container.ImageID)
+		parent, err = daemon.imageStore.Get(container.ImageID)
 		if err != nil {
 			return "", err
 		}
 	}
-	l, err := daemon.stores[container.OS].layerStore.Register(rwTar, parent.RootFS.ChainID(), layer.OS(container.OS))
+	l, err := daemon.layerStore.Register(rwTar, parent.RootFS.ChainID(), container.OS)
 	if err != nil {
 		return "", err
 	}
-	defer layer.ReleaseAndLog(daemon.stores[container.OS].layerStore, l)
+	defer layer.ReleaseAndLog(daemon.layerStore, l)
 	containerConfig := c.ContainerConfig
 	if containerConfig == nil {
@@ -209,13 +209,13 @@ func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (str
 		return "", err
 	}
-	id, err := daemon.stores[container.OS].imageStore.Create(config)
+	id, err := daemon.imageStore.Create(config)
 	if err != nil {
 		return "", err
 	}
 	if container.ImageID != "" {
-		if err := daemon.stores[container.OS].imageStore.SetParent(id, container.ImageID); err != nil {
+		if err := daemon.imageStore.SetParent(id, container.ImageID); err != nil {
 			return "", err
 		}
 	}
@@ -234,7 +234,7 @@ func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (str
 			return "", err
 		}
 	}
-	if err := daemon.TagImageWithReference(id, container.OS, newTag); err != nil {
+	if err := daemon.TagImageWithReference(id, newTag); err != nil {
 		return "", err
 	}
 	imageRef = reference.FamiliarString(newTag)

View file

@@ -257,7 +257,7 @@ func (daemon *Daemon) generateSecurityOpt(hostConfig *containertypes.HostConfig)
 func (daemon *Daemon) setRWLayer(container *container.Container) error {
 	var layerID layer.ChainID
 	if container.ImageID != "" {
-		img, err := daemon.stores[container.OS].imageStore.Get(container.ImageID)
+		img, err := daemon.imageStore.Get(container.ImageID)
 		if err != nil {
 			return err
 		}
@@ -270,7 +270,7 @@ func (daemon *Daemon) setRWLayer(container *container.Container) error {
 		StorageOpt: container.HostConfig.StorageOpt,
 	}
-	rwLayer, err := daemon.stores[container.OS].layerStore.CreateRWLayer(container.ID, layerID, rwLayerOpts)
+	rwLayer, err := daemon.layerStore.CreateRWLayer(container.ID, layerID, container.OS, rwLayerOpts)
 	if err != nil {
 		return err
 	}

View file

@@ -69,50 +69,46 @@ var (
 	errSystemNotSupported = errors.New("the Docker daemon is not supported on this platform")
 )
-type daemonStore struct {
-	graphDriver string
-	imageRoot string
-	imageStore image.Store
-	layerStore layer.Store
-	distributionMetadataStore dmetadata.Store
-}
 // Daemon holds information about the Docker daemon.
 type Daemon struct {
 	ID string
 	repository string
 	containers container.Store
 	containersReplica container.ViewDB
 	execCommands *exec.Store
 	downloadManager *xfer.LayerDownloadManager
 	uploadManager *xfer.LayerUploadManager
 	trustKey libtrust.PrivateKey
 	idIndex *truncindex.TruncIndex
 	configStore *config.Config
 	statsCollector *stats.Collector
 	defaultLogConfig containertypes.LogConfig
 	RegistryService registry.Service
 	EventsService *events.Events
 	netController libnetwork.NetworkController
 	volumes *store.VolumeStore
 	discoveryWatcher discovery.Reloader
 	root string
 	seccompEnabled bool
 	apparmorEnabled bool
 	shutdown bool
 	idMappings *idtools.IDMappings
-	stores map[string]daemonStore // By container target platform
+	graphDrivers map[string]string // By operating system
 	referenceStore refstore.Store
+	imageStore image.Store
+	imageRoot string
+	layerStore layer.Store
+	distributionMetadataStore dmetadata.Store
 	PluginStore *plugin.Store // todo: remove
 	pluginManager *plugin.Manager
 	linkIndex *linkIndex
 	containerd libcontainerd.Client
 	containerdRemote libcontainerd.Remote
 	defaultIsolation containertypes.Isolation // Default isolation mode on Windows
 	clusterProvider cluster.Provider
 	cluster Cluster
 	genericResources []swarm.GenericResource
 	metricsPluginListener net.Listener
 	machineMemory uint64
@@ -161,9 +157,9 @@ func (daemon *Daemon) restore() error {
 		}
 		// Ignore the container if it does not support the current driver being used by the graph
-		currentDriverForContainerOS := daemon.stores[container.OS].graphDriver
+		currentDriverForContainerOS := daemon.graphDrivers[container.OS]
 		if (container.Driver == "" && currentDriverForContainerOS == "aufs") || container.Driver == currentDriverForContainerOS {
-			rwlayer, err := daemon.stores[container.OS].layerStore.GetRWLayer(container.ID)
+			rwlayer, err := daemon.layerStore.GetRWLayer(container.ID)
 			if err != nil {
 				logrus.Errorf("Failed to load container mount %v: %v", id, err)
 				continue
@@ -706,11 +702,11 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 	// lcow. Unix platforms however run a single graphdriver for all containers, and it can
 	// be set through an environment variable, a daemon start parameter, or chosen through
 	// initialization of the layerstore through driver priority order for example.
-	d.stores = make(map[string]daemonStore)
+	d.graphDrivers = make(map[string]string)
 	if runtime.GOOS == "windows" {
-		d.stores["windows"] = daemonStore{graphDriver: "windowsfilter"}
+		d.graphDrivers[runtime.GOOS] = "windowsfilter"
 		if system.LCOWSupported() {
-			d.stores["linux"] = daemonStore{graphDriver: "lcow"}
+			d.graphDrivers["linux"] = "lcow"
 		}
 	} else {
 		driverName := os.Getenv("DOCKER_DRIVER")
@@ -719,7 +715,7 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 		} else {
 			logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName)
 		}
-		d.stores[runtime.GOOS] = daemonStore{graphDriver: driverName} // May still be empty. Layerstore init determines instead.
+		d.graphDrivers[runtime.GOOS] = driverName // May still be empty. Layerstore init determines instead.
 	}
 	d.RegistryService = registryService
@@ -750,55 +746,43 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 		return nil, errors.Wrap(err, "couldn't create plugin manager")
 	}
-	var graphDrivers []string
-	for operatingSystem, ds := range d.stores {
-		ls, err := layer.NewStoreFromOptions(layer.StoreOptions{
-			StorePath: config.Root,
-			MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
-			GraphDriver: ds.graphDriver,
-			GraphDriverOptions: config.GraphOptions,
-			IDMappings: idMappings,
-			PluginGetter: d.PluginStore,
-			ExperimentalEnabled: config.Experimental,
-			OS: operatingSystem,
-		})
-		if err != nil {
-			return nil, err
-		}
-		ds.graphDriver = ls.DriverName() // As layerstore may set the driver
-		ds.layerStore = ls
-		d.stores[operatingSystem] = ds
-		graphDrivers = append(graphDrivers, ls.DriverName())
-	}
+	d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
+		Root: config.Root,
+		MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
+		GraphDrivers: d.graphDrivers,
+		GraphDriverOptions: config.GraphOptions,
+		IDMappings: idMappings,
+		PluginGetter: d.PluginStore,
+		ExperimentalEnabled: config.Experimental,
+	})
+	if err != nil {
+		return nil, err
+	}
+	// As layerstore may set the driver
+	for os := range d.graphDrivers {
+		d.graphDrivers[os] = d.layerStore.DriverName(os)
+	}
-	// Configure and validate the kernels security support
-	if err := configureKernelSecuritySupport(config, graphDrivers); err != nil {
+	// Configure and validate the kernels security support. Note this is a Linux/FreeBSD
+	// operation only, so it is safe to pass *just* the runtime OS graphdriver.
+	if err := configureKernelSecuritySupport(config, d.graphDrivers[runtime.GOOS]); err != nil {
 		return nil, err
 	}
 	logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
-	lsMap := make(map[string]layer.Store)
-	for operatingSystem, ds := range d.stores {
-		lsMap[operatingSystem] = ds.layerStore
-	}
-	d.downloadManager = xfer.NewLayerDownloadManager(lsMap, *config.MaxConcurrentDownloads)
+	d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads)
 	logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
 	d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)
-	for operatingSystem, ds := range d.stores {
-		imageRoot := filepath.Join(config.Root, "image", ds.graphDriver)
-		ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
-		if err != nil {
-			return nil, err
-		}
-		var is image.Store
-		is, err = image.NewImageStore(ifs, operatingSystem, ds.layerStore)
-		if err != nil {
-			return nil, err
-		}
-		ds.imageRoot = imageRoot
-		ds.imageStore = is
-		d.stores[operatingSystem] = ds
-	}
+	d.imageRoot = filepath.Join(config.Root, "image", d.graphDrivers[runtime.GOOS])
+	ifs, err := image.NewFSStoreBackend(filepath.Join(d.imageRoot, "imagedb"))
+	if err != nil {
+		return nil, err
+	}
+	d.imageStore, err = image.NewImageStore(ifs, d.layerStore)
+	if err != nil {
+		return nil, err
+	}
 	// Configure the volumes driver
@@ -830,30 +814,25 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 	// operating systems, the list of graphdrivers available isn't user configurable.
 	// For backwards compatibility, we just put it under the windowsfilter
 	// directory regardless.
-	refStoreLocation := filepath.Join(d.stores[runtime.GOOS].imageRoot, `repositories.json`)
+	refStoreLocation := filepath.Join(d.imageRoot, `repositories.json`)
 	rs, err := refstore.NewReferenceStore(refStoreLocation)
 	if err != nil {
 		return nil, fmt.Errorf("Couldn't create reference store repository: %s", err)
 	}
 	d.referenceStore = rs
-	for platform, ds := range d.stores {
-		dms, err := dmetadata.NewFSMetadataStore(filepath.Join(ds.imageRoot, "distribution"), platform)
-		if err != nil {
-			return nil, err
-		}
-		ds.distributionMetadataStore = dms
-		d.stores[platform] = ds
+	d.distributionMetadataStore, err = dmetadata.NewFSMetadataStore(filepath.Join(d.imageRoot, "distribution"))
+	if err != nil {
+		return nil, err
+	}
 	// No content-addressability migration on Windows as it never supported pre-CA
 	if runtime.GOOS != "windows" {
 		migrationStart := time.Now()
-		if err := v1.Migrate(config.Root, ds.graphDriver, ds.layerStore, ds.imageStore, rs, dms); err != nil {
+		if err := v1.Migrate(config.Root, d.graphDrivers[runtime.GOOS], d.layerStore, d.imageStore, rs, d.distributionMetadataStore); err != nil {
 			logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
 		}
 		logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
 	}
-	}
 	// Discovery is only enabled when the daemon is launched with an address to advertise. When
@@ -922,13 +901,13 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 	engineMemory.Set(float64(info.MemTotal))
 	gd := ""
-	for platform, ds := range d.stores {
+	for os, driver := range d.graphDrivers {
 		if len(gd) > 0 {
 			gd += ", "
 		}
-		gd += ds.graphDriver
-		if len(d.stores) > 1 {
-			gd = fmt.Sprintf("%s (%s)", gd, platform)
+		gd += driver
+		if len(d.graphDrivers) > 1 {
+			gd = fmt.Sprintf("%s (%s)", gd, os)
 		}
 	}
 	logrus.WithFields(logrus.Fields{
@@ -1009,7 +988,7 @@ func (daemon *Daemon) Shutdown() error {
 			logrus.Errorf("Stop container error: %v", err)
 			return
 		}
-		if mountid, err := daemon.stores[c.OS].layerStore.GetMountID(c.ID); err == nil {
+		if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil {
 			daemon.cleanupMountsByID(mountid)
 		}
 		logrus.Debugf("container stopped %s", c.ID)
@@ -1022,12 +1001,8 @@ func (daemon *Daemon) Shutdown() error {
 		}
 	}
-	for platform, ds := range daemon.stores {
-		if ds.layerStore != nil {
-			if err := ds.layerStore.Cleanup(); err != nil {
-				logrus.Errorf("Error during layer Store.Cleanup(): %v %s", err, platform)
-			}
-		}
-	}
+	if err := daemon.layerStore.Cleanup(); err != nil {
+		logrus.Errorf("Error during layer Store.Cleanup(): %v", err)
+	}
 	// If we are part of a cluster, clean up cluster's stuff
@@ -1107,8 +1082,8 @@ func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) {
 }
 // GraphDriverName returns the name of the graph driver used by the layer.Store
-func (daemon *Daemon) GraphDriverName(platform string) string {
-	return daemon.stores[platform].layerStore.DriverName()
+func (daemon *Daemon) GraphDriverName(os string) string {
+	return daemon.layerStore.DriverName(os)
 }
 // prepareTempDir prepares and returns the default directory to use
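
As a small illustrative sketch of the pattern the daemon.go diff above settles on (assumed, simplified shapes rather than the real moby types): the single layer store answers DriverName(os) per operating system, so GraphDriverName(os) no longer needs to index a per-platform daemonStore map.

package main

import "fmt"

// coalescedLayerStore stands in for the single layer.Store; DriverName(os)
// mirrors the per-OS lookup shown in the diff above.
type coalescedLayerStore struct {
	driverByOS map[string]string
}

func (s coalescedLayerStore) DriverName(os string) string {
	return s.driverByOS[os]
}

func main() {
	ls := coalescedLayerStore{driverByOS: map[string]string{
		"windows": "windowsfilter",
		"linux":   "lcow",
	}}
	// GraphDriverName(os) now just delegates to the one store.
	fmt.Println(ls.DriverName("linux"))
}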

View file

@@ -814,22 +814,14 @@ func overlaySupportsSelinux() (bool, error) {
 }
 // configureKernelSecuritySupport configures and validates security support for the kernel
-func configureKernelSecuritySupport(config *config.Config, driverNames []string) error {
+func configureKernelSecuritySupport(config *config.Config, driverName string) error {
 	if config.EnableSelinuxSupport {
 		if !selinuxEnabled() {
 			logrus.Warn("Docker could not enable SELinux on the host system")
 			return nil
 		}
-		overlayFound := false
-		for _, d := range driverNames {
-			if d == "overlay" || d == "overlay2" {
-				overlayFound = true
-				break
-			}
-		}
-		if overlayFound {
+		if driverName == "overlay" || driverName == "overlay2" {
 			// If driver is overlay or overlay2, make sure kernel
 			// supports selinux with overlay.
 			supported, err := overlaySupportsSelinux()
@@ -838,7 +830,7 @@ func configureKernelSecuritySupport(config *config.Config, driverNames []string)
 			}
 			if !supported {
-				logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverNames)
+				logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverName)
 			}
 		}
 	} else {

View file

@@ -262,7 +262,7 @@ func ensureServicesInstalled(services []string) error {
 }
 // configureKernelSecuritySupport configures and validate security support for the kernel
-func configureKernelSecuritySupport(config *config.Config, driverNames []string) error {
+func configureKernelSecuritySupport(config *config.Config, driverName string) error {
 	return nil
 }

View file

@@ -118,7 +118,7 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemo
 	// When container creation fails and `RWLayer` has not been created yet, we
 	// do not call `ReleaseRWLayer`
 	if container.RWLayer != nil {
-		metadata, err := daemon.stores[container.OS].layerStore.ReleaseRWLayer(container.RWLayer)
+		metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer)
 		layer.LogReleaseMetadata(metadata)
 		if err != nil && err != layer.ErrMountDoesNotExist && !os.IsNotExist(errors.Cause(err)) {
 			e := errors.Wrapf(err, "driver %q failed to remove root filesystem for %s", daemon.GraphDriverName(container.OS), container.ID)

View file

@@ -15,12 +15,12 @@ import (
 	"github.com/sirupsen/logrus"
 )
-func (daemon *Daemon) getLayerRefs(platform string) map[layer.ChainID]int {
-	tmpImages := daemon.stores[platform].imageStore.Map()
+func (daemon *Daemon) getLayerRefs() map[layer.ChainID]int {
+	tmpImages := daemon.imageStore.Map()
 	layerRefs := map[layer.ChainID]int{}
 	for id, img := range tmpImages {
 		dgst := digest.Digest(id)
-		if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.stores[platform].imageStore.Children(id)) != 0 {
+		if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 {
 			continue
 		}
@@ -53,7 +53,6 @@ func (daemon *Daemon) SystemDiskUsage(ctx context.Context) (*types.DiskUsage, er
 	}
 	// Get all top images with extra attributes
-	// TODO @jhowardmsft LCOW. This may need revisiting
 	allImages, err := daemon.Images(filters.NewArgs(), false, true)
 	if err != nil {
 		return nil, fmt.Errorf("failed to retrieve image list: %v", err)
@@ -96,24 +95,22 @@ func (daemon *Daemon) SystemDiskUsage(ctx context.Context) (*types.DiskUsage, er
 	// Get total layers size on disk
 	var allLayersSize int64
-	for platform := range daemon.stores {
-		layerRefs := daemon.getLayerRefs(platform)
-		allLayers := daemon.stores[platform].layerStore.Map()
-		for _, l := range allLayers {
-			select {
-			case <-ctx.Done():
-				return nil, ctx.Err()
-			default:
-				size, err := l.DiffSize()
-				if err == nil {
-					if _, ok := layerRefs[l.ChainID()]; ok {
-						allLayersSize += size
-					} else {
-						logrus.Warnf("found leaked image layer %v platform %s", l.ChainID(), platform)
-					}
-				} else {
-					logrus.Warnf("failed to get diff size for layer %v %s", l.ChainID(), platform)
-				}
-			}
-		}
-	}
+	layerRefs := daemon.getLayerRefs()
+	allLayers := daemon.layerStore.Map()
+	for _, l := range allLayers {
+		select {
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		default:
+			size, err := l.DiffSize()
+			if err == nil {
+				if _, ok := layerRefs[l.ChainID()]; ok {
+					allLayersSize += size
+				} else {
+					logrus.Warnf("found leaked image layer %v", l.ChainID())
+				}
+			} else {
+				logrus.Warnf("failed to get diff size for layer %v", l.ChainID())
+			}
+		}
+	}

View file

@@ -15,12 +15,12 @@ func (daemon *Daemon) getSize(containerID string) (int64, int64) {
 		err error
 	)
-	rwlayer, err := daemon.stores[runtime.GOOS].layerStore.GetRWLayer(containerID)
+	rwlayer, err := daemon.layerStore.GetRWLayer(containerID)
 	if err != nil {
 		logrus.Errorf("Failed to compute size of container rootfs %v: %v", containerID, err)
 		return sizeRw, sizeRootfs
 	}
-	defer daemon.stores[runtime.GOOS].layerStore.ReleaseRWLayer(rwlayer)
+	defer daemon.layerStore.ReleaseRWLayer(rwlayer)
 	sizeRw, err = rwlayer.Size()
 	if err != nil {

View file

@@ -38,32 +38,31 @@ func (daemon *Daemon) GetImageIDAndOS(refOrID string) (image.ID, string, error)
 			return "", "", errImageDoesNotExist{ref}
 		}
 		id := image.IDFromDigest(digested.Digest())
-		for platform := range daemon.stores {
-			if _, err = daemon.stores[platform].imageStore.Get(id); err == nil {
-				return id, platform, nil
-			}
+		if img, err := daemon.imageStore.Get(id); err == nil {
+			return id, img.OperatingSystem(), nil
 		}
 		return "", "", errImageDoesNotExist{ref}
 	}
 	if digest, err := daemon.referenceStore.Get(namedRef); err == nil {
 		// Search the image stores to get the operating system, defaulting to host OS.
-		imageOS := runtime.GOOS
 		id := image.IDFromDigest(digest)
-		for os := range daemon.stores {
-			if img, err := daemon.stores[os].imageStore.Get(id); err == nil {
-				imageOS = img.OperatingSystem()
-				break
-			}
+		if img, err := daemon.imageStore.Get(id); err == nil {
+			return id, img.OperatingSystem(), nil
 		}
-		return id, imageOS, nil
 	}
 	// Search based on ID
-	for os := range daemon.stores {
-		if id, err := daemon.stores[os].imageStore.Search(refOrID); err == nil {
-			return id, os, nil
+	if id, err := daemon.imageStore.Search(refOrID); err == nil {
+		img, err := daemon.imageStore.Get(id)
+		if err != nil {
+			return "", "", errImageDoesNotExist{ref}
 		}
+		imageOS := img.OperatingSystem()
+		if imageOS == "" {
+			imageOS = runtime.GOOS
+		}
+		return id, imageOS, nil
 	}
 	return "", "", errImageDoesNotExist{ref}
@@ -71,9 +70,9 @@ func (daemon *Daemon) GetImageIDAndOS(refOrID string) (image.ID, string, error)
 // GetImage returns an image corresponding to the image referred to by refOrID.
 func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) {
-	imgID, os, err := daemon.GetImageIDAndOS(refOrID)
+	imgID, _, err := daemon.GetImageIDAndOS(refOrID)
 	if err != nil {
 		return nil, err
 	}
-	return daemon.stores[os].imageStore.Get(imgID)
+	return daemon.imageStore.Get(imgID)
 }
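
With a single image store, the operating system is read from the image itself rather than implied by which per-platform store it was found in. Here is a tiny sketch of that accessor pattern; the img type is a stand-in, not the real image.Image, and the fallback mirrors the defaulting to runtime.GOOS shown in the new GetImageIDAndOS code above.

package main

import (
	"fmt"
	"runtime"
)

// img stands in for image.Image; only the OS field matters here.
type img struct{ OS string }

// OperatingSystem falls back to the host OS when the field is unset,
// mirroring the defaulting shown in GetImageIDAndOS above.
func (i img) OperatingSystem() string {
	if i.OS == "" {
		return runtime.GOOS
	}
	return i.OS
}

func main() {
	fmt.Println(img{OS: "linux"}.OperatingSystem())
	fmt.Println(img{}.OperatingSystem()) // host OS
}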

View file

@@ -66,7 +66,7 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
 	start := time.Now()
 	records := []types.ImageDeleteResponseItem{}
-	imgID, os, err := daemon.GetImageIDAndOS(imageRef)
+	imgID, _, err := daemon.GetImageIDAndOS(imageRef)
 	if err != nil {
 		return nil, err
 	}
@@ -95,7 +95,7 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
 			return nil, err
 		}
-		parsedRef, err = daemon.removeImageRef(os, parsedRef)
+		parsedRef, err = daemon.removeImageRef(parsedRef)
 		if err != nil {
 			return nil, err
 		}
@@ -123,7 +123,7 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
 		remainingRefs := []reference.Named{}
 		for _, repoRef := range repoRefs {
 			if _, repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() {
-				if _, err := daemon.removeImageRef(os, repoRef); err != nil {
+				if _, err := daemon.removeImageRef(repoRef); err != nil {
 					return records, err
 				}
@@ -153,12 +153,12 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
 		if !force {
 			c |= conflictSoft &^ conflictActiveReference
 		}
-		if conflict := daemon.checkImageDeleteConflict(imgID, os, c); conflict != nil {
+		if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil {
 			return nil, conflict
 		}
 		for _, repoRef := range repoRefs {
-			parsedRef, err := daemon.removeImageRef(os, repoRef)
+			parsedRef, err := daemon.removeImageRef(repoRef)
 			if err != nil {
 				return nil, err
 			}
@@ -171,7 +171,7 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
 		}
 	}
-	if err := daemon.imageDeleteHelper(imgID, os, &records, force, prune, removedRepositoryRef); err != nil {
+	if err := daemon.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef); err != nil {
 		return nil, err
 	}
@@ -232,7 +232,7 @@ func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Contai
 // repositoryRef must not be an image ID but a repository name followed by an
 // optional tag or digest reference. If tag or digest is omitted, the default
 // tag is used. Returns the resolved image reference and an error.
-func (daemon *Daemon) removeImageRef(platform string, ref reference.Named) (reference.Named, error) {
+func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, error) {
 	ref = reference.TagNameOnly(ref)
 	// Ignore the boolean value returned, as far as we're concerned, this
@@ -248,11 +248,11 @@ func (daemon *Daemon) removeImageRef(platform string, ref reference.Named) (refe
 // on the first encountered error. Removed references are logged to this
 // daemon's event service. An "Untagged" types.ImageDeleteResponseItem is added to the
 // given list of records.
-func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, platform string, records *[]types.ImageDeleteResponseItem) error {
+func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDeleteResponseItem) error {
 	imageRefs := daemon.referenceStore.References(imgID.Digest())
 	for _, imageRef := range imageRefs {
-		parsedRef, err := daemon.removeImageRef(platform, imageRef)
+		parsedRef, err := daemon.removeImageRef(imageRef)
 		if err != nil {
 			return err
 		}
@@ -299,15 +299,15 @@ func (idc *imageDeleteConflict) Conflict() {}
 // conflict is encountered, it will be returned immediately without deleting
 // the image. If quiet is true, any encountered conflicts will be ignored and
 // the function will return nil immediately without deleting the image.
-func (daemon *Daemon) imageDeleteHelper(imgID image.ID, platform string, records *[]types.ImageDeleteResponseItem, force, prune, quiet bool) error {
+func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDeleteResponseItem, force, prune, quiet bool) error {
 	// First, determine if this image has any conflicts. Ignore soft conflicts
 	// if force is true.
 	c := conflictHard
 	if !force {
 		c |= conflictSoft
 	}
-	if conflict := daemon.checkImageDeleteConflict(imgID, platform, c); conflict != nil {
-		if quiet && (!daemon.imageIsDangling(imgID, platform) || conflict.used) {
+	if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil {
+		if quiet && (!daemon.imageIsDangling(imgID) || conflict.used) {
 			// Ignore conflicts UNLESS the image is "dangling" or not being used in
 			// which case we want the user to know.
 			return nil
@@ -318,18 +318,18 @@ func (daemon *Daemon) imageDeleteHelper(imgID image.ID, platform string, records
 		return conflict
 	}
-	parent, err := daemon.stores[platform].imageStore.GetParent(imgID)
+	parent, err := daemon.imageStore.GetParent(imgID)
 	if err != nil {
 		// There may be no parent
 		parent = ""
 	}
 	// Delete all repository tag/digest references to this image.
-	if err := daemon.removeAllReferencesToImageID(imgID, platform, records); err != nil {
+	if err := daemon.removeAllReferencesToImageID(imgID, records); err != nil {
 		return err
 	}
-	removedLayers, err := daemon.stores[platform].imageStore.Delete(imgID)
+	removedLayers, err := daemon.imageStore.Delete(imgID)
 	if err != nil {
 		return err
 	}
@@ -349,7 +349,7 @@ func (daemon *Daemon) imageDeleteHelper(imgID image.ID, platform string, records
 	// either running or stopped).
 	// Do not force prunings, but do so quietly (stopping on any encountered
 	// conflicts).
-	return daemon.imageDeleteHelper(parent, platform, records, false, true, true)
+	return daemon.imageDeleteHelper(parent, records, false, true, true)
 }
 // checkImageDeleteConflict determines whether there are any conflicts
@@ -358,9 +358,9 @@ func (daemon *Daemon) imageDeleteHelper(imgID image.ID, platform string, records
 // using the image. A soft conflict is any tags/digest referencing the given
 // image or any stopped container using the image. If ignoreSoftConflicts is
 // true, this function will not check for soft conflict conditions.
-func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, platform string, mask conflictType) *imageDeleteConflict {
+func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict {
 	// Check if the image has any descendant images.
-	if mask&conflictDependentChild != 0 && len(daemon.stores[platform].imageStore.Children(imgID)) > 0 {
+	if mask&conflictDependentChild != 0 && len(daemon.imageStore.Children(imgID)) > 0 {
 		return &imageDeleteConflict{
 			hard: true,
 			imgID: imgID,
@@ -411,6 +411,6 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, platform string,
 // imageIsDangling returns whether the given image is "dangling" which means
 // that there are no repository references to the given image and it has no
 // child images.
-func (daemon *Daemon) imageIsDangling(imgID image.ID, platform string) bool {
-	return !(len(daemon.referenceStore.References(imgID.Digest())) > 0 || len(daemon.stores[platform].imageStore.Children(imgID)) > 0)
+func (daemon *Daemon) imageIsDangling(imgID image.ID) bool {
+	return !(len(daemon.referenceStore.References(imgID.Digest())) > 0 || len(daemon.imageStore.Children(imgID)) > 0)
 }

View file

@@ -2,10 +2,8 @@ package daemon
 import (
 	"io"
-	"runtime"
 	"github.com/docker/docker/image/tarexport"
-	"github.com/docker/docker/pkg/system"
 )
 // ExportImage exports a list of images to the given output stream. The
@@ -14,12 +12,7 @@ import (
 // the same tag are exported. names is the set of tags to export, and
 // outStream is the writer which the images are written to.
 func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error {
-	// TODO @jhowardmsft LCOW. This will need revisiting later.
-	platform := runtime.GOOS
-	if system.LCOWSupported() {
-		platform = "linux"
-	}
-	imageExporter := tarexport.NewTarExporter(daemon.stores[platform].imageStore, daemon.stores[platform].layerStore, daemon.referenceStore, daemon)
+	imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon)
 	return imageExporter.Save(names, outStream)
 }
@@ -27,11 +20,6 @@ func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error {
 // complement of ImageExport. The input stream is an uncompressed tar
 // ball containing images and metadata.
 func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error {
-	// TODO @jhowardmsft LCOW. This will need revisiting later.
-	platform := runtime.GOOS
-	if system.LCOWSupported() {
-		platform = "linux"
-	}
-	imageExporter := tarexport.NewTarExporter(daemon.stores[platform].imageStore, daemon.stores[platform].layerStore, daemon.referenceStore, daemon)
+	imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon)
 	return imageExporter.Load(inTar, outStream, quiet)
 }

View file

@@ -2,7 +2,6 @@ package daemon
 import (
 	"fmt"
-	"runtime"
 	"time"
 	"github.com/docker/distribution/reference"
@@ -19,12 +18,6 @@ func (daemon *Daemon) ImageHistory(name string) ([]*image.HistoryResponseItem, e
 		return nil, err
 	}
-	// If the image OS isn't set, assume it's the host OS
-	platform := img.OS
-	if platform == "" {
-		platform = runtime.GOOS
-	}
 	history := []*image.HistoryResponseItem{}
 	layerCounter := 0
@@ -40,12 +33,12 @@ func (daemon *Daemon) ImageHistory(name string) ([]*image.HistoryResponseItem, e
 		}
 		rootFS.Append(img.RootFS.DiffIDs[layerCounter])
-		l, err := daemon.stores[platform].layerStore.Get(rootFS.ChainID())
+		l, err := daemon.layerStore.Get(rootFS.ChainID())
 		if err != nil {
 			return nil, err
 		}
 		layerSize, err = l.DiffSize()
-		layer.ReleaseAndLog(daemon.stores[platform].layerStore, l)
+		layer.ReleaseAndLog(daemon.layerStore, l)
 		if err != nil {
 			return nil, err
 		}

View file

@@ -19,9 +19,9 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
 	}
 	// If the image OS isn't set, assume it's the host OS
-	platform := img.OS
-	if platform == "" {
-		platform = runtime.GOOS
+	os := img.OS
+	if os == "" {
+		os = runtime.GOOS
 	}
 	refs := daemon.referenceStore.References(img.ID().Digest())
@@ -40,11 +40,11 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
 	var layerMetadata map[string]string
 	layerID := img.RootFS.ChainID()
 	if layerID != "" {
-		l, err := daemon.stores[platform].layerStore.Get(layerID)
+		l, err := daemon.layerStore.Get(layerID)
 		if err != nil {
 			return nil, err
 		}
-		defer layer.ReleaseAndLog(daemon.stores[platform].layerStore, l)
+		defer layer.ReleaseAndLog(daemon.layerStore, l)
 		size, err = l.Size()
 		if err != nil {
 			return nil, err
@@ -61,7 +61,7 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
 		comment = img.History[len(img.History)-1].Comment
 	}
-	lastUpdated, err := daemon.stores[platform].imageStore.GetLastUpdated(img.ID())
+	lastUpdated, err := daemon.imageStore.GetLastUpdated(img.ID())
 	if err != nil {
 		return nil, err
 	}
@@ -79,7 +79,7 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
 		Author: img.Author,
 		Config: img.Config,
 		Architecture: img.Architecture,
-		Os: platform,
+		Os: os,
 		OsVersion: img.OSVersion,
 		Size: size,
 		VirtualSize: size, // TODO: field unused, deprecate
@@ -89,7 +89,7 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
 		},
 	}
-	imageInspect.GraphDriver.Name = daemon.GraphDriverName(platform)
+	imageInspect.GraphDriver.Name = daemon.GraphDriverName(os)
 	imageInspect.GraphDriver.Data = layerMetadata
 	return imageInspect, nil

View file

@ -19,7 +19,7 @@ import (
// PullImage initiates a pull operation. image is the repository name to pull, and // PullImage initiates a pull operation. image is the repository name to pull, and
// tag may be either empty, or indicate a specific tag to pull. // tag may be either empty, or indicate a specific tag to pull.
func (daemon *Daemon) PullImage(ctx context.Context, image, tag, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { func (daemon *Daemon) PullImage(ctx context.Context, image, tag, os string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
// Special case: "pull -a" may send an image name with a // Special case: "pull -a" may send an image name with a
// trailing :. This is ugly, but let's not break API // trailing :. This is ugly, but let's not break API
// compatibility. // compatibility.
@ -44,10 +44,10 @@ func (daemon *Daemon) PullImage(ctx context.Context, image, tag, platform string
} }
} }
return daemon.pullImageWithReference(ctx, ref, platform, metaHeaders, authConfig, outStream) return daemon.pullImageWithReference(ctx, ref, os, metaHeaders, authConfig, outStream)
} }
func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.Named, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.Named, os string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
// Include a buffer so that slow client connections don't affect // Include a buffer so that slow client connections don't affect
// transfer performance. // transfer performance.
progressChan := make(chan progress.Progress, 100) progressChan := make(chan progress.Progress, 100)
@ -62,8 +62,8 @@ func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.
}() }()
// Default to the host OS platform in case it hasn't been populated with an explicit value. // Default to the host OS platform in case it hasn't been populated with an explicit value.
if platform == "" { if os == "" {
platform = runtime.GOOS os = runtime.GOOS
} }
imagePullConfig := &distribution.ImagePullConfig{ imagePullConfig := &distribution.ImagePullConfig{
@ -73,13 +73,13 @@ func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.
ProgressOutput: progress.ChanOutput(progressChan), ProgressOutput: progress.ChanOutput(progressChan),
RegistryService: daemon.RegistryService, RegistryService: daemon.RegistryService,
ImageEventLogger: daemon.LogImageEvent, ImageEventLogger: daemon.LogImageEvent,
MetadataStore: daemon.stores[platform].distributionMetadataStore, MetadataStore: daemon.distributionMetadataStore,
ImageStore: distribution.NewImageConfigStoreFromStore(daemon.stores[platform].imageStore), ImageStore: distribution.NewImageConfigStoreFromStore(daemon.imageStore),
ReferenceStore: daemon.referenceStore, ReferenceStore: daemon.referenceStore,
}, },
DownloadManager: daemon.downloadManager, DownloadManager: daemon.downloadManager,
Schema2Types: distribution.ImageTypes, Schema2Types: distribution.ImageTypes,
Platform: platform, OS: os,
} }
err := distribution.Pull(ctx, ref, imagePullConfig) err := distribution.Pull(ctx, ref, imagePullConfig)
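
This hunk renames the pull configuration's Platform field to OS and defaults it to runtime.GOOS when the caller leaves it empty. Below is a minimal sketch of that defaulting, using a hypothetical pullConfig type rather than the real distribution.ImagePullConfig.

package main

import (
	"fmt"
	"runtime"
)

// pullConfig is a hypothetical stand-in for the pull configuration,
// reduced to the one field this hunk is about.
type pullConfig struct {
	OS string // requested image operating system; empty means "host OS"
}

// normalizeOS mirrors the defaulting done before building the pull config:
// an unspecified OS falls back to the host's runtime.GOOS.
func normalizeOS(requested string) string {
	if requested == "" {
		return runtime.GOOS
	}
	return requested
}

func main() {
	cfg := pullConfig{OS: normalizeOS("")}
	fmt.Println(cfg.OS) // prints the host OS, e.g. "linux"
}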

View file

@ -2,7 +2,6 @@ package daemon
import ( import (
"io" "io"
"runtime"
"github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/manifest/schema2"
"github.com/docker/distribution/reference" "github.com/docker/distribution/reference"
@ -10,7 +9,6 @@ import (
"github.com/docker/docker/distribution" "github.com/docker/docker/distribution"
progressutils "github.com/docker/docker/distribution/utils" progressutils "github.com/docker/docker/distribution/utils"
"github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/system"
"golang.org/x/net/context" "golang.org/x/net/context"
) )
@ -41,12 +39,6 @@ func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHead
close(writesDone) close(writesDone)
}() }()
// TODO @jhowardmsft LCOW Support. This will require revisiting. For now, hard-code.
platform := runtime.GOOS
if system.LCOWSupported() {
platform = "linux"
}
imagePushConfig := &distribution.ImagePushConfig{ imagePushConfig := &distribution.ImagePushConfig{
Config: distribution.Config{ Config: distribution.Config{
MetaHeaders: metaHeaders, MetaHeaders: metaHeaders,
@ -54,12 +46,12 @@ func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHead
ProgressOutput: progress.ChanOutput(progressChan), ProgressOutput: progress.ChanOutput(progressChan),
RegistryService: daemon.RegistryService, RegistryService: daemon.RegistryService,
ImageEventLogger: daemon.LogImageEvent, ImageEventLogger: daemon.LogImageEvent,
MetadataStore: daemon.stores[platform].distributionMetadataStore, MetadataStore: daemon.distributionMetadataStore,
ImageStore: distribution.NewImageConfigStoreFromStore(daemon.stores[platform].imageStore), ImageStore: distribution.NewImageConfigStoreFromStore(daemon.imageStore),
ReferenceStore: daemon.referenceStore, ReferenceStore: daemon.referenceStore,
}, },
ConfigMediaType: schema2.MediaTypeImageConfig, ConfigMediaType: schema2.MediaTypeImageConfig,
LayerStore: distribution.NewLayerProviderFromStore(daemon.stores[platform].layerStore), LayerStore: distribution.NewLayerProviderFromStore(daemon.layerStore),
TrustKey: daemon.trustKey, TrustKey: daemon.trustKey,
UploadManager: daemon.uploadManager, UploadManager: daemon.uploadManager,
} }

View file

@ -8,7 +8,7 @@ import (
// TagImage creates the tag specified by newTag, pointing to the image named // TagImage creates the tag specified by newTag, pointing to the image named
// imageName (alternatively, imageName can also be an image ID). // imageName (alternatively, imageName can also be an image ID).
func (daemon *Daemon) TagImage(imageName, repository, tag string) error { func (daemon *Daemon) TagImage(imageName, repository, tag string) error {
imageID, os, err := daemon.GetImageIDAndOS(imageName) imageID, _, err := daemon.GetImageIDAndOS(imageName)
if err != nil { if err != nil {
return err return err
} }
@ -23,16 +23,16 @@ func (daemon *Daemon) TagImage(imageName, repository, tag string) error {
} }
} }
return daemon.TagImageWithReference(imageID, os, newTag) return daemon.TagImageWithReference(imageID, newTag)
} }
// TagImageWithReference adds the given reference to the image ID provided. // TagImageWithReference adds the given reference to the image ID provided.
func (daemon *Daemon) TagImageWithReference(imageID image.ID, os string, newTag reference.Named) error { func (daemon *Daemon) TagImageWithReference(imageID image.ID, newTag reference.Named) error {
if err := daemon.referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil { if err := daemon.referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil {
return err return err
} }
if err := daemon.stores[os].imageStore.SetLastUpdated(imageID); err != nil { if err := daemon.imageStore.SetLastUpdated(imageID); err != nil {
return err return err
} }
daemon.LogImageEvent(imageID.String(), reference.FamiliarString(newTag), "tag") daemon.LogImageEvent(imageID.String(), reference.FamiliarString(newTag), "tag")
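
With the per-OS stores gone, tagging reduces to three steps: add the tag to the reference store, bump last-updated on the single image store, and log the event. The toy sketch below shows that flow with hypothetical in-memory stores (refStore and imgStore are stand-ins, not the real types).

package main

import (
	"fmt"
	"time"
)

// refStore and imgStore are hypothetical in-memory stand-ins for the
// reference store and the (now single) image store.
type refStore struct{ tags map[string]string }        // tag -> image ID
type imgStore struct{ updated map[string]time.Time }  // image ID -> last updated

// tagImage mirrors the simplified TagImageWithReference flow: record the
// tag, stamp the image's last-updated time, and report the event. No OS
// parameter is needed any more.
func tagImage(rs *refStore, is *imgStore, imageID, tag string) error {
	rs.tags[tag] = imageID
	is.updated[imageID] = time.Now()
	fmt.Printf("event: tag %s -> %s\n", tag, imageID)
	return nil
}

func main() {
	rs := &refStore{tags: map[string]string{}}
	is := &imgStore{updated: map[string]time.Time{}}
	if err := tagImage(rs, is, "sha256:abc123", "example:latest"); err != nil {
		fmt.Println("tag failed:", err)
	}
}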

View file

@ -3,7 +3,6 @@ package daemon
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"runtime"
"sort" "sort"
"time" "time"
@ -15,7 +14,6 @@ import (
"github.com/docker/docker/container" "github.com/docker/docker/container"
"github.com/docker/docker/image" "github.com/docker/docker/image"
"github.com/docker/docker/layer" "github.com/docker/docker/layer"
"github.com/docker/docker/pkg/system"
) )
var acceptedImageFilterTags = map[string]bool{ var acceptedImageFilterTags = map[string]bool{
@ -36,12 +34,7 @@ func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created }
// Map returns a map of all images in the ImageStore // Map returns a map of all images in the ImageStore
func (daemon *Daemon) Map() map[image.ID]*image.Image { func (daemon *Daemon) Map() map[image.ID]*image.Image {
// TODO @jhowardmsft LCOW. This can be removed when imagestores are coalesced return daemon.imageStore.Map()
platform := runtime.GOOS
if system.LCOWSupported() {
platform = "linux"
}
return daemon.stores[platform].imageStore.Map()
} }
// Images returns a filtered list of images. filterArgs is a JSON-encoded set // Images returns a filtered list of images. filterArgs is a JSON-encoded set
@ -50,13 +43,6 @@ func (daemon *Daemon) Map() map[image.ID]*image.Image {
// named all controls whether all images in the graph are filtered, or just // named all controls whether all images in the graph are filtered, or just
// the heads. // the heads.
func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) { func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) {
// TODO @jhowardmsft LCOW. This can be removed when imagestores are coalesced
platform := runtime.GOOS
if system.LCOWSupported() {
platform = "linux"
}
var ( var (
allImages map[image.ID]*image.Image allImages map[image.ID]*image.Image
err error err error
@ -75,9 +61,9 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs
} }
} }
if danglingOnly { if danglingOnly {
allImages = daemon.stores[platform].imageStore.Heads() allImages = daemon.imageStore.Heads()
} else { } else {
allImages = daemon.stores[platform].imageStore.Map() allImages = daemon.imageStore.Map()
} }
var beforeFilter, sinceFilter *image.Image var beforeFilter, sinceFilter *image.Image
@ -130,7 +116,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs
layerID := img.RootFS.ChainID() layerID := img.RootFS.ChainID()
var size int64 var size int64
if layerID != "" { if layerID != "" {
l, err := daemon.stores[platform].layerStore.Get(layerID) l, err := daemon.layerStore.Get(layerID)
if err != nil { if err != nil {
// The layer may have been deleted between the call to `Map()` or // The layer may have been deleted between the call to `Map()` or
// `Heads()` and the call to `Get()`, so we just ignore this error // `Heads()` and the call to `Get()`, so we just ignore this error
@ -141,7 +127,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs
} }
size, err = l.Size() size, err = l.Size()
layer.ReleaseAndLog(daemon.stores[platform].layerStore, l) layer.ReleaseAndLog(daemon.layerStore, l)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -171,7 +157,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs
} }
} }
if newImage.RepoDigests == nil && newImage.RepoTags == nil { if newImage.RepoDigests == nil && newImage.RepoTags == nil {
if all || len(daemon.stores[platform].imageStore.Children(id)) == 0 { if all || len(daemon.imageStore.Children(id)) == 0 {
if imageFilters.Contains("dangling") && !danglingOnly { if imageFilters.Contains("dangling") && !danglingOnly {
//dangling=false case, so dangling image is not needed //dangling=false case, so dangling image is not needed
@ -193,7 +179,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs
// lazily init variables // lazily init variables
if imagesMap == nil { if imagesMap == nil {
allContainers = daemon.List() allContainers = daemon.List()
allLayers = daemon.stores[platform].layerStore.Map() allLayers = daemon.layerStore.Map()
imagesMap = make(map[*image.Image]*types.ImageSummary) imagesMap = make(map[*image.Image]*types.ImageSummary)
layerRefs = make(map[layer.ChainID]int) layerRefs = make(map[layer.ChainID]int)
} }
@ -261,19 +247,14 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
img *image.Image img *image.Image
err error err error
) )
for _, ds := range daemon.stores { if img, err = daemon.imageStore.Get(image.ID(id)); err != nil {
if img, err = ds.imageStore.Get(image.ID(id)); err == nil {
break
}
}
if err != nil {
return "", err return "", err
} }
var parentImg *image.Image var parentImg *image.Image
var parentChainID layer.ChainID var parentChainID layer.ChainID
if len(parent) != 0 { if len(parent) != 0 {
parentImg, err = daemon.stores[img.OperatingSystem()].imageStore.Get(image.ID(parent)) parentImg, err = daemon.imageStore.Get(image.ID(parent))
if err != nil { if err != nil {
return "", errors.Wrap(err, "error getting specified parent layer") return "", errors.Wrap(err, "error getting specified parent layer")
} }
@ -283,11 +264,11 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
parentImg = &image.Image{RootFS: rootFS} parentImg = &image.Image{RootFS: rootFS}
} }
l, err := daemon.stores[img.OperatingSystem()].layerStore.Get(img.RootFS.ChainID()) l, err := daemon.layerStore.Get(img.RootFS.ChainID())
if err != nil { if err != nil {
return "", errors.Wrap(err, "error getting image layer") return "", errors.Wrap(err, "error getting image layer")
} }
defer daemon.stores[img.OperatingSystem()].layerStore.Release(l) defer daemon.layerStore.Release(l)
ts, err := l.TarStreamFrom(parentChainID) ts, err := l.TarStreamFrom(parentChainID)
if err != nil { if err != nil {
@ -295,11 +276,11 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
} }
defer ts.Close() defer ts.Close()
newL, err := daemon.stores[img.OperatingSystem()].layerStore.Register(ts, parentChainID, layer.OS(img.OperatingSystem())) newL, err := daemon.layerStore.Register(ts, parentChainID, img.OperatingSystem())
if err != nil { if err != nil {
return "", errors.Wrap(err, "error registering layer") return "", errors.Wrap(err, "error registering layer")
} }
defer daemon.stores[img.OperatingSystem()].layerStore.Release(newL) defer daemon.layerStore.Release(newL)
newImage := *img newImage := *img
newImage.RootFS = nil newImage.RootFS = nil
@ -334,7 +315,7 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
return "", errors.Wrap(err, "error marshalling image config") return "", errors.Wrap(err, "error marshalling image config")
} }
newImgID, err := daemon.stores[img.OperatingSystem()].imageStore.Create(b) newImgID, err := daemon.imageStore.Create(b)
if err != nil { if err != nil {
return "", errors.Wrap(err, "error creating new image after squash") return "", errors.Wrap(err, "error creating new image after squash")
} }
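
SquashImage previously had to probe every per-OS store to find the image; now a single Get on the coalesced store either resolves it or returns the error. The self-contained sketch below illustrates that lookup change with a hypothetical store type.

package main

import (
	"errors"
	"fmt"
)

// store is a hypothetical image store with the only method this sketch needs.
type store struct{ images map[string]string }

func (s *store) Get(id string) (string, error) {
	img, ok := s.images[id]
	if !ok {
		return "", errors.New("image not found: " + id)
	}
	return img, nil
}

// Before: probe every per-OS store until one answers.
func getFromMany(stores map[string]*store, id string) (string, error) {
	var lastErr error
	for _, s := range stores {
		img, err := s.Get(id)
		if err == nil {
			return img, nil
		}
		lastErr = err
	}
	return "", lastErr
}

// After: one store, one lookup.
func getFromOne(s *store, id string) (string, error) { return s.Get(id) }

func main() {
	single := &store{images: map[string]string{"sha256:abc": "busybox"}}
	img, err := getFromOne(single, "sha256:abc")
	fmt.Println(img, err)

	_, err = getFromMany(map[string]*store{"linux": single}, "sha256:missing")
	fmt.Println(err)
}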

View file

@ -91,11 +91,11 @@ func (daemon *Daemon) ImportImage(src string, repository, os string, tag string,
if err != nil { if err != nil {
return err return err
} }
l, err := daemon.stores[os].layerStore.Register(inflatedLayerData, "", layer.OS(os)) l, err := daemon.layerStore.Register(inflatedLayerData, "", os)
if err != nil { if err != nil {
return err return err
} }
defer layer.ReleaseAndLog(daemon.stores[os].layerStore, l) defer layer.ReleaseAndLog(daemon.layerStore, l)
created := time.Now().UTC() created := time.Now().UTC()
imgConfig, err := json.Marshal(&image.Image{ imgConfig, err := json.Marshal(&image.Image{
@ -120,14 +120,14 @@ func (daemon *Daemon) ImportImage(src string, repository, os string, tag string,
return err return err
} }
id, err := daemon.stores[os].imageStore.Create(imgConfig) id, err := daemon.imageStore.Create(imgConfig)
if err != nil { if err != nil {
return err return err
} }
// FIXME: connect with commit code and call refstore directly // FIXME: connect with commit code and call refstore directly
if newRef != nil { if newRef != nil {
if err := daemon.TagImageWithReference(id, os, newRef); err != nil { if err := daemon.TagImageWithReference(id, newRef); err != nil {
return err return err
} }
} }

View file

@ -78,32 +78,26 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
securityOptions = append(securityOptions, "name=userns") securityOptions = append(securityOptions, "name=userns")
} }
imageCount := 0 var ds [][2]string
drivers := "" drivers := ""
for p, ds := range daemon.stores { for os, gd := range daemon.graphDrivers {
imageCount += len(ds.imageStore.Map()) ds = append(ds, daemon.layerStore.DriverStatus(os)...)
drivers += daemon.GraphDriverName(p) drivers += gd
if len(daemon.stores) > 1 { if len(daemon.graphDrivers) > 1 {
drivers += fmt.Sprintf(" (%s) ", p) drivers += fmt.Sprintf(" (%s) ", os)
} }
} }
// TODO @jhowardmsft LCOW support. For now, hard-code the platform shown for the driver status
p := runtime.GOOS
if system.LCOWSupported() {
p = "linux"
}
drivers = strings.TrimSpace(drivers) drivers = strings.TrimSpace(drivers)
v := &types.Info{ v := &types.Info{
ID: daemon.ID, ID: daemon.ID,
Containers: cRunning + cPaused + cStopped, Containers: cRunning + cPaused + cStopped,
ContainersRunning: cRunning, ContainersRunning: cRunning,
ContainersPaused: cPaused, ContainersPaused: cPaused,
ContainersStopped: cStopped, ContainersStopped: cStopped,
Images: imageCount, Images: len(daemon.imageStore.Map()),
Driver: drivers, Driver: drivers,
DriverStatus: daemon.stores[p].layerStore.DriverStatus(), DriverStatus: ds,
Plugins: daemon.showPluginsInfo(), Plugins: daemon.showPluginsInfo(),
IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled,
BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled, BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled,
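
SystemInfo now builds the driver summary from the daemon.graphDrivers map (OS to driver name) and collects per-OS status from the one layer store, instead of looping over per-OS stores. Below is a runnable sketch of that aggregation; driverStatus here is a hypothetical stand-in for the layer store's DriverStatus(os).

package main

import (
	"fmt"
	"strings"
)

// driverStatus is a hypothetical stand-in for the layer store's
// DriverStatus(os) result: a list of key/value pairs.
func driverStatus(os string) [][2]string {
	return [][2]string{{"Backing Filesystem", "extfs (" + os + ")"}}
}

func main() {
	// graphDrivers mirrors the new daemon.graphDrivers map: OS -> driver name.
	graphDrivers := map[string]string{"linux": "overlay2", "windows": "windowsfilter"}

	var ds [][2]string
	drivers := ""
	for os, gd := range graphDrivers {
		ds = append(ds, driverStatus(os)...)
		drivers += gd
		if len(graphDrivers) > 1 {
			drivers += fmt.Sprintf(" (%s) ", os)
		}
	}
	drivers = strings.TrimSpace(drivers)

	fmt.Println("Driver:", drivers)
	fmt.Println("DriverStatus:", ds)
}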

View file

@ -323,7 +323,7 @@ func (daemon *Daemon) foldFilter(view container.View, config *types.ContainerLis
if psFilters.Contains("ancestor") { if psFilters.Contains("ancestor") {
ancestorFilter = true ancestorFilter = true
psFilters.WalkValues("ancestor", func(ancestor string) error { psFilters.WalkValues("ancestor", func(ancestor string) error {
id, os, err := daemon.GetImageIDAndOS(ancestor) id, _, err := daemon.GetImageIDAndOS(ancestor)
if err != nil { if err != nil {
logrus.Warnf("Error while looking up for image %v", ancestor) logrus.Warnf("Error while looking up for image %v", ancestor)
return nil return nil
@ -333,7 +333,7 @@ func (daemon *Daemon) foldFilter(view container.View, config *types.ContainerLis
return nil return nil
} }
// Then walk down the graph and put the imageIds in imagesFilter // Then walk down the graph and put the imageIds in imagesFilter
populateImageFilterByParents(imagesFilter, id, daemon.stores[os].imageStore.Children) populateImageFilterByParents(imagesFilter, id, daemon.imageStore.Children)
return nil return nil
}) })
} }

View file

@ -138,9 +138,9 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) {
max := len(img.RootFS.DiffIDs) max := len(img.RootFS.DiffIDs)
for i := 1; i <= max; i++ { for i := 1; i <= max; i++ {
img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i] img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i]
layerPath, err := layer.GetLayerPath(daemon.stores[c.OS].layerStore, img.RootFS.ChainID()) layerPath, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID())
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.stores[c.OS].layerStore, img.RootFS.ChainID(), err) return nil, fmt.Errorf("failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err)
} }
// Reverse order, expecting parent most first // Reverse order, expecting parent most first
s.Windows.LayerFolders = append([]string{layerPath}, s.Windows.LayerFolders...) s.Windows.LayerFolders = append([]string{layerPath}, s.Windows.LayerFolders...)
@ -210,15 +210,18 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) {
NetworkSharedContainerName: networkSharedContainerID, NetworkSharedContainerName: networkSharedContainerID,
} }
if img.OS == "windows" { switch img.OS {
case "windows":
if err := daemon.createSpecWindowsFields(c, &s, isHyperV); err != nil { if err := daemon.createSpecWindowsFields(c, &s, isHyperV); err != nil {
return nil, err return nil, err
} }
} else { case "linux":
// TODO @jhowardmsft LCOW Support. Modify this check when running in dual-mode if !system.LCOWSupported() {
if system.LCOWSupported() && img.OS == "linux" { return nil, fmt.Errorf("Linux containers on Windows are not supported")
daemon.createSpecLinuxFields(c, &s)
} }
daemon.createSpecLinuxFields(c, &s)
default:
return nil, fmt.Errorf("Unsupported platform %q", img.OS)
} }
return (*specs.Spec)(&s), nil return (*specs.Spec)(&s), nil
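
The spec generation replaces the old if/else (which silently skipped the Linux fields unless LCOW was enabled) with an explicit switch on img.OS, so unsupported platforms now fail with an error. Here is a sketch of the same control flow; lcowSupported is a hypothetical constant standing in for system.LCOWSupported().

package main

import "fmt"

// lcowSupported is a hypothetical stand-in for system.LCOWSupported().
const lcowSupported = false

// populateSpec mirrors the switch structure of the new createSpec code:
// windows always works, linux only when LCOW is enabled, anything else errors.
func populateSpec(imgOS string) (string, error) {
	switch imgOS {
	case "windows":
		return "windows spec fields set", nil
	case "linux":
		if !lcowSupported {
			return "", fmt.Errorf("Linux containers on Windows are not supported")
		}
		return "linux spec fields set", nil
	default:
		return "", fmt.Errorf("Unsupported platform %q", imgOS)
	}
}

func main() {
	for _, os := range []string{"windows", "linux", "solaris"} {
		result, err := populateSpec(os)
		fmt.Println(os, "->", result, err)
	}
}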

View file

@ -3,7 +3,6 @@ package daemon
import ( import (
"fmt" "fmt"
"regexp" "regexp"
"runtime"
"sync/atomic" "sync/atomic"
"time" "time"
@ -14,7 +13,6 @@ import (
"github.com/docker/docker/image" "github.com/docker/docker/image"
"github.com/docker/docker/layer" "github.com/docker/docker/layer"
"github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/directory"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/runconfig" "github.com/docker/docker/runconfig"
"github.com/docker/docker/volume" "github.com/docker/docker/volume"
"github.com/docker/libnetwork" "github.com/docker/libnetwork"
@ -162,12 +160,6 @@ func (daemon *Daemon) VolumesPrune(ctx context.Context, pruneFilters filters.Arg
// ImagesPrune removes unused images // ImagesPrune removes unused images
func (daemon *Daemon) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) { func (daemon *Daemon) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) {
// TODO @jhowardmsft LCOW Support: This will need revisiting later.
platform := runtime.GOOS
if system.LCOWSupported() {
platform = "linux"
}
if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) { if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) {
return nil, errPruneRunning return nil, errPruneRunning
} }
@ -197,9 +189,9 @@ func (daemon *Daemon) ImagesPrune(ctx context.Context, pruneFilters filters.Args
var allImages map[image.ID]*image.Image var allImages map[image.ID]*image.Image
if danglingOnly { if danglingOnly {
allImages = daemon.stores[platform].imageStore.Heads() allImages = daemon.imageStore.Heads()
} else { } else {
allImages = daemon.stores[platform].imageStore.Map() allImages = daemon.imageStore.Map()
} }
allContainers := daemon.List() allContainers := daemon.List()
imageRefs := map[string]bool{} imageRefs := map[string]bool{}
@ -213,7 +205,7 @@ func (daemon *Daemon) ImagesPrune(ctx context.Context, pruneFilters filters.Args
} }
// Filter intermediary images and get their unique size // Filter intermediary images and get their unique size
allLayers := daemon.stores[platform].layerStore.Map() allLayers := daemon.layerStore.Map()
topImages := map[image.ID]*image.Image{} topImages := map[image.ID]*image.Image{}
for id, img := range allImages { for id, img := range allImages {
select { select {
@ -221,7 +213,7 @@ func (daemon *Daemon) ImagesPrune(ctx context.Context, pruneFilters filters.Args
return nil, ctx.Err() return nil, ctx.Err()
default: default:
dgst := digest.Digest(id) dgst := digest.Digest(id)
if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.stores[platform].imageStore.Children(id)) != 0 { if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 {
continue continue
} }
if !until.IsZero() && img.Created.After(until) { if !until.IsZero() && img.Created.After(until) {

View file

@ -222,7 +222,7 @@ func (daemon *Daemon) Cleanup(container *container.Container) {
if err := daemon.conditionalUnmountOnCleanup(container); err != nil { if err := daemon.conditionalUnmountOnCleanup(container); err != nil {
// FIXME: remove once reference counting for graphdrivers has been refactored // FIXME: remove once reference counting for graphdrivers has been refactored
// Ensure that all the mounts are gone // Ensure that all the mounts are gone
if mountid, err := daemon.stores[container.OS].layerStore.GetMountID(container.ID); err == nil { if mountid, err := daemon.layerStore.GetMountID(container.ID); err == nil {
daemon.cleanupMountsByID(mountid) daemon.cleanupMountsByID(mountid)
} }
} }

View file

@ -59,9 +59,9 @@ type ImagePullConfig struct {
// Schema2Types is the valid schema2 configuration types allowed // Schema2Types is the valid schema2 configuration types allowed
// by the pull operation. // by the pull operation.
Schema2Types []string Schema2Types []string
// Platform is the requested platform of the image being pulled to ensure it can be validated // OS is the requested operating system of the image being pulled to ensure it can be validated
// when the host platform supports multiple image operating systems. // when the host OS supports multiple image operating systems.
Platform string OS string
} }
// ImagePushConfig stores push configuration. // ImagePushConfig stores push configuration.
@ -86,7 +86,7 @@ type ImagePushConfig struct {
type ImageConfigStore interface { type ImageConfigStore interface {
Put([]byte) (digest.Digest, error) Put([]byte) (digest.Digest, error)
Get(digest.Digest) ([]byte, error) Get(digest.Digest) ([]byte, error)
RootFSAndOSFromConfig([]byte) (*image.RootFS, layer.OS, error) RootFSAndOSFromConfig([]byte) (*image.RootFS, string, error)
} }
// PushLayerProvider provides layers to be pushed by ChainID. // PushLayerProvider provides layers to be pushed by ChainID.
@ -112,7 +112,7 @@ type RootFSDownloadManager interface {
// returns the final rootfs. // returns the final rootfs.
// Given progress output to track download progress // Given progress output to track download progress
// Returns function to release download resources // Returns function to release download resources
Download(ctx context.Context, initialRootFS image.RootFS, os layer.OS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) Download(ctx context.Context, initialRootFS image.RootFS, os string, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error)
} }
type imageConfigStore struct { type imageConfigStore struct {
@ -140,7 +140,7 @@ func (s *imageConfigStore) Get(d digest.Digest) ([]byte, error) {
return img.RawJSON(), nil return img.RawJSON(), nil
} }
func (s *imageConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, layer.OS, error) { func (s *imageConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, string, error) {
var unmarshalledConfig image.Image var unmarshalledConfig image.Image
if err := json.Unmarshal(c, &unmarshalledConfig); err != nil { if err := json.Unmarshal(c, &unmarshalledConfig); err != nil {
return nil, "", err return nil, "", err
@ -154,11 +154,11 @@ func (s *imageConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, layer
return nil, "", fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) return nil, "", fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS)
} }
os := "" os := unmarshalledConfig.OS
if runtime.GOOS == "windows" { if os == "" {
os = unmarshalledConfig.OS os = runtime.GOOS
} }
return unmarshalledConfig.RootFS, layer.OS(os), nil return unmarshalledConfig.RootFS, os, nil
} }
type storeLayerProvider struct { type storeLayerProvider struct {

View file

@ -26,17 +26,15 @@ type Store interface {
type FSMetadataStore struct { type FSMetadataStore struct {
sync.RWMutex sync.RWMutex
basePath string basePath string
platform string
} }
// NewFSMetadataStore creates a new filesystem-based metadata store. // NewFSMetadataStore creates a new filesystem-based metadata store.
func NewFSMetadataStore(basePath, platform string) (*FSMetadataStore, error) { func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) {
if err := os.MkdirAll(basePath, 0700); err != nil { if err := os.MkdirAll(basePath, 0700); err != nil {
return nil, err return nil, err
} }
return &FSMetadataStore{ return &FSMetadataStore{
basePath: basePath, basePath: basePath,
platform: platform,
}, nil }, nil
} }
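
With a single set of stores, the distribution metadata store no longer needs to know a platform; the constructor just creates the base directory. Below is a trimmed, hypothetical version of that constructor, using only the standard library.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sync"
)

// fsMetadataStore is a hypothetical, trimmed-down version of the
// filesystem metadata store after the platform field was dropped:
// it only needs a base path and a lock.
type fsMetadataStore struct {
	sync.RWMutex
	basePath string
}

func newFSMetadataStore(basePath string) (*fsMetadataStore, error) {
	if err := os.MkdirAll(basePath, 0700); err != nil {
		return nil, err
	}
	return &fsMetadataStore{basePath: basePath}, nil
}

func main() {
	dir := filepath.Join(os.TempDir(), "metadata-demo")
	store, err := newFSMetadataStore(dir)
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println("metadata store rooted at", store.basePath)
}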

View file

@ -3,7 +3,6 @@ package metadata
import ( import (
"io/ioutil" "io/ioutil"
"os" "os"
"runtime"
"testing" "testing"
"github.com/docker/docker/layer" "github.com/docker/docker/layer"
@ -17,7 +16,7 @@ func TestV1IDService(t *testing.T) {
} }
defer os.RemoveAll(tmpDir) defer os.RemoveAll(tmpDir)
metadataStore, err := NewFSMetadataStore(tmpDir, runtime.GOOS) metadataStore, err := NewFSMetadataStore(tmpDir)
if err != nil { if err != nil {
t.Fatalf("could not create metadata store: %v", err) t.Fatalf("could not create metadata store: %v", err)
} }

View file

@ -6,7 +6,6 @@ import (
"math/rand" "math/rand"
"os" "os"
"reflect" "reflect"
"runtime"
"testing" "testing"
"github.com/docker/docker/layer" "github.com/docker/docker/layer"
@ -20,7 +19,7 @@ func TestV2MetadataService(t *testing.T) {
} }
defer os.RemoveAll(tmpDir) defer os.RemoveAll(tmpDir)
metadataStore, err := NewFSMetadataStore(tmpDir, runtime.GOOS) metadataStore, err := NewFSMetadataStore(tmpDir)
if err != nil { if err != nil {
t.Fatalf("could not create metadata store: %v", err) t.Fatalf("could not create metadata store: %v", err)
} }

View file

@ -21,7 +21,7 @@ type Puller interface {
// Pull tries to pull the image referenced by `tag` // Pull tries to pull the image referenced by `tag`
// Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint. // Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint.
// //
Pull(ctx context.Context, ref reference.Named, platform string) error Pull(ctx context.Context, ref reference.Named, os string) error
} }
// newPuller returns a Puller interface that will pull from either a v1 or v2 // newPuller returns a Puller interface that will pull from either a v1 or v2
@ -115,12 +115,12 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo
continue continue
} }
// Make sure we default the platform if it hasn't been supplied // Make sure we default the OS if it hasn't been supplied
if imagePullConfig.Platform == "" { if imagePullConfig.OS == "" {
imagePullConfig.Platform = runtime.GOOS imagePullConfig.OS = runtime.GOOS
} }
if err := puller.Pull(ctx, ref, imagePullConfig.Platform); err != nil { if err := puller.Pull(ctx, ref, imagePullConfig.OS); err != nil {
// Was this pull cancelled? If so, don't try to fall // Was this pull cancelled? If so, don't try to fall
// back. // back.
fallback := false fallback := false

View file

@ -36,7 +36,7 @@ type v1Puller struct {
session *registry.Session session *registry.Session
} }
func (p *v1Puller) Pull(ctx context.Context, ref reference.Named, platform string) error { func (p *v1Puller) Pull(ctx context.Context, ref reference.Named, os string) error {
if _, isCanonical := ref.(reference.Canonical); isCanonical { if _, isCanonical := ref.(reference.Canonical); isCanonical {
// Allowing fallback, because HTTPS v1 is before HTTP v2 // Allowing fallback, because HTTPS v1 is before HTTP v2
return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}} return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}}

View file

@ -62,7 +62,7 @@ type v2Puller struct {
confirmedV2 bool confirmedV2 bool
} }
func (p *v2Puller) Pull(ctx context.Context, ref reference.Named, platform string) (err error) { func (p *v2Puller) Pull(ctx context.Context, ref reference.Named, os string) (err error) {
// TODO(tiborvass): was ReceiveTimeout // TODO(tiborvass): was ReceiveTimeout
p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
if err != nil { if err != nil {
@ -70,7 +70,7 @@ func (p *v2Puller) Pull(ctx context.Context, ref reference.Named, platform strin
return err return err
} }
if err = p.pullV2Repository(ctx, ref, platform); err != nil { if err = p.pullV2Repository(ctx, ref, os); err != nil {
if _, ok := err.(fallbackError); ok { if _, ok := err.(fallbackError); ok {
return err return err
} }
@ -85,10 +85,10 @@ func (p *v2Puller) Pull(ctx context.Context, ref reference.Named, platform strin
return err return err
} }
func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named, platform string) (err error) { func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named, os string) (err error) {
var layersDownloaded bool var layersDownloaded bool
if !reference.IsNameOnly(ref) { if !reference.IsNameOnly(ref) {
layersDownloaded, err = p.pullV2Tag(ctx, ref, platform) layersDownloaded, err = p.pullV2Tag(ctx, ref, os)
if err != nil { if err != nil {
return err return err
} }
@ -110,7 +110,7 @@ func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named, pl
if err != nil { if err != nil {
return err return err
} }
pulledNew, err := p.pullV2Tag(ctx, tagRef, platform) pulledNew, err := p.pullV2Tag(ctx, tagRef, os)
if err != nil { if err != nil {
// Since this is the pull-all-tags case, don't // Since this is the pull-all-tags case, don't
// allow an error pulling a particular tag to // allow an error pulling a particular tag to
@ -488,9 +488,9 @@ func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unv
descriptors = append(descriptors, layerDescriptor) descriptors = append(descriptors, layerDescriptor)
} }
// The v1 manifest itself doesn't directly contain a platform. However, // The v1 manifest itself doesn't directly contain an OS. However,
// the history does, but unfortunately that's a string, so search through // the history does, but unfortunately that's a string, so search through
// all the history until hopefully we find one which indicates the os. // all the history until hopefully we find one which indicates the OS.
// supertest2014/nyan is an example of a registry image with schemav1. // supertest2014/nyan is an example of a registry image with schemav1.
configOS := runtime.GOOS configOS := runtime.GOOS
if system.LCOWSupported() { if system.LCOWSupported() {
@ -514,7 +514,7 @@ func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unv
return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configOS, requestedOS) return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configOS, requestedOS)
} }
resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, layer.OS(configOS), descriptors, p.config.ProgressOutput) resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, configOS, descriptors, p.config.ProgressOutput)
if err != nil { if err != nil {
return "", "", err return "", "", err
} }
@ -588,7 +588,7 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s
downloadedRootFS *image.RootFS // rootFS from registered layers downloadedRootFS *image.RootFS // rootFS from registered layers
configRootFS *image.RootFS // rootFS from configuration configRootFS *image.RootFS // rootFS from configuration
release func() // release resources from rootFS download release func() // release resources from rootFS download
configOS layer.OS // for LCOW when registering downloaded layers configOS string // for LCOW when registering downloaded layers
) )
// https://github.com/docker/docker/issues/24766 - Err on the side of caution, // https://github.com/docker/docker/issues/24766 - Err on the side of caution,
@ -615,7 +615,7 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s
// Early bath if the requested OS doesn't match that of the configuration. // Early bath if the requested OS doesn't match that of the configuration.
// This avoids doing the download, only to potentially fail later. // This avoids doing the download, only to potentially fail later.
if !strings.EqualFold(string(configOS), requestedOS) { if !strings.EqualFold(configOS, requestedOS) {
return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configOS, requestedOS) return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configOS, requestedOS)
} }
@ -633,7 +633,7 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s
rootFS image.RootFS rootFS image.RootFS
) )
downloadRootFS := *image.NewRootFS() downloadRootFS := *image.NewRootFS()
rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, layer.OS(requestedOS), descriptors, p.config.ProgressOutput) rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, requestedOS, descriptors, p.config.ProgressOutput)
if err != nil { if err != nil {
// Intentionally do not cancel the config download here // Intentionally do not cancel the config download here
// as the error from config download (if there is one) // as the error from config download (if there is one)
@ -698,7 +698,7 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s
return imageID, manifestDigest, nil return imageID, manifestDigest, nil
} }
func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, layer.OS, error) { func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, string, error) {
select { select {
case configJSON := <-configChan: case configJSON := <-configChan:
rootfs, os, err := s.RootFSAndOSFromConfig(configJSON) rootfs, os, err := s.RootFSAndOSFromConfig(configJSON)
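
With the OS carried as a plain string, pullSchema2 can compare the configuration's OS against the requested one using strings.EqualFold and bail out before downloading any layers. A small sketch of that early check:

package main

import (
	"fmt"
	"strings"
)

// checkOS mirrors the early check in pullSchema2: if the image
// configuration's OS does not match the OS the caller asked for, fail
// before any layers are downloaded.
func checkOS(configOS, requestedOS string) error {
	if !strings.EqualFold(configOS, requestedOS) {
		return fmt.Errorf("cannot download image with operating system %q when requesting %q", configOS, requestedOS)
	}
	return nil
}

func main() {
	fmt.Println(checkOS("linux", "linux"))   // <nil>
	fmt.Println(checkOS("windows", "linux")) // error before any download happens
}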

View file

@ -4,7 +4,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"runtime"
"time" "time"
"github.com/docker/distribution" "github.com/docker/distribution"
@ -23,7 +22,7 @@ const maxDownloadAttempts = 5
// registers and downloads those, taking into account dependencies between // registers and downloads those, taking into account dependencies between
// layers. // layers.
type LayerDownloadManager struct { type LayerDownloadManager struct {
layerStores map[string]layer.Store layerStore layer.Store
tm TransferManager tm TransferManager
waitDuration time.Duration waitDuration time.Duration
} }
@ -34,9 +33,9 @@ func (ldm *LayerDownloadManager) SetConcurrency(concurrency int) {
} }
// NewLayerDownloadManager returns a new LayerDownloadManager. // NewLayerDownloadManager returns a new LayerDownloadManager.
func NewLayerDownloadManager(layerStores map[string]layer.Store, concurrencyLimit int, options ...func(*LayerDownloadManager)) *LayerDownloadManager { func NewLayerDownloadManager(layerStore layer.Store, concurrencyLimit int, options ...func(*LayerDownloadManager)) *LayerDownloadManager {
manager := LayerDownloadManager{ manager := LayerDownloadManager{
layerStores: layerStores, layerStore: layerStore,
tm: NewTransferManager(concurrencyLimit), tm: NewTransferManager(concurrencyLimit),
waitDuration: time.Second, waitDuration: time.Second,
} }
@ -95,7 +94,7 @@ type DownloadDescriptorWithRegistered interface {
// Download method is called to get the layer tar data. Layers are then // Download method is called to get the layer tar data. Layers are then
// registered in the appropriate order. The caller must call the returned // registered in the appropriate order. The caller must call the returned
// release function once it is done with the returned RootFS object. // release function once it is done with the returned RootFS object.
func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, os layer.OS, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, os string, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
var ( var (
topLayer layer.Layer topLayer layer.Layer
topDownload *downloadTransfer topDownload *downloadTransfer
@ -105,11 +104,6 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima
downloadsByKey = make(map[string]*downloadTransfer) downloadsByKey = make(map[string]*downloadTransfer)
) )
// Assume that the operating system is the host OS if blank
if os == "" {
os = layer.OS(runtime.GOOS)
}
rootFS := initialRootFS rootFS := initialRootFS
for _, descriptor := range layers { for _, descriptor := range layers {
key := descriptor.Key() key := descriptor.Key()
@ -121,20 +115,20 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima
if err == nil { if err == nil {
getRootFS := rootFS getRootFS := rootFS
getRootFS.Append(diffID) getRootFS.Append(diffID)
l, err := ldm.layerStores[string(os)].Get(getRootFS.ChainID()) l, err := ldm.layerStore.Get(getRootFS.ChainID())
if err == nil { if err == nil {
// Layer already exists. // Layer already exists.
logrus.Debugf("Layer already exists: %s", descriptor.ID()) logrus.Debugf("Layer already exists: %s", descriptor.ID())
progress.Update(progressOutput, descriptor.ID(), "Already exists") progress.Update(progressOutput, descriptor.ID(), "Already exists")
if topLayer != nil { if topLayer != nil {
layer.ReleaseAndLog(ldm.layerStores[string(os)], topLayer) layer.ReleaseAndLog(ldm.layerStore, topLayer)
} }
topLayer = l topLayer = l
missingLayer = false missingLayer = false
rootFS.Append(diffID) rootFS.Append(diffID)
// Register this repository as a source of this layer. // Register this repository as a source of this layer.
withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered)
if hasRegistered { if hasRegistered { // As layerstore may set the driver
withRegistered.Registered(diffID) withRegistered.Registered(diffID)
} }
continue continue
@ -171,7 +165,7 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima
if topDownload == nil { if topDownload == nil {
return rootFS, func() { return rootFS, func() {
if topLayer != nil { if topLayer != nil {
layer.ReleaseAndLog(ldm.layerStores[string(os)], topLayer) layer.ReleaseAndLog(ldm.layerStore, topLayer)
} }
}, nil }, nil
} }
@ -182,7 +176,7 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima
defer func() { defer func() {
if topLayer != nil { if topLayer != nil {
layer.ReleaseAndLog(ldm.layerStores[string(os)], topLayer) layer.ReleaseAndLog(ldm.layerStore, topLayer)
} }
}() }()
@ -218,11 +212,11 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima
// complete before the registration step, and registers the downloaded data // complete before the registration step, and registers the downloaded data
// on top of parentDownload's resulting layer. Otherwise, it registers the // on top of parentDownload's resulting layer. Otherwise, it registers the
// layer on top of the ChainID given by parentLayer. // layer on top of the ChainID given by parentLayer.
func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer, os layer.OS) DoFunc { func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer, os string) DoFunc {
return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
d := &downloadTransfer{ d := &downloadTransfer{
Transfer: NewTransfer(), Transfer: NewTransfer(),
layerStore: ldm.layerStores[string(os)], layerStore: ldm.layerStore,
} }
go func() { go func() {
@ -382,11 +376,11 @@ func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor,
// parentDownload. This function does not log progress output because it would // parentDownload. This function does not log progress output because it would
// interfere with the progress reporting for sourceDownload, which has the same // interfere with the progress reporting for sourceDownload, which has the same
// Key. // Key.
func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer, os layer.OS) DoFunc { func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer, os string) DoFunc {
return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
d := &downloadTransfer{ d := &downloadTransfer{
Transfer: NewTransfer(), Transfer: NewTransfer(),
layerStore: ldm.layerStores[string(os)], layerStore: ldm.layerStore,
} }
go func() { go func() {

View file

@ -26,7 +26,7 @@ type mockLayer struct {
diffID layer.DiffID diffID layer.DiffID
chainID layer.ChainID chainID layer.ChainID
parent layer.Layer parent layer.Layer
os layer.OS os string
} }
func (ml *mockLayer) TarStream() (io.ReadCloser, error) { func (ml *mockLayer) TarStream() (io.ReadCloser, error) {
@ -57,7 +57,7 @@ func (ml *mockLayer) DiffSize() (size int64, err error) {
return 0, nil return 0, nil
} }
func (ml *mockLayer) OS() layer.OS { func (ml *mockLayer) OS() string {
return ml.os return ml.os
} }
@ -91,7 +91,7 @@ func (ls *mockLayerStore) Map() map[layer.ChainID]layer.Layer {
return layers return layers
} }
func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID, os layer.OS) (layer.Layer, error) { func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID, os string) (layer.Layer, error) {
return ls.RegisterWithDescriptor(reader, parentID, distribution.Descriptor{}) return ls.RegisterWithDescriptor(reader, parentID, distribution.Descriptor{})
} }
@ -131,7 +131,7 @@ func (ls *mockLayerStore) Get(chainID layer.ChainID) (layer.Layer, error) {
func (ls *mockLayerStore) Release(l layer.Layer) ([]layer.Metadata, error) { func (ls *mockLayerStore) Release(l layer.Layer) ([]layer.Metadata, error) {
return []layer.Metadata{}, nil return []layer.Metadata{}, nil
} }
func (ls *mockLayerStore) CreateRWLayer(string, layer.ChainID, *layer.CreateRWLayerOpts) (layer.RWLayer, error) { func (ls *mockLayerStore) CreateRWLayer(string, layer.ChainID, string, *layer.CreateRWLayerOpts) (layer.RWLayer, error) {
return nil, errors.New("not implemented") return nil, errors.New("not implemented")
} }
@ -150,11 +150,11 @@ func (ls *mockLayerStore) Cleanup() error {
return nil return nil
} }
func (ls *mockLayerStore) DriverStatus() [][2]string { func (ls *mockLayerStore) DriverStatus(string) [][2]string {
return [][2]string{} return [][2]string{}
} }
func (ls *mockLayerStore) DriverName() string { func (ls *mockLayerStore) DriverName(string) string {
return "mock" return "mock"
} }
@ -272,9 +272,7 @@ func TestSuccessfulDownload(t *testing.T) {
} }
layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)} layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)}
lsMap := make(map[string]layer.Store) ldm := NewLayerDownloadManager(layerStore, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond })
lsMap[runtime.GOOS] = layerStore
ldm := NewLayerDownloadManager(lsMap, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond })
progressChan := make(chan progress.Progress) progressChan := make(chan progress.Progress)
progressDone := make(chan struct{}) progressDone := make(chan struct{})
@ -293,13 +291,13 @@ func TestSuccessfulDownload(t *testing.T) {
firstDescriptor := descriptors[0].(*mockDownloadDescriptor) firstDescriptor := descriptors[0].(*mockDownloadDescriptor)
// Pre-register the first layer to simulate an already-existing layer // Pre-register the first layer to simulate an already-existing layer
l, err := layerStore.Register(firstDescriptor.mockTarStream(), "", layer.OS(runtime.GOOS)) l, err := layerStore.Register(firstDescriptor.mockTarStream(), "", runtime.GOOS)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
firstDescriptor.diffID = l.DiffID() firstDescriptor.diffID = l.DiffID()
rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), layer.OS(runtime.GOOS), descriptors, progress.ChanOutput(progressChan)) rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), runtime.GOOS, descriptors, progress.ChanOutput(progressChan))
if err != nil { if err != nil {
t.Fatalf("download error: %v", err) t.Fatalf("download error: %v", err)
} }
@ -336,9 +334,7 @@ func TestSuccessfulDownload(t *testing.T) {
func TestCancelledDownload(t *testing.T) { func TestCancelledDownload(t *testing.T) {
layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)} layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)}
lsMap := make(map[string]layer.Store) ldm := NewLayerDownloadManager(layerStore, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond })
lsMap[runtime.GOOS] = layerStore
ldm := NewLayerDownloadManager(lsMap, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond })
progressChan := make(chan progress.Progress) progressChan := make(chan progress.Progress)
progressDone := make(chan struct{}) progressDone := make(chan struct{})
@ -357,7 +353,7 @@ func TestCancelledDownload(t *testing.T) {
}() }()
descriptors := downloadDescriptors(nil) descriptors := downloadDescriptors(nil)
_, _, err := ldm.Download(ctx, *image.NewRootFS(), layer.OS(runtime.GOOS), descriptors, progress.ChanOutput(progressChan)) _, _, err := ldm.Download(ctx, *image.NewRootFS(), runtime.GOOS, descriptors, progress.ChanOutput(progressChan))
if err != context.Canceled { if err != context.Canceled {
t.Fatal("expected download to be cancelled") t.Fatal("expected download to be cancelled")
} }

View file

@ -3,13 +3,11 @@ package image
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"strings"
"sync" "sync"
"time" "time"
"github.com/docker/distribution/digestset" "github.com/docker/distribution/digestset"
"github.com/docker/docker/layer" "github.com/docker/docker/layer"
"github.com/docker/docker/pkg/system"
"github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
@ -47,17 +45,15 @@ type store struct {
images map[ID]*imageMeta images map[ID]*imageMeta
fs StoreBackend fs StoreBackend
digestSet *digestset.Set digestSet *digestset.Set
os string
} }
// NewImageStore returns new store object for given layer store // NewImageStore returns new store object for given layer store
func NewImageStore(fs StoreBackend, os string, ls LayerGetReleaser) (Store, error) { func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) {
is := &store{ is := &store{
ls: ls, ls: ls,
images: make(map[ID]*imageMeta), images: make(map[ID]*imageMeta),
fs: fs, fs: fs,
digestSet: digestset.NewSet(), digestSet: digestset.NewSet(),
os: os,
} }
// load all current images and retain layers // load all current images and retain layers
@ -118,14 +114,6 @@ func (is *store) Create(config []byte) (ID, error) {
return "", err return "", err
} }
// TODO @jhowardmsft - LCOW Support. This will need revisiting when coalescing the image stores.
// Integrity check - ensure we are creating something for the correct platform
if system.LCOWSupported() {
if strings.ToLower(img.OperatingSystem()) != strings.ToLower(is.os) {
return "", fmt.Errorf("cannot create entry for operating system %q in image store for operating system %q", img.OperatingSystem(), is.os)
}
}
// Must reject any config that references diffIDs from the history // Must reject any config that references diffIDs from the history
// which aren't among the rootfs layers. // which aren't among the rootfs layers.
rootFSLayers := make(map[layer.DiffID]struct{}) rootFSLayers := make(map[layer.DiffID]struct{})

View file

@ -1,7 +1,6 @@
package image package image
import ( import (
"runtime"
"testing" "testing"
"github.com/docker/docker/internal/testutil" "github.com/docker/docker/internal/testutil"
@ -26,7 +25,7 @@ func TestRestore(t *testing.T) {
err = fs.SetMetadata(id2, "parent", []byte(id1)) err = fs.SetMetadata(id2, "parent", []byte(id1))
assert.NoError(t, err) assert.NoError(t, err)
is, err := NewImageStore(fs, runtime.GOOS, &mockLayerGetReleaser{}) is, err := NewImageStore(fs, &mockLayerGetReleaser{})
assert.NoError(t, err) assert.NoError(t, err)
assert.Len(t, is.Map(), 2) assert.Len(t, is.Map(), 2)
@ -143,7 +142,7 @@ func TestParentReset(t *testing.T) {
func defaultImageStore(t *testing.T) (Store, func()) { func defaultImageStore(t *testing.T) (Store, func()) {
fsBackend, cleanup := defaultFSStoreBackend(t) fsBackend, cleanup := defaultFSStoreBackend(t)
store, err := NewImageStore(fsBackend, runtime.GOOS, &mockLayerGetReleaser{}) store, err := NewImageStore(fsBackend, &mockLayerGetReleaser{})
assert.NoError(t, err) assert.NoError(t, err)
return store, cleanup return store, cleanup

View file

@ -90,11 +90,11 @@ func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool)
} }
// On Windows, validate the platform, defaulting to windows if not present. // On Windows, validate the platform, defaulting to windows if not present.
os := layer.OS(img.OS) os := img.OS
if os == "" {
os = runtime.GOOS
}
if runtime.GOOS == "windows" { if runtime.GOOS == "windows" {
if os == "" {
os = "windows"
}
if (os != "windows") && (os != "linux") { if (os != "windows") && (os != "linux") {
return fmt.Errorf("configuration for this image has an unsupported operating system: %s", os) return fmt.Errorf("configuration for this image has an unsupported operating system: %s", os)
} }
@ -176,7 +176,7 @@ func (l *tarexporter) setParentID(id, parentID image.ID) error {
return l.is.SetParent(id, parentID) return l.is.SetParent(id, parentID)
} }
func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, os layer.OS, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) { func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, os string, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) {
// We use system.OpenSequential to use sequential file access on Windows, avoiding // We use system.OpenSequential to use sequential file access on Windows, avoiding
// depleting the standby list. On Linux, this equates to a regular os.Open. // depleting the standby list. On Linux, this equates to a regular os.Open.
rawTar, err := system.OpenSequential(filename) rawTar, err := system.OpenSequential(filename)
@ -409,19 +409,18 @@ func checkValidParent(img, parent *image.Image) bool {
return true return true
} }
func checkCompatibleOS(os string) error { func checkCompatibleOS(imageOS string) error {
// TODO @jhowardmsft LCOW - revisit for simultaneous platforms // always compatible if the images OS matches the host OS; also match an empty image OS
platform := runtime.GOOS if imageOS == runtime.GOOS || imageOS == "" {
if system.LCOWSupported() {
platform = "linux"
}
// always compatible if the OS matches; also match an empty OS
if os == platform || os == "" {
return nil return nil
} }
// for compatibility, only fail if the image or runtime OS is Windows // On non-Windows hosts, for compatibility, fail if the image is Windows.
if os == "windows" || platform == "windows" { if runtime.GOOS != "windows" && imageOS == "windows" {
return fmt.Errorf("cannot load %s image on %s", os, platform) return fmt.Errorf("cannot load %s image on %s", imageOS, runtime.GOOS)
}
// Finally, check the image OS is supported for the platform.
if err := system.ValidatePlatform(system.ParsePlatform(imageOS)); err != nil {
return fmt.Errorf("cannot load %s image on %s: %s", imageOS, runtime.GOOS, err)
} }
return nil return nil
} }
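The compatibility check now leans on system.ParsePlatform/ValidatePlatform instead of an LCOW special case. A hypothetical table-driven test, not taken from the repository, showing the outcomes this logic is meant to produce; on a Windows host with LCOW enabled, "linux" is also expected to pass via ValidatePlatform:

package tarexport // sketch only, not part of the commit

import (
    "runtime"
    "testing"
)

func TestCheckCompatibleOSSketch(t *testing.T) {
    cases := map[string]bool{ // imageOS -> error expected
        "":           false, // empty is treated as compatible
        runtime.GOOS: false, // the host OS is always compatible
    }
    if runtime.GOOS != "windows" {
        cases["windows"] = true // Windows images cannot be loaded elsewhere
    }
    for imageOS, wantErr := range cases {
        if err := checkCompatibleOS(imageOS); (err != nil) != wantErr {
            t.Errorf("checkCompatibleOS(%q): err = %v, want error = %v", imageOS, err, wantErr)
        }
    }
}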

View file

@ -6,6 +6,7 @@ import (
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"runtime"
) )
// DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file - // DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file -
@ -55,8 +56,8 @@ func (el *emptyLayer) Metadata() (map[string]string, error) {
return make(map[string]string), nil return make(map[string]string), nil
} }
func (el *emptyLayer) OS() OS { func (el *emptyLayer) OS() string {
return "" return runtime.GOOS
} }
// IsEmpty returns true if the layer is an EmptyLayer // IsEmpty returns true if the layer is an EmptyLayer

View file

@ -2,12 +2,14 @@
package layer package layer
import "runtime"
// SetOS writes the "os" file to the layer filestore // SetOS writes the "os" file to the layer filestore
func (fm *fileMetadataTransaction) SetOS(os OS) error { func (fm *fileMetadataTransaction) SetOS(os string) error {
return nil return nil
} }
// GetOS reads the "os" file from the layer filestore // GetOS reads the "os" file from the layer filestore
func (fms *fileMetadataStore) GetOS(layer ChainID) (OS, error) { func (fms *fileMetadataStore) GetOS(layer ChainID) (string, error) {
return "", nil return runtime.GOOS, nil
} }

View file

@ -8,7 +8,7 @@ import (
) )
// SetOS writes the "os" file to the layer filestore // SetOS writes the "os" file to the layer filestore
func (fm *fileMetadataTransaction) SetOS(os OS) error { func (fm *fileMetadataTransaction) SetOS(os string) error {
if os == "" { if os == "" {
return nil return nil
} }
@ -16,7 +16,7 @@ func (fm *fileMetadataTransaction) SetOS(os OS) error {
} }
// GetOS reads the "os" file from the layer filestore // GetOS reads the "os" file from the layer filestore
func (fms *fileMetadataStore) GetOS(layer ChainID) (OS, error) { func (fms *fileMetadataStore) GetOS(layer ChainID) (string, error) {
contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "os")) contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "os"))
if err != nil { if err != nil {
// For backwards compatibility, the os file may not exist. Default to "windows" if missing. // For backwards compatibility, the os file may not exist. Default to "windows" if missing.
@ -31,5 +31,5 @@ func (fms *fileMetadataStore) GetOS(layer ChainID) (OS, error) {
return "", fmt.Errorf("invalid operating system value: %s", content) return "", fmt.Errorf("invalid operating system value: %s", content)
} }
return OS(content), nil return content, nil
} }

View file

@ -65,14 +65,6 @@ func (id ChainID) String() string {
return string(id) return string(id)
} }
// OS is the operating system of a layer
type OS string
// String returns a string rendition of layers target operating system
func (id OS) String() string {
return string(id)
}
// DiffID is the hash of an individual layer tar. // DiffID is the hash of an individual layer tar.
type DiffID digest.Digest type DiffID digest.Digest
@ -109,7 +101,7 @@ type Layer interface {
Parent() Layer Parent() Layer
// OS returns the operating system of the layer // OS returns the operating system of the layer
OS() OS OS() string
// Size returns the size of the entire layer chain. The size // Size returns the size of the entire layer chain. The size
// is calculated from the total size of all files in the layers. // is calculated from the total size of all files in the layers.
@ -156,6 +148,9 @@ type RWLayer interface {
// Metadata returns the low level metadata for the mutable layer // Metadata returns the low level metadata for the mutable layer
Metadata() (map[string]string, error) Metadata() (map[string]string, error)
// OS returns the operating system of the writable layer
OS() string
} }
// Metadata holds information about a // Metadata holds information about a
@ -191,25 +186,25 @@ type CreateRWLayerOpts struct {
// Store represents a backend for managing both // Store represents a backend for managing both
// read-only and read-write layers. // read-only and read-write layers.
type Store interface { type Store interface {
Register(io.Reader, ChainID, OS) (Layer, error) Register(io.Reader, ChainID, string) (Layer, error)
Get(ChainID) (Layer, error) Get(ChainID) (Layer, error)
Map() map[ChainID]Layer Map() map[ChainID]Layer
Release(Layer) ([]Metadata, error) Release(Layer) ([]Metadata, error)
CreateRWLayer(id string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) CreateRWLayer(id string, parent ChainID, os string, opts *CreateRWLayerOpts) (RWLayer, error)
GetRWLayer(id string) (RWLayer, error) GetRWLayer(id string) (RWLayer, error)
GetMountID(id string) (string, error) GetMountID(id string) (string, error)
ReleaseRWLayer(RWLayer) ([]Metadata, error) ReleaseRWLayer(RWLayer) ([]Metadata, error)
Cleanup() error Cleanup() error
DriverStatus() [][2]string DriverStatus(os string) [][2]string
DriverName() string DriverName(os string) string
} }
// DescribableStore represents a layer store capable of storing // DescribableStore represents a layer store capable of storing
// descriptors for layers. // descriptors for layers.
type DescribableStore interface { type DescribableStore interface {
RegisterWithDescriptor(io.Reader, ChainID, OS, distribution.Descriptor) (Layer, error) RegisterWithDescriptor(io.Reader, ChainID, string, distribution.Descriptor) (Layer, error)
} }
// MetadataTransaction represents functions for setting layer metadata // MetadataTransaction represents functions for setting layer metadata
@ -220,7 +215,7 @@ type MetadataTransaction interface {
SetDiffID(DiffID) error SetDiffID(DiffID) error
SetCacheID(string) error SetCacheID(string) error
SetDescriptor(distribution.Descriptor) error SetDescriptor(distribution.Descriptor) error
SetOS(OS) error SetOS(string) error
TarSplitWriter(compressInput bool) (io.WriteCloser, error) TarSplitWriter(compressInput bool) (io.WriteCloser, error)
Commit(ChainID) error Commit(ChainID) error
@ -241,7 +236,7 @@ type MetadataStore interface {
GetDiffID(ChainID) (DiffID, error) GetDiffID(ChainID) (DiffID, error)
GetCacheID(ChainID) (string, error) GetCacheID(ChainID) (string, error)
GetDescriptor(ChainID) (distribution.Descriptor, error) GetDescriptor(ChainID) (distribution.Descriptor, error)
GetOS(ChainID) (OS, error) GetOS(ChainID) (string, error)
TarSplitReader(ChainID) (io.ReadCloser, error) TarSplitReader(ChainID) (io.ReadCloser, error)
SetMountID(string, string) error SetMountID(string, string) error
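With the interface above, every OS-sensitive call takes the target operating system explicitly, and an empty string falls back to the host OS. A minimal sketch of driving the coalesced store for a Linux layer on an LCOW-capable daemon; the identifiers and error handling are illustrative:

package sketch // illustrative only

import (
    "fmt"
    "io"

    "github.com/docker/docker/layer"
)

// addLinuxLayer registers a layer tar for the "linux" OS and creates a
// read-write layer on top of it, so both land on the Linux graphdriver.
func addLinuxLayer(ls layer.Store, tarStream io.Reader, parent layer.ChainID) (layer.RWLayer, error) {
    l, err := ls.Register(tarStream, parent, "linux")
    if err != nil {
        return nil, err
    }
    rw, err := ls.CreateRWLayer("example-container-id", l.ChainID(), "linux", nil)
    if err != nil {
        ls.Release(l)
        return nil, err
    }
    // Driver queries are per-OS as well; "" would fall back to the host OS.
    fmt.Println("linux layers handled by:", ls.DriverName("linux"))
    return rw, nil
}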

View file

@ -5,7 +5,7 @@ import (
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"strings" "runtime"
"sync" "sync"
"github.com/docker/distribution" "github.com/docker/distribution"
@ -28,70 +28,77 @@ import (
const maxLayerDepth = 125 const maxLayerDepth = 125
type layerStore struct { type layerStore struct {
store MetadataStore store MetadataStore
driver graphdriver.Driver drivers map[string]graphdriver.Driver
useTarSplit map[string]bool
layerMap map[ChainID]*roLayer layerMap map[ChainID]*roLayer
layerL sync.Mutex layerL sync.Mutex
mounts map[string]*mountedLayer mounts map[string]*mountedLayer
mountL sync.Mutex mountL sync.Mutex
useTarSplit bool
os string
} }
// StoreOptions are the options used to create a new Store instance // StoreOptions are the options used to create a new Store instance
type StoreOptions struct { type StoreOptions struct {
StorePath string Root string
GraphDrivers map[string]string
MetadataStorePathTemplate string MetadataStorePathTemplate string
GraphDriver string
GraphDriverOptions []string GraphDriverOptions []string
IDMappings *idtools.IDMappings IDMappings *idtools.IDMappings
PluginGetter plugingetter.PluginGetter PluginGetter plugingetter.PluginGetter
ExperimentalEnabled bool ExperimentalEnabled bool
OS string
} }
// NewStoreFromOptions creates a new Store instance // NewStoreFromOptions creates a new Store instance
func NewStoreFromOptions(options StoreOptions) (Store, error) { func NewStoreFromOptions(options StoreOptions) (Store, error) {
driver, err := graphdriver.New(options.GraphDriver, options.PluginGetter, graphdriver.Options{ drivers := make(map[string]graphdriver.Driver)
Root: options.StorePath, for os, drivername := range options.GraphDrivers {
DriverOptions: options.GraphDriverOptions, var err error
UIDMaps: options.IDMappings.UIDs(), drivers[os], err = graphdriver.New(drivername,
GIDMaps: options.IDMappings.GIDs(), options.PluginGetter,
ExperimentalEnabled: options.ExperimentalEnabled, graphdriver.Options{
}) Root: options.Root,
if err != nil { DriverOptions: options.GraphDriverOptions,
return nil, fmt.Errorf("error initializing graphdriver: %v", err) UIDMaps: options.IDMappings.UIDs(),
GIDMaps: options.IDMappings.GIDs(),
ExperimentalEnabled: options.ExperimentalEnabled,
})
if err != nil {
return nil, fmt.Errorf("error initializing graphdriver: %v", err)
}
logrus.Debugf("Initialized graph driver %s", drivername)
} }
logrus.Debugf("Using graph driver %s", driver)
fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver)) fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, options.GraphDrivers[runtime.GOOS]))
if err != nil { if err != nil {
return nil, err return nil, err
} }
return NewStoreFromGraphDriver(fms, driver, options.OS) return NewStoreFromGraphDrivers(fms, drivers)
} }
// NewStoreFromGraphDriver creates a new Store instance using the provided // NewStoreFromGraphDrivers creates a new Store instance using the provided
// metadata store and graph driver. The metadata store will be used to restore // metadata store and graph drivers. The metadata store will be used to restore
// the Store. // the Store.
func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver, os string) (Store, error) { func NewStoreFromGraphDrivers(store MetadataStore, drivers map[string]graphdriver.Driver) (Store, error) {
caps := graphdriver.Capabilities{}
if capDriver, ok := driver.(graphdriver.CapabilityDriver); ok { useTarSplit := make(map[string]bool)
caps = capDriver.Capabilities() for os, driver := range drivers {
caps := graphdriver.Capabilities{}
if capDriver, ok := driver.(graphdriver.CapabilityDriver); ok {
caps = capDriver.Capabilities()
}
useTarSplit[os] = !caps.ReproducesExactDiffs
} }
ls := &layerStore{ ls := &layerStore{
store: store, store: store,
driver: driver, drivers: drivers,
layerMap: map[ChainID]*roLayer{}, layerMap: map[ChainID]*roLayer{},
mounts: map[string]*mountedLayer{}, mounts: map[string]*mountedLayer{},
useTarSplit: !caps.ReproducesExactDiffs, useTarSplit: useTarSplit,
os: os,
} }
ids, mounts, err := store.List() ids, mounts, err := store.List()
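The store is now assembled from one graphdriver per operating system rather than a single driver. A sketch of the daemon-side wiring this enables; the driver names, the layerdb path template and the lcow branch are assumptions for illustration, not taken from this hunk:

package sketch // illustrative only

import (
    "path/filepath"
    "runtime"

    "github.com/docker/docker/layer"
    "github.com/docker/docker/pkg/idtools"
)

func newLayerStore(root string, experimental, lcow bool) (layer.Store, error) {
    // One graphdriver per OS the daemon can run containers for.
    graphDrivers := map[string]string{}
    if runtime.GOOS == "windows" {
        graphDrivers["windows"] = "windowsfilter"
        if lcow {
            graphDrivers["linux"] = "lcow" // driver name illustrative
        }
    } else {
        graphDrivers[runtime.GOOS] = "overlay2" // illustrative; normally from config or auto-detection
    }
    return layer.NewStoreFromOptions(layer.StoreOptions{
        Root:                      root,
        MetadataStorePathTemplate: filepath.Join(root, "image", "%s", "layerdb"), // path template assumed
        GraphDrivers:              graphDrivers,
        IDMappings:                &idtools.IDMappings{},
        ExperimentalEnabled:       experimental,
    })
}

Keying the metadata path to the host OS's driver name keeps the on-disk layout unchanged for existing single-OS installs.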
@ -227,7 +234,7 @@ func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent stri
tr := io.TeeReader(ts, digester.Hash()) tr := io.TeeReader(ts, digester.Hash())
rdr := tr rdr := tr
if ls.useTarSplit { if ls.useTarSplit[layer.os] {
tsw, err := tx.TarSplitWriter(true) tsw, err := tx.TarSplitWriter(true)
if err != nil { if err != nil {
return err return err
@ -243,7 +250,7 @@ func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent stri
} }
} }
applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, rdr) applySize, err := ls.drivers[layer.os].ApplyDiff(layer.cacheID, parent, rdr)
if err != nil { if err != nil {
return err return err
} }
@ -259,11 +266,11 @@ func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent stri
return nil return nil
} }
func (ls *layerStore) Register(ts io.Reader, parent ChainID, os OS) (Layer, error) { func (ls *layerStore) Register(ts io.Reader, parent ChainID, os string) (Layer, error) {
return ls.registerWithDescriptor(ts, parent, os, distribution.Descriptor{}) return ls.registerWithDescriptor(ts, parent, os, distribution.Descriptor{})
} }
func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, os OS, descriptor distribution.Descriptor) (Layer, error) { func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, os string, descriptor distribution.Descriptor) (Layer, error) {
// err is used to hold the error which will always trigger // err is used to hold the error which will always trigger
// cleanup of creates sources but may not be an error returned // cleanup of creates sources but may not be an error returned
// to the caller (already exists). // to the caller (already exists).
@ -271,13 +278,6 @@ func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, os OS
var pid string var pid string
var p *roLayer var p *roLayer
// Integrity check - ensure we are creating something for the correct operating system
if system.LCOWSupported() {
if strings.ToLower(ls.os) != strings.ToLower(string(os)) {
return nil, fmt.Errorf("cannot create entry for operating system %q in layer store for operating system %q", os, ls.os)
}
}
if string(parent) != "" { if string(parent) != "" {
p = ls.get(parent) p = ls.get(parent)
if p == nil { if p == nil {
@ -298,6 +298,14 @@ func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, os OS
} }
} }
// Ensure the operating system is valid, defaulting to the host OS if not set.
if os == "" {
os = runtime.GOOS
}
if err := system.ValidatePlatform(system.ParsePlatform(os)); err != nil {
return nil, err
}
// Create new roLayer // Create new roLayer
layer := &roLayer{ layer := &roLayer{
parent: p, parent: p,
@ -309,7 +317,7 @@ func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, os OS
os: os, os: os,
} }
if err = ls.driver.Create(layer.cacheID, pid, nil); err != nil { if err = ls.drivers[os].Create(layer.cacheID, pid, nil); err != nil {
return nil, err return nil, err
} }
@ -321,7 +329,7 @@ func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, os OS
defer func() { defer func() {
if err != nil { if err != nil {
logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err) logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err)
if err := ls.driver.Remove(layer.cacheID); err != nil { if err := ls.drivers[os].Remove(layer.cacheID); err != nil {
logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err)
} }
if err := tx.Cancel(); err != nil { if err := tx.Cancel(); err != nil {
@ -405,7 +413,7 @@ func (ls *layerStore) Map() map[ChainID]Layer {
} }
func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error {
err := ls.driver.Remove(layer.cacheID) err := ls.drivers[layer.os].Remove(layer.cacheID)
if err != nil { if err != nil {
return err return err
} }
@ -475,7 +483,7 @@ func (ls *layerStore) Release(l Layer) ([]Metadata, error) {
return ls.releaseLayer(layer) return ls.releaseLayer(layer)
} }
func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) { func (ls *layerStore) CreateRWLayer(name string, parent ChainID, os string, opts *CreateRWLayerOpts) (RWLayer, error) {
var ( var (
storageOpt map[string]string storageOpt map[string]string
initFunc MountInit initFunc MountInit
@ -515,16 +523,21 @@ func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWL
}() }()
} }
// Ensure the operating system is set to the host OS if not populated.
if os == "" {
os = runtime.GOOS
}
m = &mountedLayer{ m = &mountedLayer{
name: name, name: name,
parent: p, parent: p,
mountID: ls.mountID(name), mountID: ls.mountID(name),
layerStore: ls, layerStore: ls,
references: map[RWLayer]*referencedRWLayer{}, references: map[RWLayer]*referencedRWLayer{},
os: os,
} }
if initFunc != nil { if initFunc != nil {
pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt) pid, err = ls.initMount(m.mountID, m.os, pid, mountLabel, initFunc, storageOpt)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -535,7 +548,7 @@ func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWL
StorageOpt: storageOpt, StorageOpt: storageOpt,
} }
if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil { if err = ls.drivers[os].CreateReadWrite(m.mountID, pid, createOpts); err != nil {
return nil, err return nil, err
} }
if err = ls.saveMount(m); err != nil { if err = ls.saveMount(m); err != nil {
@ -584,14 +597,14 @@ func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) {
return []Metadata{}, nil return []Metadata{}, nil
} }
if err := ls.driver.Remove(m.mountID); err != nil { if err := ls.drivers[l.OS()].Remove(m.mountID); err != nil {
logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) logrus.Errorf("Error removing mounted layer %s: %s", m.name, err)
m.retakeReference(l) m.retakeReference(l)
return nil, err return nil, err
} }
if m.initID != "" { if m.initID != "" {
if err := ls.driver.Remove(m.initID); err != nil { if err := ls.drivers[l.OS()].Remove(m.initID); err != nil {
logrus.Errorf("Error removing init layer %s: %s", m.name, err) logrus.Errorf("Error removing init layer %s: %s", m.name, err)
m.retakeReference(l) m.retakeReference(l)
return nil, err return nil, err
@ -637,7 +650,7 @@ func (ls *layerStore) saveMount(mount *mountedLayer) error {
return nil return nil
} }
func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) { func (ls *layerStore) initMount(graphID, os, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) {
// Use "<graph-id>-init" to maintain compatibility with graph drivers // Use "<graph-id>-init" to maintain compatibility with graph drivers
// which are expecting this layer with this special name. If all // which are expecting this layer with this special name. If all
// graph drivers can be updated to not rely on knowing about this layer // graph drivers can be updated to not rely on knowing about this layer
@ -649,20 +662,20 @@ func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc Mou
StorageOpt: storageOpt, StorageOpt: storageOpt,
} }
if err := ls.driver.CreateReadWrite(initID, parent, createOpts); err != nil { if err := ls.drivers[os].CreateReadWrite(initID, parent, createOpts); err != nil {
return "", err return "", err
} }
p, err := ls.driver.Get(initID, "") p, err := ls.drivers[os].Get(initID, "")
if err != nil { if err != nil {
return "", err return "", err
} }
if err := initFunc(p); err != nil { if err := initFunc(p); err != nil {
ls.driver.Put(initID) ls.drivers[os].Put(initID)
return "", err return "", err
} }
if err := ls.driver.Put(initID); err != nil { if err := ls.drivers[os].Put(initID); err != nil {
return "", err return "", err
} }
@ -670,13 +683,13 @@ func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc Mou
} }
func (ls *layerStore) getTarStream(rl *roLayer) (io.ReadCloser, error) { func (ls *layerStore) getTarStream(rl *roLayer) (io.ReadCloser, error) {
if !ls.useTarSplit { if !ls.useTarSplit[rl.os] {
var parentCacheID string var parentCacheID string
if rl.parent != nil { if rl.parent != nil {
parentCacheID = rl.parent.cacheID parentCacheID = rl.parent.cacheID
} }
return ls.driver.Diff(rl.cacheID, parentCacheID) return ls.drivers[rl.os].Diff(rl.cacheID, parentCacheID)
} }
r, err := ls.store.TarSplitReader(rl.chainID) r, err := ls.store.TarSplitReader(rl.chainID)
@ -686,7 +699,7 @@ func (ls *layerStore) getTarStream(rl *roLayer) (io.ReadCloser, error) {
pr, pw := io.Pipe() pr, pw := io.Pipe()
go func() { go func() {
err := ls.assembleTarTo(rl.cacheID, r, nil, pw) err := ls.assembleTarTo(rl.cacheID, rl.os, r, nil, pw)
if err != nil { if err != nil {
pw.CloseWithError(err) pw.CloseWithError(err)
} else { } else {
@ -697,10 +710,10 @@ func (ls *layerStore) getTarStream(rl *roLayer) (io.ReadCloser, error) {
return pr, nil return pr, nil
} }
func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error { func (ls *layerStore) assembleTarTo(graphID, os string, metadata io.ReadCloser, size *int64, w io.Writer) error {
diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver) diffDriver, ok := ls.drivers[os].(graphdriver.DiffGetterDriver)
if !ok { if !ok {
diffDriver = &naiveDiffPathDriver{ls.driver} diffDriver = &naiveDiffPathDriver{ls.drivers[os]}
} }
defer metadata.Close() defer metadata.Close()
@ -719,15 +732,27 @@ func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size
} }
func (ls *layerStore) Cleanup() error { func (ls *layerStore) Cleanup() error {
return ls.driver.Cleanup() var err error
for _, driver := range ls.drivers {
if e := driver.Cleanup(); e != nil {
if err == nil {
err = e
} else {
err = fmt.Errorf("%s - %s", err.Error(), e.Error())
}
}
}
return err
} }
func (ls *layerStore) DriverStatus() [][2]string { func (ls *layerStore) DriverStatus(os string) [][2]string {
return ls.driver.Status() if os == "" {
os = runtime.GOOS
}
return ls.drivers[os].Status()
} }
func (ls *layerStore) DriverName() string { func (ls *layerStore) DriverName(os string) string {
return ls.driver.String() if os == "" {
os = runtime.GOOS
}
return ls.drivers[os].String()
} }
type naiveDiffPathDriver struct { type naiveDiffPathDriver struct {

View file

@ -6,6 +6,6 @@ import (
"github.com/docker/distribution" "github.com/docker/distribution"
) )
func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, os OS, descriptor distribution.Descriptor) (Layer, error) { func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, os string, descriptor distribution.Descriptor) (Layer, error) {
return ls.registerWithDescriptor(ts, parent, os, descriptor) return ls.registerWithDescriptor(ts, parent, os, descriptor)
} }
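RegisterWithDescriptor keeps its role for descriptor-carrying (foreign) layers, with the OS now a plain string. A hedged sketch of registering a foreign Windows base layer; the helper and URL are placeholders, not code from the repository:

package sketch // illustrative only

import (
    "io"

    "github.com/docker/distribution"
    "github.com/docker/distribution/manifest/schema2"
    "github.com/docker/docker/layer"
)

func registerForeignWindowsLayer(ls layer.Store, tar io.Reader, parent layer.ChainID) (layer.Layer, error) {
    desc := distribution.Descriptor{
        MediaType: schema2.MediaTypeForeignLayer,
        URLs:      []string{"https://example.com/base-layer.tar"}, // placeholder
    }
    // Only stores implementing DescribableStore can retain the descriptor.
    if ds, ok := ls.(layer.DescribableStore); ok {
        return ds.RegisterWithDescriptor(tar, parent, "windows", desc)
    }
    return ls.Register(tar, parent, "windows")
}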

View file

@ -73,7 +73,9 @@ func newTestStore(t *testing.T) (Store, string, func()) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
ls, err := NewStoreFromGraphDriver(fms, graph, runtime.GOOS) graphs := make(map[string]graphdriver.Driver)
graphs[runtime.GOOS] = graph
ls, err := NewStoreFromGraphDrivers(fms, graphs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -88,7 +90,7 @@ type layerInit func(root containerfs.ContainerFS) error
func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) { func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) {
containerID := stringid.GenerateRandomID() containerID := stringid.GenerateRandomID()
mount, err := ls.CreateRWLayer(containerID, parent, nil) mount, err := ls.CreateRWLayer(containerID, parent, runtime.GOOS, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -108,7 +110,7 @@ func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) {
} }
defer ts.Close() defer ts.Close()
layer, err := ls.Register(ts, parent, OS(runtime.GOOS)) layer, err := ls.Register(ts, parent, runtime.GOOS)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -277,7 +279,7 @@ func TestMountAndRegister(t *testing.T) {
size, _ := layer.Size() size, _ := layer.Size()
t.Logf("Layer size: %d", size) t.Logf("Layer size: %d", size)
mount2, err := ls.CreateRWLayer("new-test-mount", layer.ChainID(), nil) mount2, err := ls.CreateRWLayer("new-test-mount", layer.ChainID(), runtime.GOOS, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -385,7 +387,7 @@ func TestStoreRestore(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
m, err := ls.CreateRWLayer("some-mount_name", layer3.ChainID(), nil) m, err := ls.CreateRWLayer("some-mount_name", layer3.ChainID(), runtime.GOOS, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -403,7 +405,7 @@ func TestStoreRestore(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
ls2, err := NewStoreFromGraphDriver(ls.(*layerStore).store, ls.(*layerStore).driver, runtime.GOOS) ls2, err := NewStoreFromGraphDrivers(ls.(*layerStore).store, ls.(*layerStore).drivers)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -416,7 +418,7 @@ func TestStoreRestore(t *testing.T) {
assertLayerEqual(t, layer3b, layer3) assertLayerEqual(t, layer3b, layer3)
// Create again with same name, should return error // Create again with same name, should return error
if _, err := ls2.CreateRWLayer("some-mount_name", layer3b.ChainID(), nil); err == nil { if _, err := ls2.CreateRWLayer("some-mount_name", layer3b.ChainID(), runtime.GOOS, nil); err == nil {
t.Fatal("Expected error creating mount with same name") t.Fatal("Expected error creating mount with same name")
} else if err != ErrMountNameConflict { } else if err != ErrMountNameConflict {
t.Fatal(err) t.Fatal(err)
@ -498,13 +500,13 @@ func TestTarStreamStability(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
layer1, err := ls.Register(bytes.NewReader(tar1), "", OS(runtime.GOOS)) layer1, err := ls.Register(bytes.NewReader(tar1), "", runtime.GOOS)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
// hack layer to add file // hack layer to add file
p, err := ls.(*layerStore).driver.Get(layer1.(*referencedCacheLayer).cacheID, "") p, err := ls.(*layerStore).drivers[runtime.GOOS].Get(layer1.(*referencedCacheLayer).cacheID, "")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -513,11 +515,11 @@ func TestTarStreamStability(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
if err := ls.(*layerStore).driver.Put(layer1.(*referencedCacheLayer).cacheID); err != nil { if err := ls.(*layerStore).drivers[runtime.GOOS].Put(layer1.(*referencedCacheLayer).cacheID); err != nil {
t.Fatal(err) t.Fatal(err)
} }
layer2, err := ls.Register(bytes.NewReader(tar2), layer1.ChainID(), OS(runtime.GOOS)) layer2, err := ls.Register(bytes.NewReader(tar2), layer1.ChainID(), runtime.GOOS)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -685,12 +687,12 @@ func TestRegisterExistingLayer(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
layer2a, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID(), OS(runtime.GOOS)) layer2a, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID(), runtime.GOOS)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
layer2b, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID(), OS(runtime.GOOS)) layer2b, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID(), runtime.GOOS)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -725,12 +727,12 @@ func TestTarStreamVerification(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
layer1, err := ls.Register(bytes.NewReader(tar1), "", OS(runtime.GOOS)) layer1, err := ls.Register(bytes.NewReader(tar1), "", runtime.GOOS)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
layer2, err := ls.Register(bytes.NewReader(tar2), "", OS(runtime.GOOS)) layer2, err := ls.Register(bytes.NewReader(tar2), "", runtime.GOOS)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View file

@ -2,7 +2,10 @@
package layer package layer
import "testing" import (
"runtime"
"testing"
)
func graphDiffSize(ls Store, l Layer) (int64, error) { func graphDiffSize(ls Store, l Layer) (int64, error) {
cl := getCachedLayer(l) cl := getCachedLayer(l)
@ -10,7 +13,7 @@ func graphDiffSize(ls Store, l Layer) (int64, error) {
if cl.parent != nil { if cl.parent != nil {
parent = cl.parent.cacheID parent = cl.parent.cacheID
} }
return ls.(*layerStore).driver.DiffSize(cl.cacheID, parent) return ls.(*layerStore).drivers[runtime.GOOS].DiffSize(cl.cacheID, parent)
} }
// Unix as Windows graph driver does not support Changes which is indirectly // Unix as Windows graph driver does not support Changes which is indirectly

View file

@ -25,16 +25,15 @@ func GetLayerPath(s Store, layer ChainID) (string, error) {
return "", ErrLayerDoesNotExist return "", ErrLayerDoesNotExist
} }
if layerGetter, ok := ls.driver.(Getter); ok { if layerGetter, ok := ls.drivers[rl.os].(Getter); ok {
return layerGetter.GetLayerPath(rl.cacheID) return layerGetter.GetLayerPath(rl.cacheID)
} }
path, err := ls.drivers[rl.os].Get(rl.cacheID, "")
path, err := ls.driver.Get(rl.cacheID, "")
if err != nil { if err != nil {
return "", err return "", err
} }
if err := ls.driver.Put(rl.cacheID); err != nil { if err := ls.drivers[rl.os].Put(rl.cacheID); err != nil {
return "", err return "", err
} }

View file

@ -6,6 +6,7 @@ import (
"fmt" "fmt"
"io" "io"
"os" "os"
"runtime"
"github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
@ -16,7 +17,7 @@ import (
// CreateRWLayerByGraphID creates a RWLayer in the layer store using // CreateRWLayerByGraphID creates a RWLayer in the layer store using
// the provided name with the given graphID. To get the RWLayer // the provided name with the given graphID. To get the RWLayer
// after migration the layer may be retrieved by the given name. // after migration the layer may be retrieved by the given name.
func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent ChainID) (err error) { func (ls *layerStore) CreateRWLayerByGraphID(name, graphID, os string, parent ChainID) (err error) {
ls.mountL.Lock() ls.mountL.Lock()
defer ls.mountL.Unlock() defer ls.mountL.Unlock()
m, ok := ls.mounts[name] m, ok := ls.mounts[name]
@ -31,7 +32,11 @@ func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent
return nil return nil
} }
if !ls.driver.Exists(graphID) { // Ensure the operating system is set to the host OS if not populated.
if os == "" {
os = runtime.GOOS
}
if !ls.drivers[os].Exists(graphID) {
return fmt.Errorf("graph ID does not exist: %q", graphID) return fmt.Errorf("graph ID does not exist: %q", graphID)
} }
@ -60,11 +65,12 @@ func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent
mountID: graphID, mountID: graphID,
layerStore: ls, layerStore: ls,
references: map[RWLayer]*referencedRWLayer{}, references: map[RWLayer]*referencedRWLayer{},
os: os,
} }
// Check for existing init layer // Check for existing init layer
initID := fmt.Sprintf("%s-init", graphID) initID := fmt.Sprintf("%s-init", graphID)
if ls.driver.Exists(initID) { if ls.drivers[os].Exists(initID) {
m.initID = initID m.initID = initID
} }
@ -95,7 +101,10 @@ func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataP
} }
dgst := digest.Canonical.Digester() dgst := digest.Canonical.Digester()
err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash()) // Note - we use the host OS here. This is a safe assumption as it's during migration, and
// no host OS which supports migration also supports multiple image OSes. In other words,
// it's only on Linux, not on Windows.
err = ls.assembleTarTo(id, runtime.GOOS, uncompressed, &size, dgst.Hash())
if err != nil { if err != nil {
return return
} }
@ -111,7 +120,10 @@ func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataP
} }
func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) { func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) {
rawarchive, err := ls.driver.Diff(id, parent) // Note - we use the host OS here. This is a safe assumption as it's during migration, and
// no host OS which supports migration also supports multiple image OSes. In other words,
// it's only on Linux, not on Windows.
rawarchive, err := ls.drivers[runtime.GOOS].Diff(id, parent)
if err != nil { if err != nil {
return return
} }

View file

@ -94,7 +94,9 @@ func TestLayerMigration(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
ls, err := NewStoreFromGraphDriver(fms, graph, runtime.GOOS) graphs := make(map[string]graphdriver.Driver)
graphs[runtime.GOOS] = graph
ls, err := NewStoreFromGraphDrivers(fms, graphs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -110,14 +112,14 @@ func TestLayerMigration(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
layer1b, err := ls.Register(bytes.NewReader(tar1), "", OS(runtime.GOOS)) layer1b, err := ls.Register(bytes.NewReader(tar1), "", runtime.GOOS)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
assertReferences(t, layer1a, layer1b) assertReferences(t, layer1a, layer1b)
// Attempt register, should be same // Attempt register, should be same
layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID(), OS(runtime.GOOS)) layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID(), runtime.GOOS)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -222,7 +224,9 @@ func TestLayerMigrationNoTarsplit(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
ls, err := NewStoreFromGraphDriver(fms, graph, runtime.GOOS) graphs := make(map[string]graphdriver.Driver)
graphs[runtime.GOOS] = graph
ls, err := NewStoreFromGraphDrivers(fms, graphs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -238,7 +242,7 @@ func TestLayerMigrationNoTarsplit(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
layer1b, err := ls.Register(bytes.NewReader(tar1), "", OS(runtime.GOOS)) layer1b, err := ls.Register(bytes.NewReader(tar1), "", runtime.GOOS)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -246,7 +250,7 @@ func TestLayerMigrationNoTarsplit(t *testing.T) {
assertReferences(t, layer1a, layer1b) assertReferences(t, layer1a, layer1b)
// Attempt register, should be same // Attempt register, should be same
layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID(), OS(runtime.GOOS)) layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID(), runtime.GOOS)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -308,7 +312,7 @@ func TestMountMigration(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
graph := ls.(*layerStore).driver graph := ls.(*layerStore).drivers[runtime.GOOS]
layer1, err := createLayer(ls, "", initWithFiles(baseFiles...)) layer1, err := createLayer(ls, "", initWithFiles(baseFiles...))
if err != nil { if err != nil {
@ -334,7 +338,7 @@ func TestMountMigration(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
if err := ls.(*layerStore).CreateRWLayerByGraphID("migration-mount", containerID, layer1.ChainID()); err != nil { if err := ls.(*layerStore).CreateRWLayerByGraphID("migration-mount", containerID, runtime.GOOS, layer1.ChainID()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -380,7 +384,7 @@ func TestMountMigration(t *testing.T) {
Kind: archive.ChangeAdd, Kind: archive.ChangeAdd,
}) })
if _, err := ls.CreateRWLayer("migration-mount", layer1.ChainID(), nil); err == nil { if _, err := ls.CreateRWLayer("migration-mount", layer1.ChainID(), runtime.GOOS, nil); err == nil {
t.Fatal("Expected error creating mount with same name") t.Fatal("Expected error creating mount with same name")
} else if err != ErrMountNameConflict { } else if err != ErrMountNameConflict {
t.Fatal(err) t.Fatal(err)

View file

@ -35,7 +35,7 @@ func TestMountInit(t *testing.T) {
rwLayerOpts := &CreateRWLayerOpts{ rwLayerOpts := &CreateRWLayerOpts{
InitFunc: mountInit, InitFunc: mountInit,
} }
m, err := ls.CreateRWLayer("fun-mount", layer.ChainID(), rwLayerOpts) m, err := ls.CreateRWLayer("fun-mount", layer.ChainID(), runtime.GOOS, rwLayerOpts)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -95,7 +95,7 @@ func TestMountSize(t *testing.T) {
InitFunc: mountInit, InitFunc: mountInit,
} }
m, err := ls.CreateRWLayer("mount-size", layer.ChainID(), rwLayerOpts) m, err := ls.CreateRWLayer("mount-size", layer.ChainID(), runtime.GOOS, rwLayerOpts)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -147,7 +147,7 @@ func TestMountChanges(t *testing.T) {
InitFunc: mountInit, InitFunc: mountInit,
} }
m, err := ls.CreateRWLayer("mount-changes", layer.ChainID(), rwLayerOpts) m, err := ls.CreateRWLayer("mount-changes", layer.ChainID(), runtime.GOOS, rwLayerOpts)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View file

@ -2,6 +2,7 @@ package layer
import ( import (
"io" "io"
"runtime"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/containerfs"
@ -14,6 +15,7 @@ type mountedLayer struct {
parent *roLayer parent *roLayer
path string path string
layerStore *layerStore layerStore *layerStore
os string
references map[RWLayer]*referencedRWLayer references map[RWLayer]*referencedRWLayer
} }
@ -29,7 +31,7 @@ func (ml *mountedLayer) cacheParent() string {
} }
func (ml *mountedLayer) TarStream() (io.ReadCloser, error) { func (ml *mountedLayer) TarStream() (io.ReadCloser, error) {
return ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) return ml.layerStore.drivers[ml.OS()].Diff(ml.mountID, ml.cacheParent())
} }
func (ml *mountedLayer) Name() string { func (ml *mountedLayer) Name() string {
@ -46,16 +48,24 @@ func (ml *mountedLayer) Parent() Layer {
return nil return nil
} }
func (ml *mountedLayer) OS() string {
// For backwards compatibility, return the host OS if not set.
if ml.os == "" {
return runtime.GOOS
}
return ml.os
}
func (ml *mountedLayer) Size() (int64, error) { func (ml *mountedLayer) Size() (int64, error) {
return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) return ml.layerStore.drivers[ml.OS()].DiffSize(ml.mountID, ml.cacheParent())
} }
func (ml *mountedLayer) Changes() ([]archive.Change, error) { func (ml *mountedLayer) Changes() ([]archive.Change, error) {
return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent()) return ml.layerStore.drivers[ml.OS()].Changes(ml.mountID, ml.cacheParent())
} }
func (ml *mountedLayer) Metadata() (map[string]string, error) { func (ml *mountedLayer) Metadata() (map[string]string, error) {
return ml.layerStore.driver.GetMetadata(ml.mountID) return ml.layerStore.drivers[ml.OS()].GetMetadata(ml.mountID)
} }
func (ml *mountedLayer) getReference() RWLayer { func (ml *mountedLayer) getReference() RWLayer {
@ -90,11 +100,11 @@ type referencedRWLayer struct {
} }
func (rl *referencedRWLayer) Mount(mountLabel string) (containerfs.ContainerFS, error) { func (rl *referencedRWLayer) Mount(mountLabel string) (containerfs.ContainerFS, error) {
return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel) return rl.layerStore.drivers[rl.OS()].Get(rl.mountedLayer.mountID, mountLabel)
} }
// Unmount decrements the activity count and unmounts the underlying layer // Unmount decrements the activity count and unmounts the underlying layer
// Callers should only call `Unmount` once per call to `Mount`, even on error. // Callers should only call `Unmount` once per call to `Mount`, even on error.
func (rl *referencedRWLayer) Unmount() error { func (rl *referencedRWLayer) Unmount() error {
return rl.layerStore.driver.Put(rl.mountedLayer.mountID) return rl.layerStore.drivers[rl.OS()].Put(rl.mountedLayer.mountID)
} }
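Mount and Unmount on a referenced RW layer now go through the driver matching the layer's OS, with unset values falling back to the host OS. A small sketch of the usual mount/work/unmount pairing; the helper name and error handling are illustrative:

package sketch // illustrative only

import (
    "github.com/docker/docker/layer"
    "github.com/docker/docker/pkg/containerfs"
)

// withMountedRWLayer mounts rw, runs fn against its root, and unmounts once
// afterwards, pairing one Unmount with the successful Mount.
func withMountedRWLayer(rw layer.RWLayer, mountLabel string, fn func(root containerfs.ContainerFS) error) error {
    root, err := rw.Mount(mountLabel)
    if err != nil {
        return err
    }
    workErr := fn(root)
    if err := rw.Unmount(); err != nil && workErr == nil {
        workErr = err
    }
    return workErr
}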

View file

@ -16,7 +16,7 @@ type roLayer struct {
size int64 size int64
layerStore *layerStore layerStore *layerStore
descriptor distribution.Descriptor descriptor distribution.Descriptor
os OS os string
referenceCount int referenceCount int
references map[Layer]struct{} references map[Layer]struct{}
@ -52,7 +52,7 @@ func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) {
if parent != ChainID("") && parentCacheID == "" { if parent != ChainID("") && parentCacheID == "" {
return nil, fmt.Errorf("layer ID '%s' is not a parent of the specified layer: cannot provide diff to non-parent", parent) return nil, fmt.Errorf("layer ID '%s' is not a parent of the specified layer: cannot provide diff to non-parent", parent)
} }
return rl.layerStore.driver.Diff(rl.cacheID, parentCacheID) return rl.layerStore.drivers[rl.OS()].Diff(rl.cacheID, parentCacheID)
} }
func (rl *roLayer) ChainID() ChainID { func (rl *roLayer) ChainID() ChainID {
@ -86,7 +86,7 @@ func (rl *roLayer) DiffSize() (size int64, err error) {
} }
func (rl *roLayer) Metadata() (map[string]string, error) { func (rl *roLayer) Metadata() (map[string]string, error) {
return rl.layerStore.driver.GetMetadata(rl.cacheID) return rl.layerStore.drivers[rl.OS()].GetMetadata(rl.cacheID)
} }
type referencedCacheLayer struct { type referencedCacheLayer struct {

View file

@ -2,6 +2,8 @@
package layer package layer
func (rl *roLayer) OS() OS { import "runtime"
return ""
func (rl *roLayer) OS() string {
return runtime.GOOS
} }

View file

@ -8,7 +8,7 @@ func (rl *roLayer) Descriptor() distribution.Descriptor {
return rl.descriptor return rl.descriptor
} }
func (rl *roLayer) OS() OS { func (rl *roLayer) OS() string {
if rl.os == "" { if rl.os == "" {
return "windows" return "windows"
} }

View file

@ -94,7 +94,7 @@ func TestMigrateContainers(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
is, err := image.NewImageStore(ifs, runtime.GOOS, ls) is, err := image.NewImageStore(ifs, ls)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -172,12 +172,12 @@ func TestMigrateImages(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
is, err := image.NewImageStore(ifs, runtime.GOOS, ls) is, err := image.NewImageStore(ifs, ls)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
ms, err := metadata.NewFSMetadataStore(filepath.Join(tmpdir, "distribution"), runtime.GOOS) ms, err := metadata.NewFSMetadataStore(filepath.Join(tmpdir, "distribution"))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -430,7 +430,7 @@ func (l *mockLayer) DiffSize() (int64, error) {
return 0, nil return 0, nil
} }
func (l *mockLayer) OS() layer.OS { func (l *mockLayer) OS() string {
return "" return ""
} }

View file

@ -11,7 +11,7 @@ var lcowSupported = false
// 2. Remove the getenv check when image-store is coalesced as shouldn't be needed anymore. // 2. Remove the getenv check when image-store is coalesced as shouldn't be needed anymore.
func InitLCOW(experimental bool) { func InitLCOW(experimental bool) {
v := GetOSVersion() v := GetOSVersion()
if experimental && v.Build > 16270 && os.Getenv("LCOW_SUPPORTED") != "" { if experimental && v.Build > 16278 && os.Getenv("LCOW_SUPPORTED") != "" {
lcowSupported = true lcowSupported = true
} }
} }
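Besides raising the required build to 16278, this gate is what decides whether a second, Linux graphdriver gets registered at all. A sketch of how a Windows daemon start-up path might consume it; the call site and driver names are assumptions for illustration:

package sketch // illustrative only, Windows-specific path

import (
    "github.com/docker/docker/pkg/system"
)

func graphDriversFor(experimental bool) map[string]string {
    // InitLCOW only flips the internal flag when the daemon is experimental,
    // the Windows build is recent enough, and LCOW_SUPPORTED is set.
    system.InitLCOW(experimental)

    drivers := map[string]string{"windows": "windowsfilter"}
    if system.LCOWSupported() {
        // A second, Linux-only graphdriver is registered alongside the
        // Windows one, feeding the per-OS maps in the layer store above.
        drivers["linux"] = "lcow"
    }
    return drivers
}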

View file

@ -145,7 +145,7 @@ func (s *tempConfigStore) Get(d digest.Digest) ([]byte, error) {
return s.config, nil return s.config, nil
} }
func (s *tempConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, layer.OS, error) { func (s *tempConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, string, error) {
return configToRootFS(c) return configToRootFS(c)
} }
@ -532,7 +532,7 @@ func (s *pluginConfigStore) Get(d digest.Digest) ([]byte, error) {
return ioutil.ReadAll(rwc) return ioutil.ReadAll(rwc)
} }
func (s *pluginConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, layer.OS, error) { func (s *pluginConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, string, error) {
return configToRootFS(c) return configToRootFS(c)
} }

View file

@ -126,8 +126,7 @@ type downloadManager struct {
configDigest digest.Digest configDigest digest.Digest
} }
func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, os layer.OS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, os string, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
// TODO @jhowardmsft LCOW: May need revisiting.
for _, l := range layers { for _, l := range layers {
b, err := dm.blobStore.New() b, err := dm.blobStore.New()
if err != nil { if err != nil {
@ -179,6 +178,6 @@ func (dm *downloadManager) Put(dt []byte) (digest.Digest, error) {
func (dm *downloadManager) Get(d digest.Digest) ([]byte, error) { func (dm *downloadManager) Get(d digest.Digest) ([]byte, error) {
return nil, fmt.Errorf("digest not found") return nil, fmt.Errorf("digest not found")
} }
func (dm *downloadManager) RootFSAndOSFromConfig(c []byte) (*image.RootFS, layer.OS, error) { func (dm *downloadManager) RootFSAndOSFromConfig(c []byte) (*image.RootFS, string, error) {
return configToRootFS(c) return configToRootFS(c)
} }

View file

@ -375,12 +375,9 @@ func isEqualPrivilege(a, b types.PluginPrivilege) bool {
return reflect.DeepEqual(a.Value, b.Value) return reflect.DeepEqual(a.Value, b.Value)
} }
func configToRootFS(c []byte) (*image.RootFS, layer.OS, error) { func configToRootFS(c []byte) (*image.RootFS, string, error) {
// TODO @jhowardmsft LCOW - Will need to revisit this. For now, calculate the operating system. // TODO @jhowardmsft LCOW - Will need to revisit this.
os := layer.OS(runtime.GOOS) os := runtime.GOOS
if system.LCOWSupported() {
os = "linux"
}
var pluginConfig types.PluginConfig var pluginConfig types.PluginConfig
if err := json.Unmarshal(c, &pluginConfig); err != nil { if err := json.Unmarshal(c, &pluginConfig); err != nil {
return nil, "", err return nil, "", err