2015-11-18 17:15:00 -05:00
|
|
|
package layer
|
|
|
|
|
|
|
|
import (
|
|
|
|
"errors"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
2017-05-19 13:38:47 -04:00
|
|
|
"strings"
|
2015-11-18 17:15:00 -05:00
|
|
|
"sync"
|
|
|
|
|
|
|
|
"github.com/Sirupsen/logrus"
|
2016-05-25 22:11:51 -04:00
|
|
|
"github.com/docker/distribution"
|
2015-11-18 17:15:00 -05:00
|
|
|
"github.com/docker/docker/daemon/graphdriver"
|
2015-12-16 15:32:16 -05:00
|
|
|
"github.com/docker/docker/pkg/idtools"
|
2016-10-07 16:53:14 -04:00
|
|
|
"github.com/docker/docker/pkg/plugingetter"
|
2015-11-18 17:15:00 -05:00
|
|
|
"github.com/docker/docker/pkg/stringid"
|
2017-05-19 13:38:47 -04:00
|
|
|
"github.com/docker/docker/pkg/system"
|
2017-01-06 20:23:18 -05:00
|
|
|
"github.com/opencontainers/go-digest"
|
2015-11-18 17:15:00 -05:00
|
|
|
"github.com/vbatts/tar-split/tar/asm"
|
|
|
|
"github.com/vbatts/tar-split/tar/storage"
|
|
|
|
)
|
|
|
|
|
|
|
|
// maxLayerDepth represents the maximum number of
// layers which can be chained together. 125 was
// chosen to account for the 127 max in some
// graphdrivers plus the 2 additional layers
// used to create a rwlayer.
const maxLayerDepth = 125
|
|
|
|
|
|
|
|
// layerStore manages read-only layers and writable (mounted) layers on
// top of a graph driver, persisting its bookkeeping through a
// MetadataStore so state can be restored across restarts.
type layerStore struct {
	store  MetadataStore      // persistent metadata (diff IDs, sizes, parents, mounts)
	driver graphdriver.Driver // storage backend holding the layer contents

	layerMap map[ChainID]*roLayer // in-memory index of read-only layers
	layerL   sync.Mutex           // guards layerMap and layer reference counts

	mounts map[string]*mountedLayer // in-memory index of RW layers by name
	mountL sync.Mutex               // guards mounts

	// useTarSplit is set when the driver cannot reproduce exact diffs
	// itself, so tar-split metadata must be recorded to rebuild layer
	// tar streams (see applyTar/getTarStream).
	useTarSplit bool

	// platform this store holds layers for; checked on Register when
	// LCOW is supported.
	platform string
}
|
|
|
|
|
2015-12-16 15:32:16 -05:00
|
|
|
// StoreOptions are the options used to create a new Store instance
type StoreOptions struct {
	StorePath                 string   // root directory for the graph driver
	MetadataStorePathTemplate string   // fmt template for the metadata store path, filled with the driver name
	GraphDriver               string   // name of the graph driver to use (empty selects a default)
	GraphDriverOptions        []string // driver-specific options
	// IDMappings supplies the UID/GID mappings passed to the driver.
	IDMappings *idtools.IDMappings
	// PluginGetter resolves graph drivers provided by plugins.
	PluginGetter plugingetter.PluginGetter
	// ExperimentalEnabled is forwarded to the graph driver.
	ExperimentalEnabled bool
	// Platform the resulting store holds layers for.
	Platform string
}
|
|
|
|
|
|
|
|
// NewStoreFromOptions creates a new Store instance
|
|
|
|
func NewStoreFromOptions(options StoreOptions) (Store, error) {
|
2016-11-19 11:41:07 -05:00
|
|
|
driver, err := graphdriver.New(options.GraphDriver, options.PluginGetter, graphdriver.Options{
|
|
|
|
Root: options.StorePath,
|
|
|
|
DriverOptions: options.GraphDriverOptions,
|
2017-05-19 18:06:46 -04:00
|
|
|
UIDMaps: options.IDMappings.UIDs(),
|
|
|
|
GIDMaps: options.IDMappings.GIDs(),
|
2016-11-19 11:41:07 -05:00
|
|
|
ExperimentalEnabled: options.ExperimentalEnabled,
|
|
|
|
})
|
2015-12-16 15:32:16 -05:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("error initializing graphdriver: %v", err)
|
|
|
|
}
|
|
|
|
logrus.Debugf("Using graph driver %s", driver)
|
|
|
|
|
|
|
|
fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver))
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-05-19 13:38:47 -04:00
|
|
|
return NewStoreFromGraphDriver(fms, driver, options.Platform)
|
2015-12-16 15:32:16 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// NewStoreFromGraphDriver creates a new Store instance using the provided
// metadata store and graph driver. The metadata store will be used to restore
// the Store.
func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver, platform string) (Store, error) {
	// Ask the driver whether it can reproduce exact diffs; when it can,
	// tar-split metadata does not need to be kept (useTarSplit below).
	caps := graphdriver.Capabilities{}
	if capDriver, ok := driver.(graphdriver.CapabilityDriver); ok {
		caps = capDriver.Capabilities()
	}

	ls := &layerStore{
		store:       store,
		driver:      driver,
		layerMap:    map[ChainID]*roLayer{},
		mounts:      map[string]*mountedLayer{},
		useTarSplit: !caps.ReproducesExactDiffs,
		platform:    platform,
	}

	ids, mounts, err := store.List()
	if err != nil {
		return nil, err
	}

	// Restore read-only layers. A layer that fails to load is skipped
	// rather than aborting the whole restore.
	for _, id := range ids {
		l, err := ls.loadLayer(id)
		if err != nil {
			logrus.Debugf("Failed to load layer %s: %s", id, err)
			continue
		}
		if l.parent != nil {
			// Each child holds a reference on its parent.
			l.parent.referenceCount++
		}
	}

	// Restore RW (mounted) layers; failures are logged and skipped.
	for _, mount := range mounts {
		if err := ls.loadMount(mount); err != nil {
			logrus.Debugf("Failed to load mount %s: %s", mount, err)
		}
	}

	return ls, nil
}
|
|
|
|
|
|
|
|
// loadLayer reads the metadata for a read-only layer — and, recursively,
// its parent chain — from the metadata store and caches the result in
// ls.layerMap. Already-loaded layers are returned from the cache.
func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) {
	// Fast path: layer already restored.
	cl, ok := ls.layerMap[layer]
	if ok {
		return cl, nil
	}

	diff, err := ls.store.GetDiffID(layer)
	if err != nil {
		return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err)
	}

	size, err := ls.store.GetSize(layer)
	if err != nil {
		return nil, fmt.Errorf("failed to get size for %s: %s", layer, err)
	}

	cacheID, err := ls.store.GetCacheID(layer)
	if err != nil {
		return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err)
	}

	parent, err := ls.store.GetParent(layer)
	if err != nil {
		return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err)
	}

	descriptor, err := ls.store.GetDescriptor(layer)
	if err != nil {
		return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err)
	}

	platform, err := ls.store.GetPlatform(layer)
	if err != nil {
		return nil, fmt.Errorf("failed to get platform for %s: %s", layer, err)
	}

	cl = &roLayer{
		chainID:    layer,
		diffID:     diff,
		size:       size,
		cacheID:    cacheID,
		layerStore: ls,
		references: map[Layer]struct{}{},
		descriptor: descriptor,
		platform:   platform,
	}

	// Recursively restore the parent chain before publishing this layer.
	if parent != "" {
		p, err := ls.loadLayer(parent)
		if err != nil {
			return nil, err
		}
		cl.parent = p
	}

	ls.layerMap[cl.chainID] = cl

	return cl, nil
}
|
|
|
|
|
|
|
|
// loadMount restores a single RW (mounted) layer from the metadata store
// into ls.mounts. It is a no-op if the mount is already loaded. When the
// mount has a parent, the parent's read-only chain is loaded and retained.
func (ls *layerStore) loadMount(mount string) error {
	if _, ok := ls.mounts[mount]; ok {
		return nil
	}

	mountID, err := ls.store.GetMountID(mount)
	if err != nil {
		return err
	}

	initID, err := ls.store.GetInitID(mount)
	if err != nil {
		return err
	}

	parent, err := ls.store.GetMountParent(mount)
	if err != nil {
		return err
	}

	ml := &mountedLayer{
		name:       mount,
		mountID:    mountID,
		initID:     initID,
		layerStore: ls,
		references: map[RWLayer]*referencedRWLayer{},
	}

	if parent != "" {
		p, err := ls.loadLayer(parent)
		if err != nil {
			return err
		}
		ml.parent = p

		// The mount keeps its parent chain alive via this reference.
		p.referenceCount++
	}

	ls.mounts[ml.name] = ml

	return nil
}
|
|
|
|
|
|
|
|
// applyTar streams the layer tar ts into the graph driver on top of
// parent, computing the layer's DiffID (digest of the tar as read) as a
// side effect via a TeeReader. When ls.useTarSplit is set, tar-split
// metadata is recorded into the transaction so the exact stream can be
// reassembled later. On success layer.size and layer.diffID are updated.
func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error {
	digester := digest.Canonical.Digester()
	tr := io.TeeReader(ts, digester.Hash())

	rdr := tr
	if ls.useTarSplit {
		tsw, err := tx.TarSplitWriter(true)
		if err != nil {
			return err
		}
		metaPacker := storage.NewJSONPacker(tsw)
		defer tsw.Close()

		// we're passing nil here for the file putter, because the ApplyDiff will
		// handle the extraction of the archive
		rdr, err = asm.NewInputTarStream(tr, metaPacker, nil)
		if err != nil {
			return err
		}
	}

	applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, rdr)
	if err != nil {
		return err
	}

	// Discard trailing data but ensure metadata is picked up to reconstruct stream
	io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed

	layer.size = applySize
	layer.diffID = DiffID(digester.Digest())

	logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize)

	return nil
}
|
|
|
|
|
2017-04-25 19:45:42 -04:00
|
|
|
func (ls *layerStore) Register(ts io.Reader, parent ChainID, platform Platform) (Layer, error) {
|
|
|
|
return ls.registerWithDescriptor(ts, parent, platform, distribution.Descriptor{})
|
2016-05-25 22:11:51 -04:00
|
|
|
}
|
|
|
|
|
2017-04-25 19:45:42 -04:00
|
|
|
func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, platform Platform, descriptor distribution.Descriptor) (Layer, error) {
|
2015-11-18 17:15:00 -05:00
|
|
|
// err is used to hold the error which will always trigger
|
|
|
|
// cleanup of creates sources but may not be an error returned
|
|
|
|
// to the caller (already exists).
|
|
|
|
var err error
|
|
|
|
var pid string
|
|
|
|
var p *roLayer
|
2017-05-19 13:38:47 -04:00
|
|
|
|
|
|
|
// Integrity check - ensure we are creating something for the correct platform
|
2017-06-26 12:11:54 -04:00
|
|
|
if system.LCOWSupported() {
|
2017-05-19 13:38:47 -04:00
|
|
|
if strings.ToLower(ls.platform) != strings.ToLower(string(platform)) {
|
|
|
|
return nil, fmt.Errorf("cannot create entry for platform %q in layer store for platform %q", platform, ls.platform)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-11-18 17:15:00 -05:00
|
|
|
if string(parent) != "" {
|
|
|
|
p = ls.get(parent)
|
|
|
|
if p == nil {
|
|
|
|
return nil, ErrLayerDoesNotExist
|
|
|
|
}
|
|
|
|
pid = p.cacheID
|
|
|
|
// Release parent chain if error
|
|
|
|
defer func() {
|
|
|
|
if err != nil {
|
|
|
|
ls.layerL.Lock()
|
|
|
|
ls.releaseLayer(p)
|
|
|
|
ls.layerL.Unlock()
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
if p.depth() >= maxLayerDepth {
|
|
|
|
err = ErrMaxDepthExceeded
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create new roLayer
|
|
|
|
layer := &roLayer{
|
|
|
|
parent: p,
|
|
|
|
cacheID: stringid.GenerateRandomID(),
|
|
|
|
referenceCount: 1,
|
|
|
|
layerStore: ls,
|
|
|
|
references: map[Layer]struct{}{},
|
2016-06-06 20:49:34 -04:00
|
|
|
descriptor: descriptor,
|
2017-04-25 19:45:42 -04:00
|
|
|
platform: platform,
|
2015-11-18 17:15:00 -05:00
|
|
|
}
|
|
|
|
|
2016-11-09 15:59:58 -05:00
|
|
|
if err = ls.driver.Create(layer.cacheID, pid, nil); err != nil {
|
2015-11-18 17:15:00 -05:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
tx, err := ls.store.StartTransaction()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
defer func() {
|
|
|
|
if err != nil {
|
|
|
|
logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err)
|
|
|
|
if err := ls.driver.Remove(layer.cacheID); err != nil {
|
|
|
|
logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err)
|
|
|
|
}
|
|
|
|
if err := tx.Cancel(); err != nil {
|
|
|
|
logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
if err = ls.applyTar(tx, ts, pid, layer); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if layer.parent == nil {
|
|
|
|
layer.chainID = ChainID(layer.diffID)
|
|
|
|
} else {
|
|
|
|
layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID)
|
|
|
|
}
|
|
|
|
|
|
|
|
if err = storeLayer(tx, layer); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
ls.layerL.Lock()
|
|
|
|
defer ls.layerL.Unlock()
|
|
|
|
|
2015-11-20 07:35:01 -05:00
|
|
|
if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil {
|
2015-11-18 17:15:00 -05:00
|
|
|
// Set error for cleanup, but do not return the error
|
|
|
|
err = errors.New("layer already exists")
|
|
|
|
return existingLayer.getReference(), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
if err = tx.Commit(layer.chainID); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
ls.layerMap[layer.chainID] = layer
|
|
|
|
|
|
|
|
return layer.getReference(), nil
|
|
|
|
}
|
|
|
|
|
2015-11-20 07:35:01 -05:00
|
|
|
func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer {
|
|
|
|
l, ok := ls.layerMap[layer]
|
2015-11-18 17:15:00 -05:00
|
|
|
if !ok {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-11-20 07:35:01 -05:00
|
|
|
l.referenceCount++
|
|
|
|
|
|
|
|
return l
|
|
|
|
}
|
2015-11-18 17:15:00 -05:00
|
|
|
|
2015-11-20 07:35:01 -05:00
|
|
|
func (ls *layerStore) get(l ChainID) *roLayer {
|
|
|
|
ls.layerL.Lock()
|
|
|
|
defer ls.layerL.Unlock()
|
|
|
|
return ls.getWithoutLock(l)
|
2015-11-18 17:15:00 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
func (ls *layerStore) Get(l ChainID) (Layer, error) {
|
2016-03-30 22:34:51 -04:00
|
|
|
ls.layerL.Lock()
|
|
|
|
defer ls.layerL.Unlock()
|
|
|
|
|
|
|
|
layer := ls.getWithoutLock(l)
|
2015-11-18 17:15:00 -05:00
|
|
|
if layer == nil {
|
|
|
|
return nil, ErrLayerDoesNotExist
|
|
|
|
}
|
|
|
|
|
|
|
|
return layer.getReference(), nil
|
|
|
|
}
|
|
|
|
|
2016-08-23 19:08:43 -04:00
|
|
|
func (ls *layerStore) Map() map[ChainID]Layer {
|
|
|
|
ls.layerL.Lock()
|
|
|
|
defer ls.layerL.Unlock()
|
|
|
|
|
|
|
|
layers := map[ChainID]Layer{}
|
|
|
|
|
|
|
|
for k, v := range ls.layerMap {
|
|
|
|
layers[k] = v
|
|
|
|
}
|
|
|
|
|
|
|
|
return layers
|
|
|
|
}
|
|
|
|
|
2015-11-18 17:15:00 -05:00
|
|
|
func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error {
|
|
|
|
err := ls.driver.Remove(layer.cacheID)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
err = ls.store.Remove(layer.chainID)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
metadata.DiffID = layer.diffID
|
|
|
|
metadata.ChainID = layer.chainID
|
|
|
|
metadata.Size, err = layer.Size()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
metadata.DiffSize = layer.size
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-11-20 08:35:43 -05:00
|
|
|
// releaseLayer decrements the reference count of l and, when it drops to
// zero, deletes the layer and repeats the process up the parent chain.
// It returns metadata for every layer actually removed. The caller must
// hold ls.layerL (all call sites in this file lock it first). The panics
// signal bookkeeping bugs: releasing a layer that was never retained, or
// deleting one that still has children or outstanding references.
func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) {
	depth := 0
	removed := []Metadata{}
	for {
		if l.referenceCount == 0 {
			panic("layer not retained")
		}
		l.referenceCount--
		if l.referenceCount != 0 {
			// Still referenced elsewhere; stop walking the chain.
			return removed, nil
		}

		if len(removed) == 0 && depth > 0 {
			panic("cannot remove layer with child")
		}
		if l.hasReferences() {
			panic("cannot delete referenced layer")
		}
		var metadata Metadata
		if err := ls.deleteLayer(l, &metadata); err != nil {
			return nil, err
		}

		delete(ls.layerMap, l.chainID)
		removed = append(removed, metadata)

		if l.parent == nil {
			return removed, nil
		}

		// This layer held a reference on its parent; release it too.
		depth++
		l = l.parent
	}
}
|
|
|
|
|
|
|
|
func (ls *layerStore) Release(l Layer) ([]Metadata, error) {
|
|
|
|
ls.layerL.Lock()
|
|
|
|
defer ls.layerL.Unlock()
|
|
|
|
layer, ok := ls.layerMap[l.ChainID()]
|
|
|
|
if !ok {
|
|
|
|
return []Metadata{}, nil
|
|
|
|
}
|
|
|
|
if !layer.hasReference(l) {
|
|
|
|
return nil, ErrLayerNotRetained
|
|
|
|
}
|
|
|
|
|
|
|
|
layer.deleteReference(l)
|
|
|
|
|
|
|
|
return ls.releaseLayer(layer)
|
|
|
|
}
|
|
|
|
|
2016-11-16 16:31:23 -05:00
|
|
|
// CreateRWLayer creates a new writable layer named name on top of the
// read-only chain parent (which may be empty). opts may supply a mount
// label, driver storage options, and an init function that populates the
// "-init" layer before the RW layer is created. The name must not
// already be in use.
func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) {
	var (
		storageOpt map[string]string
		initFunc   MountInit
		mountLabel string
	)

	if opts != nil {
		mountLabel = opts.MountLabel
		storageOpt = opts.StorageOpt
		initFunc = opts.InitFunc
	}

	ls.mountL.Lock()
	defer ls.mountL.Unlock()
	m, ok := ls.mounts[name]
	if ok {
		return nil, ErrMountNameConflict
	}

	// err is examined by the deferred cleanup below; it must stay the
	// function-scoped variable (no shadowing with :=).
	var err error
	var pid string
	var p *roLayer
	if string(parent) != "" {
		// Takes a reference on the parent chain for this mount.
		p = ls.get(parent)
		if p == nil {
			return nil, ErrLayerDoesNotExist
		}
		pid = p.cacheID

		// Release parent chain if error
		defer func() {
			if err != nil {
				ls.layerL.Lock()
				ls.releaseLayer(p)
				ls.layerL.Unlock()
			}
		}()
	}

	m = &mountedLayer{
		name:       name,
		parent:     p,
		mountID:    ls.mountID(name),
		layerStore: ls,
		references: map[RWLayer]*referencedRWLayer{},
	}

	if initFunc != nil {
		// The init layer becomes the effective parent of the RW layer.
		pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt)
		if err != nil {
			return nil, err
		}
		m.initID = pid
	}

	createOpts := &graphdriver.CreateOpts{
		StorageOpt: storageOpt,
	}

	if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil {
		return nil, err
	}
	if err = ls.saveMount(m); err != nil {
		return nil, err
	}

	return m.getReference(), nil
}
|
|
|
|
|
2015-12-16 17:13:50 -05:00
|
|
|
func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) {
|
2015-11-18 17:15:00 -05:00
|
|
|
ls.mountL.Lock()
|
|
|
|
defer ls.mountL.Unlock()
|
2015-12-16 17:13:50 -05:00
|
|
|
mount, ok := ls.mounts[id]
|
|
|
|
if !ok {
|
|
|
|
return nil, ErrMountDoesNotExist
|
2015-11-18 17:15:00 -05:00
|
|
|
}
|
|
|
|
|
2015-12-16 17:13:50 -05:00
|
|
|
return mount.getReference(), nil
|
2015-11-18 17:15:00 -05:00
|
|
|
}
|
|
|
|
|
2016-03-18 14:50:19 -04:00
|
|
|
func (ls *layerStore) GetMountID(id string) (string, error) {
|
|
|
|
ls.mountL.Lock()
|
|
|
|
defer ls.mountL.Unlock()
|
|
|
|
mount, ok := ls.mounts[id]
|
|
|
|
if !ok {
|
|
|
|
return "", ErrMountDoesNotExist
|
|
|
|
}
|
2016-03-04 17:41:53 -05:00
|
|
|
logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID)
|
2016-03-18 14:50:19 -04:00
|
|
|
|
|
|
|
return mount.mountID, nil
|
|
|
|
}
|
|
|
|
|
2015-12-16 17:13:50 -05:00
|
|
|
// ReleaseRWLayer drops the given reference to a writable layer. When no
// references remain, the RW layer (and its init layer, if any) is removed
// from the driver and metadata store, and the parent read-only chain is
// released; metadata for any removed read-only layers is returned. If a
// removal step fails, the caller's reference is restored via
// retakeReference so the mount remains usable.
func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) {
	ls.mountL.Lock()
	defer ls.mountL.Unlock()
	m, ok := ls.mounts[l.Name()]
	if !ok {
		// Unknown mount: nothing to release.
		return []Metadata{}, nil
	}

	if err := m.deleteReference(l); err != nil {
		return nil, err
	}

	if m.hasReferences() {
		// Other references still outstanding; keep the mount.
		return []Metadata{}, nil
	}

	if err := ls.driver.Remove(m.mountID); err != nil {
		logrus.Errorf("Error removing mounted layer %s: %s", m.name, err)
		m.retakeReference(l)
		return nil, err
	}

	if m.initID != "" {
		if err := ls.driver.Remove(m.initID); err != nil {
			logrus.Errorf("Error removing init layer %s: %s", m.name, err)
			m.retakeReference(l)
			return nil, err
		}
	}

	if err := ls.store.RemoveMount(m.name); err != nil {
		logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err)
		m.retakeReference(l)
		return nil, err
	}

	delete(ls.mounts, m.Name())

	// Release the reference the mount held on its parent chain.
	ls.layerL.Lock()
	defer ls.layerL.Unlock()
	if m.parent != nil {
		return ls.releaseLayer(m.parent)
	}

	return []Metadata{}, nil
}
|
|
|
|
|
2015-12-16 17:13:50 -05:00
|
|
|
// saveMount persists the mount's IDs and parent to the metadata store and
// then registers it in the in-memory map. Persistence happens before the
// map update so a mount is only visible once it can be restored.
func (ls *layerStore) saveMount(mount *mountedLayer) error {
	if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil {
		return err
	}

	if mount.initID != "" {
		if err := ls.store.SetInitID(mount.name, mount.initID); err != nil {
			return err
		}
	}

	if mount.parent != nil {
		if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil {
			return err
		}
	}

	ls.mounts[mount.name] = mount

	return nil
}
|
|
|
|
|
2016-03-20 00:42:58 -04:00
|
|
|
// initMount creates the special "<graph-id>-init" layer on top of parent,
// mounts it, runs initFunc against the mount path to populate it, and
// unmounts it again. It returns the init layer's ID, which becomes the
// parent of the RW layer proper (see CreateRWLayer).
func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) {
	// Use "<graph-id>-init" to maintain compatibility with graph drivers
	// which are expecting this layer with this special name. If all
	// graph drivers can be updated to not rely on knowing about this layer
	// then the initID should be randomly generated.
	initID := fmt.Sprintf("%s-init", graphID)

	createOpts := &graphdriver.CreateOpts{
		MountLabel: mountLabel,
		StorageOpt: storageOpt,
	}

	if err := ls.driver.CreateReadWrite(initID, parent, createOpts); err != nil {
		return "", err
	}
	p, err := ls.driver.Get(initID, "")
	if err != nil {
		return "", err
	}

	if err := initFunc(p); err != nil {
		// Unmount before surfacing the init error; Put's own error is
		// deliberately ignored here since initFunc's error takes priority.
		ls.driver.Put(initID)
		return "", err
	}

	if err := ls.driver.Put(initID); err != nil {
		return "", err
	}

	return initID, nil
}
|
|
|
|
|
2017-03-20 14:38:17 -04:00
|
|
|
// getTarStream returns a stream of the layer's tar contents. When the
// driver reproduces exact diffs, the driver's Diff output is returned
// directly; otherwise the original tar is reassembled from the stored
// tar-split metadata via a pipe fed by a background goroutine. The
// goroutine ends when assembly finishes or the reader side closes the
// pipe.
func (ls *layerStore) getTarStream(rl *roLayer) (io.ReadCloser, error) {
	if !ls.useTarSplit {
		var parentCacheID string
		if rl.parent != nil {
			parentCacheID = rl.parent.cacheID
		}

		return ls.driver.Diff(rl.cacheID, parentCacheID)
	}

	r, err := ls.store.TarSplitReader(rl.chainID)
	if err != nil {
		return nil, err
	}

	pr, pw := io.Pipe()
	go func() {
		// Any assembly error is propagated to the reader via the pipe.
		err := ls.assembleTarTo(rl.cacheID, r, nil, pw)
		if err != nil {
			pw.CloseWithError(err)
		} else {
			pw.Close()
		}
	}()

	return pr, nil
}
|
|
|
|
|
2015-11-29 22:55:22 -05:00
|
|
|
// assembleTarTo reconstructs a layer's original tar stream into w by
// combining stored tar-split metadata with file contents read from the
// graph driver. If size is non-nil it accumulates the number of bytes
// unpacked. metadata is always closed before returning.
func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error {
	// Fall back to path-based file retrieval for drivers without a
	// native DiffGetter implementation.
	diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver)
	if !ok {
		diffDriver = &naiveDiffPathDriver{ls.driver}
	}

	defer metadata.Close()

	// get our relative path to the container
	fileGetCloser, err := diffDriver.DiffGetter(graphID)
	if err != nil {
		return err
	}
	defer fileGetCloser.Close()

	metaUnpacker := storage.NewJSONUnpacker(metadata)
	upackerCounter := &unpackSizeCounter{metaUnpacker, size}
	logrus.Debugf("Assembling tar data for %s", graphID)
	return asm.WriteOutputTarStream(fileGetCloser, upackerCounter, w)
}
|
|
|
|
|
2015-12-16 15:32:16 -05:00
|
|
|
func (ls *layerStore) Cleanup() error {
|
|
|
|
return ls.driver.Cleanup()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (ls *layerStore) DriverStatus() [][2]string {
|
|
|
|
return ls.driver.Status()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (ls *layerStore) DriverName() string {
|
|
|
|
return ls.driver.String()
|
|
|
|
}
|
|
|
|
|
2015-11-18 17:15:00 -05:00
|
|
|
// naiveDiffPathDriver adapts a plain graphdriver.Driver so it can serve
// as a graphdriver.DiffGetterDriver (see assembleTarTo), reading files
// directly from the driver's mounted filesystem path.
type naiveDiffPathDriver struct {
	graphdriver.Driver
}
|
|
|
|
|
2016-02-18 20:58:23 -05:00
|
|
|
// fileGetPutter pairs a path-based FileGetter with the driver and layer
// ID needed to unmount (Put) the layer when the getter is closed.
type fileGetPutter struct {
	storage.FileGetter
	driver graphdriver.Driver // driver whose Get produced the path being read
	id     string             // layer ID to Put on Close
}
|
|
|
|
|
|
|
|
func (w *fileGetPutter) Close() error {
|
|
|
|
return w.driver.Put(w.id)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
|
2015-11-18 17:15:00 -05:00
|
|
|
p, err := n.Driver.Get(id, "")
|
|
|
|
if err != nil {
|
2016-02-18 20:58:23 -05:00
|
|
|
return nil, err
|
2015-11-18 17:15:00 -05:00
|
|
|
}
|
2016-02-18 20:58:23 -05:00
|
|
|
return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil
|
2015-11-18 17:15:00 -05:00
|
|
|
}
|