package plugin // import "github.com/docker/docker/plugin"

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"

	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/progress"
	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)
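
// blobstore describes a store for content-addressed blobs keyed by digest.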
type blobstore interface {
	New() (WriteCommitCloser, error)
	Get(dgst digest.Digest) (io.ReadCloser, error)
	Size(dgst digest.Digest) (int64, error)
}
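
// basicBlobStore is a filesystem-backed blobstore rooted at path.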
type basicBlobStore struct {
	path string
}
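
// newBasicBlobStore ensures the store's tmp directory exists under p and
// returns a blobstore rooted at p.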
func newBasicBlobStore(p string) (*basicBlobStore, error) {
	tmpdir := filepath.Join(p, "tmp")
	if err := os.MkdirAll(tmpdir, 0700); err != nil {
		return nil, errors.Wrapf(err, "failed to mkdir %v", tmpdir)
	}
	return &basicBlobStore{path: p}, nil
}
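
// New returns a WriteCommitCloser backed by a temp file; the data is moved
// into the store under its digest when Commit is called.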
func (b *basicBlobStore) New() (WriteCommitCloser, error) {
	f, err := ioutil.TempFile(filepath.Join(b.path, "tmp"), ".insertion")
	if err != nil {
		return nil, errors.Wrap(err, "failed to create temp file")
	}
	return newInsertion(f), nil
}
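
// Get opens the blob with the given digest for reading.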
func (b *basicBlobStore) Get(dgst digest.Digest) (io.ReadCloser, error) {
	return os.Open(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex()))
}
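
// Size returns the size in bytes of the blob with the given digest.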
func (b *basicBlobStore) Size(dgst digest.Digest) (int64, error) {
	stat, err := os.Stat(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex()))
	if err != nil {
		return 0, err
	}
	return stat.Size(), nil
}
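
// gc removes every blob whose digest is not referenced in the whitelist.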
func (b *basicBlobStore) gc(whitelist map[digest.Digest]struct{}) {
	for _, alg := range []string{string(digest.Canonical)} {
		items, err := ioutil.ReadDir(filepath.Join(b.path, alg))
		if err != nil {
			continue
		}
		for _, fi := range items {
			if _, exists := whitelist[digest.Digest(alg+":"+fi.Name())]; !exists {
				p := filepath.Join(b.path, alg, fi.Name())
				err := os.RemoveAll(p)
				logrus.Debugf("cleaned up blob %v: %v", p, err)
			}
		}
	}
}

// WriteCommitCloser defines an object that can be committed to the blobstore.
type WriteCommitCloser interface {
	io.WriteCloser
	Commit() (digest.Digest, error)
}
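
// insertion writes a blob to a temp file while hashing it, so that Commit can
// move it into the store under its final digest.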
type insertion struct {
	io.Writer
	f        *os.File
	digester digest.Digester
	closed   bool
}
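
// newInsertion wraps tempFile so that every write is also fed to a digester.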
func newInsertion(tempFile *os.File) *insertion {
	digester := digest.Canonical.Digester()
	return &insertion{f: tempFile, digester: digester, Writer: io.MultiWriter(tempFile, digester.Hash())}
}
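
// Commit finalizes the pending blob: it closes the temp file and renames it
// into the store's <algorithm>/<hex> layout, returning the content digest.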
func (i *insertion) Commit() (digest.Digest, error) {
	p := i.f.Name()
	d := filepath.Join(p, "../../")
	i.f.Sync()
	defer os.RemoveAll(p)
	if err := i.f.Close(); err != nil {
		return "", err
	}
	i.closed = true
	dgst := i.digester.Digest()
	if err := os.MkdirAll(filepath.Join(d, string(dgst.Algorithm())), 0700); err != nil {
		return "", errors.Wrapf(err, "failed to mkdir %v", d)
	}
	if err := os.Rename(p, filepath.Join(d, string(dgst.Algorithm()), dgst.Hex())); err != nil {
		return "", errors.Wrapf(err, "failed to rename %v", p)
	}
	return dgst, nil
}
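
// Close discards the pending blob unless it has already been committed.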
func (i *insertion) Close() error {
	if i.closed {
		return nil
	}
	defer os.RemoveAll(i.f.Name())
	return i.f.Close()
}
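
// downloadManager stores pulled layers and the config in a blobstore, applying
// layer content to tmpDir and recording the resulting digests.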
type downloadManager struct {
	blobStore    blobstore
	tmpDir       string
	blobs        []digest.Digest
	configDigest digest.Digest
}
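
// Download fetches each layer, stores the compressed blob in the blob store,
// applies the uncompressed content to tmpDir, and appends the layer's diff ID
// to the returned root FS.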
func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, os string, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
	for _, l := range layers {
		b, err := dm.blobStore.New()
		if err != nil {
			return initialRootFS, nil, err
		}
		defer b.Close()
		rc, _, err := l.Download(ctx, progressOutput)
		if err != nil {
			return initialRootFS, nil, errors.Wrap(err, "failed to download")
		}
		defer rc.Close()
		r := io.TeeReader(rc, b)
		inflatedLayerData, err := archive.DecompressStream(r)
		if err != nil {
			return initialRootFS, nil, err
		}
		defer inflatedLayerData.Close()
		digester := digest.Canonical.Digester()
		if _, err := chrootarchive.ApplyLayer(dm.tmpDir, io.TeeReader(inflatedLayerData, digester.Hash())); err != nil {
			return initialRootFS, nil, err
		}
		initialRootFS.Append(layer.DiffID(digester.Digest()))
		d, err := b.Commit()
		if err != nil {
			return initialRootFS, nil, err
		}
		dm.blobs = append(dm.blobs, d)
	}
	return initialRootFS, nil, nil
}
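
// Put stores dt as a blob and records the resulting digest as the config digest.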
func (dm *downloadManager) Put(dt []byte) (digest.Digest, error) {
	b, err := dm.blobStore.New()
	if err != nil {
		return "", err
	}
	defer b.Close()
	n, err := b.Write(dt)
	if err != nil {
		return "", err
	}
	if n != len(dt) {
		return "", io.ErrShortWrite
	}
	d, err := b.Commit()
	dm.configDigest = d
	return d, err
}
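
// Get always reports the digest as not found; this manager never returns
// cached content.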
func (dm *downloadManager) Get(d digest.Digest) ([]byte, error) {
	return nil, fmt.Errorf("digest not found")
}
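
// RootFSFromConfig delegates to configToRootFS to extract the root FS from the
// config bytes.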
func (dm *downloadManager) RootFSFromConfig(c []byte) (*image.RootFS, error) {
	return configToRootFS(c)
}
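
// PlatformFromConfig reports the runtime OS as the platform instead of parsing
// the config; see the TODO in the body.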
func (dm *downloadManager) PlatformFromConfig(c []byte) (*specs.Platform, error) {
	// TODO: LCOW/Plugins. This will need revisiting. For now use the runtime OS
	return &specs.Platform{OS: runtime.GOOS}, nil
}