2018-02-05 16:05:59 -05:00
|
|
|
package layer // import "github.com/docker/docker/layer"
|
2015-11-18 17:15:00 -05:00
|
|
|
|
|
|
|
import (
|
|
|
|
"compress/gzip"
|
2016-05-25 22:11:51 -04:00
|
|
|
"encoding/json"
|
2015-11-18 17:15:00 -05:00
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
|
|
|
"regexp"
|
|
|
|
"strconv"
|
2016-01-19 14:17:08 -05:00
|
|
|
"strings"
|
2015-11-18 17:15:00 -05:00
|
|
|
|
2016-05-25 22:11:51 -04:00
|
|
|
"github.com/docker/distribution"
|
2015-11-18 17:15:00 -05:00
|
|
|
"github.com/docker/docker/pkg/ioutils"
|
2017-01-06 20:23:18 -05:00
|
|
|
"github.com/opencontainers/go-digest"
|
2018-04-17 18:45:39 -04:00
|
|
|
"github.com/pkg/errors"
|
2017-07-26 17:42:13 -04:00
|
|
|
"github.com/sirupsen/logrus"
|
2015-11-18 17:15:00 -05:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// stringIDRegexp matches the cache/mount identifiers stored on disk:
	// a 64-character lowercase hex string, optionally suffixed with
	// "-init" for init layers.
	stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`)

	// supportedAlgorithms lists the digest algorithms for which layer
	// metadata directories may exist under the store root.
	supportedAlgorithms = []digest.Algorithm{
		digest.SHA256,
		// digest.SHA384, // Currently not used
		// digest.SHA512, // Currently not used
	}
)
|
|
|
|
|
|
|
|
// fileMetadataStore implements a layer metadata store backed by plain
// files below a single root directory.
type fileMetadataStore struct {
	root string
}
|
|
|
|
|
|
|
|
// fileMetadataTransaction accumulates metadata writes for a single layer
// in an atomic write set, which is later committed into the layer's
// final directory or cancelled.
type fileMetadataTransaction struct {
	store *fileMetadataStore
	ws    *ioutils.AtomicWriteSet
}
|
|
|
|
|
2018-03-05 18:46:21 -05:00
|
|
|
// newFSMetadataStore returns an instance of a metadata store
|
2015-11-18 17:15:00 -05:00
|
|
|
// which is backed by files on disk using the provided root
|
|
|
|
// as the root of metadata files.
|
2018-03-05 18:46:21 -05:00
|
|
|
func newFSMetadataStore(root string) (*fileMetadataStore, error) {
|
2015-11-18 17:15:00 -05:00
|
|
|
if err := os.MkdirAll(root, 0700); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return &fileMetadataStore{
|
|
|
|
root: root,
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string {
|
|
|
|
dgst := digest.Digest(layer)
|
|
|
|
return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex())
|
|
|
|
}
|
|
|
|
|
|
|
|
func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string {
|
|
|
|
return filepath.Join(fms.getLayerDirectory(layer), filename)
|
|
|
|
}
|
|
|
|
|
|
|
|
// getMountDirectory returns the on-disk directory holding the metadata
// files for the given mount name.
func (fms *fileMetadataStore) getMountDirectory(mount string) string {
	return filepath.Join(fms.root, "mounts", mount)
}
|
|
|
|
|
|
|
|
// getMountFilename returns the path of a named metadata file inside the
// mount's metadata directory.
func (fms *fileMetadataStore) getMountFilename(mount, filename string) string {
	return filepath.Join(fms.getMountDirectory(mount), filename)
}
|
|
|
|
|
2018-03-05 18:46:21 -05:00
|
|
|
func (fms *fileMetadataStore) StartTransaction() (*fileMetadataTransaction, error) {
|
2015-11-18 17:15:00 -05:00
|
|
|
tmpDir := filepath.Join(fms.root, "tmp")
|
|
|
|
if err := os.MkdirAll(tmpDir, 0755); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2016-08-09 14:55:17 -04:00
|
|
|
ws, err := ioutils.NewAtomicWriteSet(tmpDir)
|
2015-11-18 17:15:00 -05:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2016-08-09 14:55:17 -04:00
|
|
|
|
2015-11-18 17:15:00 -05:00
|
|
|
return &fileMetadataTransaction{
|
|
|
|
store: fms,
|
2016-08-09 14:55:17 -04:00
|
|
|
ws: ws,
|
2015-11-18 17:15:00 -05:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (fm *fileMetadataTransaction) SetSize(size int64) error {
|
|
|
|
content := fmt.Sprintf("%d", size)
|
2016-08-09 14:55:17 -04:00
|
|
|
return fm.ws.WriteFile("size", []byte(content), 0644)
|
2015-11-18 17:15:00 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
func (fm *fileMetadataTransaction) SetParent(parent ChainID) error {
|
2016-08-09 14:55:17 -04:00
|
|
|
return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0644)
|
2015-11-18 17:15:00 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error {
|
2016-08-09 14:55:17 -04:00
|
|
|
return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0644)
|
2015-11-18 17:15:00 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// SetCacheID records the driver-specific cache identifier of the layer
// as part of the transaction.
func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error {
	return fm.ws.WriteFile("cache-id", []byte(cacheID), 0644)
}
|
|
|
|
|
2016-06-06 20:49:34 -04:00
|
|
|
func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error {
|
2016-05-25 22:11:51 -04:00
|
|
|
jsonRef, err := json.Marshal(ref)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-08-09 14:55:17 -04:00
|
|
|
return fm.ws.WriteFile("descriptor.json", jsonRef, 0644)
|
2016-05-25 22:11:51 -04:00
|
|
|
}
|
|
|
|
|
2015-11-29 22:55:22 -05:00
|
|
|
func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) {
|
2016-08-09 14:55:17 -04:00
|
|
|
f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
|
2015-11-18 17:15:00 -05:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2015-11-29 22:55:22 -05:00
|
|
|
var wc io.WriteCloser
|
|
|
|
if compressInput {
|
|
|
|
wc = gzip.NewWriter(f)
|
|
|
|
} else {
|
|
|
|
wc = f
|
|
|
|
}
|
2015-11-18 17:15:00 -05:00
|
|
|
|
2015-11-29 22:55:22 -05:00
|
|
|
return ioutils.NewWriteCloserWrapper(wc, func() error {
|
|
|
|
wc.Close()
|
2015-11-18 17:15:00 -05:00
|
|
|
return f.Close()
|
|
|
|
}), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (fm *fileMetadataTransaction) Commit(layer ChainID) error {
|
|
|
|
finalDir := fm.store.getLayerDirectory(layer)
|
|
|
|
if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-08-09 14:55:17 -04:00
|
|
|
|
|
|
|
return fm.ws.Commit(finalDir)
|
2015-11-18 17:15:00 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// Cancel aborts the transaction, discarding any files written so far.
func (fm *fileMetadataTransaction) Cancel() error {
	return fm.ws.Cancel()
}
|
|
|
|
|
|
|
|
// String returns a string representation of the underlying write set,
// identifying the transaction (e.g. for logging).
func (fm *fileMetadataTransaction) String() string {
	return fm.ws.String()
}
|
|
|
|
|
|
|
|
func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) {
|
|
|
|
content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size"))
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
|
|
|
|
size, err := strconv.ParseInt(string(content), 10, 64)
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return size, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) {
|
|
|
|
content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent"))
|
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return "", nil
|
|
|
|
}
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
|
2017-01-06 20:23:18 -05:00
|
|
|
dgst, err := digest.Parse(strings.TrimSpace(string(content)))
|
2015-11-18 17:15:00 -05:00
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
|
|
|
|
return ChainID(dgst), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) {
|
|
|
|
content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff"))
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
|
2017-01-06 20:23:18 -05:00
|
|
|
dgst, err := digest.Parse(strings.TrimSpace(string(content)))
|
2015-11-18 17:15:00 -05:00
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
|
|
|
|
return DiffID(dgst), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) {
|
2016-01-19 14:17:08 -05:00
|
|
|
contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id"))
|
2015-11-18 17:15:00 -05:00
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
2016-01-19 14:17:08 -05:00
|
|
|
content := strings.TrimSpace(string(contentBytes))
|
2015-11-18 17:15:00 -05:00
|
|
|
|
2018-04-17 18:45:39 -04:00
|
|
|
if content == "" {
|
|
|
|
return "", errors.Errorf("invalid cache id value")
|
2015-11-18 17:15:00 -05:00
|
|
|
}
|
|
|
|
|
2016-01-19 14:17:08 -05:00
|
|
|
return content, nil
|
2015-11-18 17:15:00 -05:00
|
|
|
}
|
|
|
|
|
2016-06-06 20:49:34 -04:00
|
|
|
func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) {
|
2016-05-25 22:11:51 -04:00
|
|
|
content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "descriptor.json"))
|
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
2016-06-06 20:49:34 -04:00
|
|
|
// only return empty descriptor to represent what is stored
|
|
|
|
return distribution.Descriptor{}, nil
|
2016-05-25 22:11:51 -04:00
|
|
|
}
|
|
|
|
return distribution.Descriptor{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var ref distribution.Descriptor
|
|
|
|
err = json.Unmarshal(content, &ref)
|
|
|
|
if err != nil {
|
|
|
|
return distribution.Descriptor{}, err
|
|
|
|
}
|
|
|
|
return ref, err
|
|
|
|
}
|
|
|
|
|
2015-11-18 17:15:00 -05:00
|
|
|
func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) {
|
|
|
|
fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz"))
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
f, err := gzip.NewReader(fz)
|
|
|
|
if err != nil {
|
2017-06-19 23:56:25 -04:00
|
|
|
fz.Close()
|
2015-11-18 17:15:00 -05:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return ioutils.NewReadCloserWrapper(f, func() error {
|
|
|
|
f.Close()
|
|
|
|
return fz.Close()
|
|
|
|
}), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error {
|
|
|
|
if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (fms *fileMetadataStore) SetInitID(mount string, init string) error {
|
|
|
|
if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error {
|
|
|
|
if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (fms *fileMetadataStore) GetMountID(mount string) (string, error) {
|
2016-01-19 14:17:08 -05:00
|
|
|
contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id"))
|
2015-11-18 17:15:00 -05:00
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
2016-01-19 14:17:08 -05:00
|
|
|
content := strings.TrimSpace(string(contentBytes))
|
2015-11-18 17:15:00 -05:00
|
|
|
|
2016-01-19 14:17:08 -05:00
|
|
|
if !stringIDRegexp.MatchString(content) {
|
2015-11-18 17:15:00 -05:00
|
|
|
return "", errors.New("invalid mount id value")
|
|
|
|
}
|
|
|
|
|
2016-01-19 14:17:08 -05:00
|
|
|
return content, nil
|
2015-11-18 17:15:00 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
func (fms *fileMetadataStore) GetInitID(mount string) (string, error) {
|
2016-01-19 14:17:08 -05:00
|
|
|
contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id"))
|
2015-11-18 17:15:00 -05:00
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return "", nil
|
|
|
|
}
|
|
|
|
return "", err
|
|
|
|
}
|
2016-01-19 14:17:08 -05:00
|
|
|
content := strings.TrimSpace(string(contentBytes))
|
2015-11-18 17:15:00 -05:00
|
|
|
|
2016-01-19 14:17:08 -05:00
|
|
|
if !stringIDRegexp.MatchString(content) {
|
2015-11-18 17:15:00 -05:00
|
|
|
return "", errors.New("invalid init id value")
|
|
|
|
}
|
|
|
|
|
2016-01-19 14:17:08 -05:00
|
|
|
return content, nil
|
2015-11-18 17:15:00 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) {
|
|
|
|
content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent"))
|
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return "", nil
|
|
|
|
}
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
|
2017-01-06 20:23:18 -05:00
|
|
|
dgst, err := digest.Parse(strings.TrimSpace(string(content)))
|
2015-11-18 17:15:00 -05:00
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
|
|
|
|
return ChainID(dgst), nil
|
|
|
|
}
|
|
|
|
|
2019-05-07 21:27:15 -04:00
|
|
|
// getOrphan scans the per-algorithm metadata directories for entries
// whose name carries the "-removing" marker (layers whose deletion was
// interrupted) and returns them as roLayer values carrying their chain
// and cache IDs, so that cleanup can be retried.
func (fms *fileMetadataStore) getOrphan() ([]roLayer, error) {
	var orphanLayers []roLayer
	for _, algorithm := range supportedAlgorithms {
		fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm)))
		if err != nil {
			if os.IsNotExist(err) {
				// No directory for this algorithm simply means no layers
				// were ever stored under it.
				continue
			}
			return nil, err
		}

		for _, fi := range fileInfos {
			if fi.IsDir() && strings.Contains(fi.Name(), "-removing") {
				// Directory names look like "<encoded-digest>-removing";
				// the part before the first "-" is the encoded digest.
				nameSplit := strings.Split(fi.Name(), "-")
				dgst := digest.NewDigestFromEncoded(algorithm, nameSplit[0])
				if err := dgst.Validate(); err != nil {
					logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, nameSplit[0])
				} else {
					chainID := ChainID(dgst)
					chainFile := filepath.Join(fms.root, string(algorithm), fi.Name(), "cache-id")
					contentBytes, err := ioutil.ReadFile(chainFile)
					if err != nil {
						// NOTE(review): a read failure is only logged; the
						// layer is still reported below with an empty
						// cacheID — presumably so cleanup can proceed
						// regardless. Confirm callers tolerate this.
						logrus.WithError(err).WithField("digest", dgst).Error("cannot get cache ID")
					}
					cacheID := strings.TrimSpace(string(contentBytes))
					if cacheID == "" {
						// Also non-fatal: the orphan entry is appended
						// regardless of the invalid cache id.
						logrus.Errorf("invalid cache id value")
					}

					l := &roLayer{
						chainID: chainID,
						cacheID: cacheID,
					}
					orphanLayers = append(orphanLayers, *l)
				}
			}
		}
	}
	return orphanLayers, nil
}
|
|
|
|
|
2015-11-18 17:15:00 -05:00
|
|
|
func (fms *fileMetadataStore) List() ([]ChainID, []string, error) {
|
|
|
|
var ids []ChainID
|
|
|
|
for _, algorithm := range supportedAlgorithms {
|
|
|
|
fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm)))
|
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, fi := range fileInfos {
|
|
|
|
if fi.IsDir() && fi.Name() != "mounts" {
|
|
|
|
dgst := digest.NewDigestFromHex(string(algorithm), fi.Name())
|
|
|
|
if err := dgst.Validate(); err != nil {
|
|
|
|
logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name())
|
|
|
|
} else {
|
|
|
|
ids = append(ids, ChainID(dgst))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts"))
|
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return ids, []string{}, nil
|
|
|
|
}
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var mounts []string
|
|
|
|
for _, fi := range fileInfos {
|
|
|
|
if fi.IsDir() {
|
|
|
|
mounts = append(mounts, fi.Name())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return ids, mounts, nil
|
|
|
|
}
|
|
|
|
|
2019-05-07 21:27:15 -04:00
|
|
|
// Remove layerdb folder if that is marked for removal
|
|
|
|
func (fms *fileMetadataStore) Remove(layer ChainID, cache string) error {
|
|
|
|
dgst := digest.Digest(layer)
|
|
|
|
files, err := ioutil.ReadDir(filepath.Join(fms.root, string(dgst.Algorithm())))
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
for _, f := range files {
|
|
|
|
if !strings.HasSuffix(f.Name(), "-removing") || !strings.HasPrefix(f.Name(), dgst.String()) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure that we only remove layerdb folder which points to
|
|
|
|
// requested cacheID
|
|
|
|
dir := filepath.Join(fms.root, string(dgst.Algorithm()), f.Name())
|
|
|
|
chainFile := filepath.Join(dir, "cache-id")
|
|
|
|
contentBytes, err := ioutil.ReadFile(chainFile)
|
|
|
|
if err != nil {
|
|
|
|
logrus.WithError(err).WithField("file", chainFile).Error("cannot get cache ID")
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
cacheID := strings.TrimSpace(string(contentBytes))
|
|
|
|
if cacheID != cache {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
logrus.Debugf("Removing folder: %s", dir)
|
|
|
|
err = os.RemoveAll(dir)
|
|
|
|
if err != nil && !os.IsNotExist(err) {
|
|
|
|
logrus.WithError(err).WithField("name", f.Name()).Error("cannot remove layer")
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
2015-11-18 17:15:00 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// RemoveMount deletes all metadata stored for the given mount.
func (fms *fileMetadataStore) RemoveMount(mount string) error {
	return os.RemoveAll(fms.getMountDirectory(mount))
}
|