
vendor: update buildkit to v0.6.1

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Tonis Tiigi, 2019-08-14 17:46:46 -07:00
parent d5f607bd0f
commit e59b26087f
12 changed files with 98 additions and 40 deletions

View file

@@ -27,7 +27,7 @@ github.com/imdario/mergo 7c29201646fa3de8506f70121347
 golang.org/x/sync e225da77a7e68af35c70ccbf71af2b83e6acac3c
 # buildkit
-github.com/moby/buildkit f5a55a9516d1c6e2ade9bec22b83259caeed3a84
+github.com/moby/buildkit be0d75f074e7a4b0f5b5877c719213a3f5057e60 # v0.6.1
 github.com/tonistiigi/fsutil 3bbb99cdbd76619ab717299830c60f6f2a533a6b
 github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
 github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7

View file

@@ -111,7 +111,7 @@ func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte
 	}
 	var mu sync.Mutex
-	cc := v1.NewCacheChains()
+	var cMap = map[digest.Digest]*v1.CacheChains{}
 	eg, ctx := errgroup.WithContext(ctx)
 	for dgst, dt := range m {
@@ -183,11 +183,12 @@ func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte
 			if err != nil {
 				return errors.WithStack(err)
 			}
-			mu.Lock()
+			cc := v1.NewCacheChains()
 			if err := v1.ParseConfig(config, layers, cc); err != nil {
 				return err
 			}
+			mu.Lock()
+			cMap[dgst] = cc
 			mu.Unlock()
 			return nil
 		})
@@ -198,11 +199,17 @@ func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte
 		return nil, err
 	}
+	cms := make([]solver.CacheManager, 0, len(cMap))
+	for _, cc := range cMap {
 		keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w)
 		if err != nil {
 			return nil, err
 		}
-	return solver.NewCacheManager(id, keysStorage, resultStorage), nil
+		cms = append(cms, solver.NewCacheManager(id, keysStorage, resultStorage))
+	}
+	return solver.NewCombinedCacheManager(cms, nil), nil
 }
 
 func (ci *contentCacheImporter) allDistributionManifests(ctx context.Context, dt []byte, m map[digest.Digest][]byte) error {

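The hunk above replaces the single shared `cc := v1.NewCacheChains()` with a per-manifest map (`cMap`), filled from errgroup workers under a mutex and later merged into one combined cache manager, so each imported config gets its own chain instead of every worker mutating the same one. A minimal, self-contained sketch of that pattern, with generic names rather than buildkit's types:

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

func main() {
	inputs := map[string][]byte{"manifest-a": []byte("aa"), "manifest-b": []byte("bbb")}

	var mu sync.Mutex
	results := map[string]int{} // plays the role of cMap: one entry per input

	eg, _ := errgroup.WithContext(context.Background())
	for k, v := range inputs {
		k, v := k, v // capture range variables for the goroutine
		eg.Go(func() error {
			parsed := len(v) // stand-in for parsing a config into a fresh chain
			mu.Lock()        // lock only around the shared-map write
			results[k] = parsed
			mu.Unlock()
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		panic(err)
	}
	fmt.Println(results)
}
```

Each worker builds its result without holding the lock and takes the mutex only for the map write, which is the same shape as the `cMap[dgst] = cc` critical section in the hunk above.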
View file

@@ -31,6 +31,12 @@ func (ce *exporter) Finalize(ctx context.Context) (map[string]string, error) {
 	return nil, nil
 }
 
+func (ce *exporter) reset() {
+	cc := v1.NewCacheChains()
+	ce.CacheExporterTarget = cc
+	ce.chains = cc
+}
+
 func (ce *exporter) ExportForLayers(layers []digest.Digest) ([]byte, error) {
 	config, descs, err := ce.chains.Marshal()
 	if err != nil {
@@ -82,6 +88,7 @@ func (ce *exporter) ExportForLayers(layers []digest.Digest) ([]byte, error) {
 	if err != nil {
 		return nil, err
 	}
+	ce.reset()
 	return dt, nil
 }

View file

@@ -78,10 +78,14 @@ func sortConfig(cc *CacheConfig) {
 				if ri.Inputs[i][j].Selector != rj.Inputs[i][j].Selector {
 					return ri.Inputs[i][j].Selector < rj.Inputs[i][j].Selector
 				}
-				return cc.Records[ri.Inputs[i][j].LinkIndex].Digest < cc.Records[rj.Inputs[i][j].LinkIndex].Digest
+				inputDigesti := cc.Records[ri.Inputs[i][j].LinkIndex].Digest
+				inputDigestj := cc.Records[rj.Inputs[i][j].LinkIndex].Digest
+				if inputDigesti != inputDigestj {
+					return inputDigesti < inputDigestj
+				}
 			}
 		}
-		return ri.Digest < rj.Digest
+		return false
 	})
 	for i, l := range sortedRecords {
 		l.newIndex = i

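The sortConfig change matters because the old comparator returned from inside the input loop on the first pair of link digests even when they were equal, so later inputs never broke ties and the result of `sort.Slice` depended on the initial permutation of the records. The fixed version only returns when it finds an actual difference and falls through to `return false` for fully equal records. A small stand-alone illustration of that multi-key `sort.Slice` comparator, using a hypothetical record type rather than the cache config structs:

```go
package main

import (
	"fmt"
	"sort"
)

// rec stands in for the records sorted in sortConfig: a digest plus input digests.
type rec struct {
	digest string
	inputs []string
}

func main() {
	rs := []rec{
		{digest: "b", inputs: []string{"x", "y"}},
		{digest: "a", inputs: []string{"x", "z"}},
		{digest: "a", inputs: []string{"x", "y"}},
	}
	sort.Slice(rs, func(i, j int) bool {
		if rs[i].digest != rs[j].digest {
			return rs[i].digest < rs[j].digest
		}
		for k := range rs[i].inputs {
			// Only return when the keys actually differ; keep checking otherwise.
			if rs[i].inputs[k] != rs[j].inputs[k] {
				return rs[i].inputs[k] < rs[j].inputs[k]
			}
		}
		return false // fully equal: reporting "less" here would break the ordering
	})
	fmt.Println(rs)
}
```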
View file

@@ -49,6 +49,7 @@ const (
 	keyNameContext = "contextkey"
 	keyNameDockerfile = "dockerfilekey"
 	keyContextSubDir = "contextsubdir"
+	keyContextKeepGitDir = "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR"
 )
 
 var httpPrefix = regexp.MustCompile(`^https?://`)
@@ -129,7 +130,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
 	var buildContext *llb.State
 	isScratchContext := false
-	if st, ok := detectGitContext(opts[localNameContext]); ok {
+	if st, ok := detectGitContext(opts[localNameContext], opts[keyContextKeepGitDir]); ok {
 		if !forceLocalDockerfile {
 			src = *st
 		}
@@ -451,12 +452,19 @@ func filter(opt map[string]string, key string) map[string]string {
 	return m
 }
 
-func detectGitContext(ref string) (*llb.State, bool) {
+func detectGitContext(ref, gitContext string) (*llb.State, bool) {
 	found := false
 	if httpPrefix.MatchString(ref) && gitUrlPathWithFragmentSuffix.MatchString(ref) {
 		found = true
 	}
+	keepGit := false
+	if gitContext != "" {
+		if v, err := strconv.ParseBool(gitContext); err == nil {
+			keepGit = v
+		}
+	}
 	for _, prefix := range []string{"git://", "github.com/", "git@"} {
 		if strings.HasPrefix(ref, prefix) {
 			found = true
@@ -472,7 +480,12 @@ func detectGitContext(ref string) (*llb.State, bool) {
 	if len(parts) > 1 {
 		branch = parts[1]
 	}
-	st := llb.Git(parts[0], branch, dockerfile2llb.WithInternalName("load git source "+ref))
+	gitOpts := []llb.GitOption{dockerfile2llb.WithInternalName("load git source " + ref)}
+	if keepGit {
+		gitOpts = append(gitOpts, llb.KeepGitDir())
+	}
+	st := llb.Git(parts[0], branch, gitOpts...)
 	return &st, true
 }

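With the new `keyContextKeepGitDir` constant, the dockerfile frontend reads the option as a build arg named `BUILDKIT_CONTEXT_KEEP_GIT_DIR` and, when it parses as true, adds `llb.KeepGitDir()` to the git context source. A hedged sketch of how a BuildKit API client might pass it through `SolveOpt.FrontendAttrs`; the daemon address and repository URL below are placeholders, and the solve setup is reduced to the relevant fields:

```go
package main

import (
	"context"
	"log"

	"github.com/moby/buildkit/client"
)

func main() {
	ctx := context.Background()

	// Address and repository are illustrative only.
	c, err := client.New(ctx, "unix:///run/buildkit/buildkitd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	opt := client.SolveOpt{
		Frontend: "dockerfile.v0",
		FrontendAttrs: map[string]string{
			// A git URL context is routed through detectGitContext in the hunk above.
			"context": "https://github.com/example/repo.git#master",
			// Parsed with strconv.ParseBool, so "1" or "true" enables llb.KeepGitDir().
			"build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR": "1",
		},
	}
	if _, err := c.Solve(ctx, nil, opt, nil); err != nil {
		log.Fatal(err)
	}
}
```

From the docker CLI the equivalent would be passing `--build-arg BUILDKIT_CONTEXT_KEEP_GIT_DIR=1` on a build whose context is a git URL, assuming BuildKit is the active builder.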
View file

@@ -746,9 +746,9 @@ func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceS
 			}}, copyOpt...)
 			if a == nil {
-				a = llb.Copy(sourceState, src, dest, opts...)
+				a = llb.Copy(sourceState, filepath.Join("/", src), dest, opts...)
 			} else {
-				a = a.Copy(sourceState, src, dest, opts...)
+				a = a.Copy(sourceState, filepath.Join("/", src), dest, opts...)
 			}
 		}
 	}

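Joining the COPY/ADD source onto "/" before handing it to `llb.Copy` cleans the path and keeps it anchored at the root of the source state, so `..` segments cannot climb above it. A tiny standard-library-only demonstration of what `filepath.Join("/", src)` does to a few representative inputs (shown for Unix-style path separators):

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	for _, src := range []string{"app/", "./app", "../secret", "a/../../etc/passwd"} {
		// filepath.Join cleans the result, so the joined path stays anchored at "/".
		fmt.Printf("%-22s -> %s\n", src, filepath.Join("/", src))
	}
}
```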
View file

@@ -12,6 +12,7 @@ import (
 	"github.com/moby/buildkit/client/llb"
 	"github.com/moby/buildkit/frontend/dockerfile/instructions"
+	"github.com/moby/buildkit/solver/pb"
 	"github.com/pkg/errors"
 )
@@ -113,7 +114,7 @@ func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*
 		}
 		if mount.ReadOnly {
 			mountOpts = append(mountOpts, llb.Readonly)
-		} else if mount.Type == instructions.MountTypeBind {
+		} else if mount.Type == instructions.MountTypeBind && opt.llbCaps.Supports(pb.CapExecMountBindReadWriteNoOuput) == nil {
 			mountOpts = append(mountOpts, llb.ForceNoOutput)
 		}
 		if mount.Type == instructions.MountTypeCache {

View file

@@ -11,7 +11,7 @@ import (
 	"golang.org/x/sync/errgroup"
 )
 
-func newCombinedCacheManager(cms []CacheManager, main CacheManager) CacheManager {
+func NewCombinedCacheManager(cms []CacheManager, main CacheManager) CacheManager {
 	return &combinedCacheManager{cms: cms, main: main}
 }
@@ -80,7 +80,7 @@ func (cm *combinedCacheManager) Load(ctx context.Context, rec *CacheRecord) (res
 			res.Result.Release(context.TODO())
 		}
 	}()
-	if rec.cacheManager != cm.main {
+	if rec.cacheManager != cm.main && cm.main != nil {
 		for _, res := range results {
 			if _, err := cm.main.Save(res.CacheKey, res.Result, res.CacheResult.CreatedAt); err != nil {
 				return nil, err
@@ -91,6 +91,9 @@
 }
 
 func (cm *combinedCacheManager) Save(key *CacheKey, s Result, createdAt time.Time) (*ExportableCacheKey, error) {
+	if cm.main == nil {
+		return nil, nil
+	}
 	return cm.main.Save(key, s, createdAt)
 }

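Because the inline cache importer above now constructs `NewCombinedCacheManager(cms, nil)`, the combined manager may have no writable main cache: `Load` skips the write-back when `cm.main` is nil and `Save` becomes a no-op returning `(nil, nil)`. A stripped-down sketch of that guard pattern with placeholder types, not the solver's interfaces:

```go
package main

import "fmt"

// store is a minimal stand-in for a writable cache manager.
type store interface {
	Save(key string) error
}

// combined mirrors the shape of combinedCacheManager: read-only sources plus
// an optional writable main store.
type combined struct {
	sources []store
	main    store // may be nil for import-only use, as in the hunks above
}

// Save writes through to the main store only when one is configured.
func (c *combined) Save(key string) error {
	if c.main == nil {
		return nil // mirror the early return: nothing to write back to
	}
	return c.main.Save(key)
}

func main() {
	readOnly := &combined{main: nil}
	fmt.Println(readOnly.Save("sha256:abc")) // <nil>: silently skipped
}
```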
View file

@@ -141,7 +141,7 @@ func (s *state) combinedCacheManager() CacheManager {
 		return s.mainCache
 	}
-	return newCombinedCacheManager(cms, s.mainCache)
+	return NewCombinedCacheManager(cms, s.mainCache)
 }
 
 func (s *state) Release() {

View file

@@ -256,7 +256,7 @@ func (e *execOp) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id
 }
 
 func (e *execOp) getRefCacheDirNoCache(ctx context.Context, key string, ref cache.ImmutableRef, id string, m *pb.Mount, block bool) (cache.MutableRef, error) {
-	makeMutable := func(cache.ImmutableRef) (cache.MutableRef, error) {
+	makeMutable := func(ref cache.ImmutableRef) (cache.MutableRef, error) {
 		desc := fmt.Sprintf("cached mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " "))
 		return e.cm.New(ctx, ref, cache.WithRecordType(client.UsageRecordTypeCacheMount), cache.WithDescription(desc), cache.CachePolicyRetain)
 	}
@@ -585,7 +585,7 @@ func (e *execOp) Exec(ctx context.Context, inputs []solver.Result) ([]solver.Res
 				mountable = ref
 			}
-			makeMutable := func(cache.ImmutableRef) (cache.MutableRef, error) {
+			makeMutable := func(ref cache.ImmutableRef) (cache.MutableRef, error) {
 				desc := fmt.Sprintf("mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " "))
 				return e.cm.New(ctx, ref, cache.WithDescription(desc))
 			}
@@ -606,7 +606,7 @@ func (e *execOp) Exec(ctx context.Context, inputs []solver.Result) ([]solver.Res
 				outputs = append(outputs, active)
 				mountable = active
 			}
-		} else if ref == nil {
+		} else if (!m.Readonly || ref == nil) && m.Dest != pb.RootMount {
 			// this case is empty readonly scratch without output that is not really useful for anything but don't error
 			active, err := makeMutable(ref)
 			if err != nil {

View file

@@ -36,6 +36,7 @@ const (
 	CapExecMetaSecurity apicaps.CapID = "exec.meta.security"
 	CapExecMetaSetsDefaultPath apicaps.CapID = "exec.meta.setsdefaultpath"
 	CapExecMountBind apicaps.CapID = "exec.mount.bind"
+	CapExecMountBindReadWriteNoOuput apicaps.CapID = "exec.mount.bind.readwrite-nooutput"
 	CapExecMountCache apicaps.CapID = "exec.mount.cache"
 	CapExecMountCacheSharing apicaps.CapID = "exec.mount.cache.sharing"
 	CapExecMountSelector apicaps.CapID = "exec.mount.selector"
@@ -193,6 +194,12 @@ func init() {
 		Status: apicaps.CapStatusExperimental,
 	})
 
+	Caps.Init(apicaps.Cap{
+		ID: CapExecMountBindReadWriteNoOuput,
+		Enabled: true,
+		Status: apicaps.CapStatusExperimental,
+	})
+
 	Caps.Init(apicaps.Cap{
 		ID: CapExecMountCache,
 		Enabled: true,

View file

@@ -153,6 +153,14 @@ type gitSourceHandler struct {
 	cacheKey string
 }
 
+func (gs *gitSourceHandler) shaToCacheKey(sha string) string {
+	key := sha
+	if gs.src.KeepGitDir {
+		key += ".git"
+	}
+	return key
+}
+
 func (gs *gitSource) Resolve(ctx context.Context, id source.Identifier, _ *session.Manager) (source.SourceInstance, error) {
 	gitIdentifier, ok := id.(*source.GitIdentifier)
 	if !ok {
@@ -175,6 +183,7 @@ func (gs *gitSourceHandler) CacheKey(ctx context.Context, index int) (string, bo
 	defer gs.locker.Unlock(remote)
 	if isCommitSHA(ref) {
+		ref = gs.shaToCacheKey(ref)
 		gs.cacheKey = ref
 		return ref, true, nil
 	}
@@ -201,6 +210,7 @@ func (gs *gitSourceHandler) CacheKey(ctx context.Context, index int) (string, bo
 	if !isCommitSHA(sha) {
 		return "", false, errors.Errorf("invalid commit sha %q", sha)
 	}
+	sha = gs.shaToCacheKey(sha)
 	gs.cacheKey = sha
 	return sha, true, nil
 }
@@ -298,11 +308,15 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context) (out cache.ImmutableRe
 	}()
 
 	if gs.src.KeepGitDir {
-		_, err = gitWithinDir(ctx, checkoutDir, "", "init")
+		checkoutDirGit := filepath.Join(checkoutDir, ".git")
+		if err := os.MkdirAll(checkoutDir, 0711); err != nil {
+			return nil, err
+		}
+		_, err = gitWithinDir(ctx, checkoutDirGit, "", "init")
 		if err != nil {
 			return nil, err
 		}
-		_, err = gitWithinDir(ctx, checkoutDir, "", "remote", "add", "origin", gitDir)
+		_, err = gitWithinDir(ctx, checkoutDirGit, "", "remote", "add", "origin", gitDir)
 		if err != nil {
 			return nil, err
 		}
@@ -313,16 +327,18 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context) (out cache.ImmutableRe
 			if err != nil {
 				return nil, err
 			}
-		}
+		} else {
+			pullref += ":" + pullref
+		}
-		_, err = gitWithinDir(ctx, checkoutDir, "", "fetch", "--depth=1", "origin", pullref)
+		_, err = gitWithinDir(ctx, checkoutDirGit, "", "fetch", "-u", "--depth=1", "origin", pullref)
 		if err != nil {
 			return nil, err
 		}
-		_, err = gitWithinDir(ctx, checkoutDir, checkoutDir, "checkout", "FETCH_HEAD")
+		_, err = gitWithinDir(ctx, checkoutDirGit, checkoutDir, "checkout", "FETCH_HEAD")
 		if err != nil {
 			return nil, errors.Wrapf(err, "failed to checkout remote %s", gs.src.Remote)
 		}
-		gitDir = checkoutDir
+		gitDir = checkoutDirGit
 	} else {
 		_, err = gitWithinDir(ctx, gitDir, checkoutDir, "checkout", ref, "--", ".")
 		if err != nil {