
vendor: github.com/moby/buildkit v0.8.2

full diff: 68bb095353...9065b18ba4

- fix seccomp compatibility in 32bit arm
    - fixes Unable to build alpine:edge containers for armv7
    - fixes Buildx failing to build for arm/v7 platform on arm64 machine
- resolver: avoid error caching on token fetch
    - fixes "Error: i/o timeout should not be cached"
- fileop: fix checksum to contain indexes of inputs
- frontend/dockerfile: add RunCommand.FlagsUsed field
    - relates to [20.10] Classic builder silently ignores unsupported Dockerfile command flags
- update qemu emulators
    - relates to "Impossible to run git clone inside buildx with non x86 architecture"
- Fix reference count issues on typed errors with mount references
    - fixes errors on releasing mounts with typed execerror refs
    - fixes / addresses invalid mutable ref when using shared cache mounts
- dockerfile/docs: fix frontend image tags
- git: set token only for main remote access
    - fixes "Loading repositories with submodules is repeated. Failed to clone submodule from googlesource"
- allow skipping empty layer detection on cache export

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Signed-off-by: Tibor Vass <tibor@docker.com>
(cherry picked from commit 9962a3f74e)
Signed-off-by: Tibor Vass <tibor@docker.com>
Authored by Sebastiaan van Stijn on 2021-02-22 10:41:18 +01:00; committed by Tibor Vass
parent 148e6c9514
commit 3ce37a6aa4
16 changed files with 154 additions and 56 deletions

@@ -33,7 +33,7 @@ github.com/imdario/mergo 1afb36080aec31e0d1528973ebe6
 golang.org/x/sync cd5d95a43a6e21273425c7ae415d3df9ea832eeb
 
 # buildkit
-github.com/moby/buildkit 68bb095353c65bc3993fd534c26cf77fe05e61b1 # v0.8 branch
+github.com/moby/buildkit 9065b18ba4633c75862befca8188de4338d9f94a # v0.8.2
 github.com/tonistiigi/fsutil 0834f99b7b85462efb69b4f571a4fa3ca7da5ac9
 github.com/tonistiigi/units 6950e57a87eaf136bbe44ef2ec8e75b9e3569de2
 github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746

@@ -10,6 +10,10 @@ import (
     "github.com/pkg/errors"
 )
 
+// EmptyLayerRemovalSupported defines if implementation supports removal of empty layers. Buildkit image exporter
+// removes empty layers, but moby layerstore based implementation does not.
+var EmptyLayerRemovalSupported = true
+
 // sortConfig sorts the config structure to make sure it is deterministic
 func sortConfig(cc *CacheConfig) {
     type indexedLayer struct {
@@ -239,7 +243,7 @@ func marshalRemote(r *solver.Remote, state *marshalState) string {
 }
 desc := r.Descriptors[len(r.Descriptors)-1]
-if desc.Digest == exptypes.EmptyGZLayer {
+if desc.Digest == exptypes.EmptyGZLayer && EmptyLayerRemovalSupported {
     return parentID
 }
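
The new package-level switch lets an embedder keep empty gzip layers in the marshalled cache chain instead of collapsing them as the hunk above does by default. A minimal sketch of how a daemon whose layer store does not drop empty layers might flip it; the import alias and the wiring point are assumptions, not moby's actual integration code:

import cacheimport "github.com/moby/buildkit/cache/remotecache/v1"

// Somewhere during daemon/exporter initialization (illustrative): the layerstore-based
// exporter keeps empty layers, so keep them in the exported cache chain as well.
cacheimport.EmptyLayerRemovalSupported = false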

@@ -108,6 +108,15 @@ func (fl *Flag) IsUsed() bool {
     return false
 }
 
+// Used returns a slice of flag names that are set
+func (bf *BFlags) Used() []string {
+    used := make([]string, 0, len(bf.used))
+    for f := range bf.used {
+        used = append(used, f)
+    }
+    return used
+}
+
 // IsTrue checks if a bool flag is true
 func (fl *Flag) IsTrue() bool {
     if fl.flagType != boolType {
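
Used reports only the flags that were actually set on an instruction, which lets callers distinguish a supported-but-unused flag from one that would otherwise be silently dropped. A hedged illustration; it assumes the package's existing NewBFlagsWithArgs and AddString helpers:

// usedRunFlags parses a RUN-style flag list and reports which flags were set.
func usedRunFlags() ([]string, error) {
    flags := instructions.NewBFlagsWithArgs([]string{"--mount=type=cache,target=/root/.cache"})
    flags.AddString("mount", "")
    flags.AddString("network", "")
    if err := flags.Parse(); err != nil {
        return nil, err
    }
    return flags.Used(), nil // [mount] — "network" was never set
}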

@@ -269,6 +269,7 @@ type RunCommand struct {
     withNameAndCode
     withExternalData
     ShellDependantCmdLine
+    FlagsUsed []string
 }
 
 // CmdCommand : CMD foo

@@ -375,7 +375,7 @@ func parseRun(req parseRequest) (*RunCommand, error) {
     if err := req.flags.Parse(); err != nil {
         return nil, err
     }
+    cmd.FlagsUsed = req.flags.Used()
 
     cmd.ShellDependantCmdLine = parseShellDependentCommand(req, false)
     cmd.withNameAndCode = newWithNameAndCode(req)
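
parseRun now records the parsed flag names on the command, so a builder that implements none of the BuildKit-only RUN flags can warn about them instead of ignoring them silently (the 20.10 classic-builder issue referenced in the changelog). A hedged sketch of such a consumer; the helper name and warning format are illustrative, not moby's actual code:

import (
    "fmt"
    "io"

    "github.com/moby/buildkit/frontend/dockerfile/instructions"
)

// warnUnsupportedRunFlags reports RUN flags that this builder will ignore.
func warnUnsupportedRunFlags(cmd *instructions.RunCommand, out io.Writer) {
    for _, name := range cmd.FlagsUsed {
        fmt.Fprintf(out, "WARNING: RUN flag --%s is not supported by this builder and will be ignored\n", name)
    }
}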

@@ -127,6 +127,7 @@ type MountRef struct {
 type MountMutableRef struct {
     Ref cache.MutableRef
     MountIndex int
+    NoCommit bool
 }
 
 type MakeMutable func(m *opspb.Mount, ref cache.ImmutableRef) (cache.MutableRef, error)
@@ -196,6 +197,7 @@ func PrepareMounts(ctx context.Context, mm *mounts.MountManager, cm cache.Manage
             p.Actives = append(p.Actives, MountMutableRef{
                 MountIndex: i,
                 Ref: active,
+                NoCommit: true,
             })
             if m.Output != opspb.SkipOutput && ref != nil {
                 p.OutputRefs = append(p.OutputRefs, MountRef{

@@ -21,10 +21,15 @@ func (e *ExecError) Unwrap() error {
 }
 
 func (e *ExecError) EachRef(fn func(solver.Result) error) (err error) {
+    m := map[solver.Result]struct{}{}
     for _, res := range e.Inputs {
         if res == nil {
             continue
         }
+        if _, ok := m[res]; ok {
+            continue
+        }
+        m[res] = struct{}{}
         if err1 := fn(res); err1 != nil && err == nil {
             err = err1
         }
@@ -33,6 +38,10 @@ func (e *ExecError) EachRef(fn func(solver.Result) error) (err error) {
         if res == nil {
             continue
         }
+        if _, ok := m[res]; ok {
+            continue
+        }
+        m[res] = struct{}{}
         if err1 := fn(res); err1 != nil && err == nil {
             err = err1
         }
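
Because exec inputs are now cloned into the error (see the exec hunk below), the same Result can appear in both Inputs and Mounts; the map guarantees the callback runs once per ref, which matters when the callback releases it. A hedged usage sketch, assuming the upstream buildkit package paths:

import (
    "context"
    "errors"

    "github.com/moby/buildkit/solver"
    "github.com/moby/buildkit/solver/errdefs"
)

// releaseExecErrorRefs releases every ref carried by a typed exec error exactly once.
func releaseExecErrorRefs(err error) {
    var ee *errdefs.ExecError
    if errors.As(err, &ee) {
        _ = ee.EachRef(func(res solver.Result) error {
            return res.Release(context.TODO())
        })
    }
}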

@@ -235,7 +235,7 @@ func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu
             if m.Input == -1 {
                 continue
             }
-            execInputs[i] = inputs[m.Input]
+            execInputs[i] = inputs[m.Input].Clone()
         }
         execMounts := make([]solver.Result, len(e.op.Mounts))
         copy(execMounts, execInputs)
@@ -243,12 +243,16 @@ func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu
             execMounts[p.OutputRefs[i].MountIndex] = res
         }
         for _, active := range p.Actives {
-            ref, cerr := active.Ref.Commit(ctx)
-            if cerr != nil {
-                err = errors.Wrapf(err, "error committing %s: %s", active.Ref.ID(), cerr)
-                continue
+            if active.NoCommit {
+                active.Ref.Release(context.TODO())
+            } else {
+                ref, cerr := active.Ref.Commit(ctx)
+                if cerr != nil {
+                    err = errors.Wrapf(err, "error committing %s: %s", active.Ref.ID(), cerr)
+                    continue
+                }
+                execMounts[active.MountIndex] = worker.NewWorkerRefResult(ref, e.w)
             }
-            execMounts[active.MountIndex] = worker.NewWorkerRefResult(ref, e.w)
         }
         err = errdefs.WithExecError(err, execInputs, execMounts)
     } else {

@@ -27,6 +27,7 @@ var qemuArchMap = map[string]string{
     "arm": "arm",
     "s390x": "s390x",
     "ppc64le": "ppc64le",
+    "386": "i386",
 }
 
 type emulator struct {
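
The map feeds the name of the QEMU emulator to probe for each platform architecture; 32-bit x86 is spelled "386" in the platform spec but "i386" by QEMU, so without this entry the emulator was never detected. A small illustration of the lookup, reusing qemuArchMap from the hunk above:

if qemu, ok := qemuArchMap["386"]; ok {
    fmt.Println(qemu) // i386
}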

@@ -61,6 +61,8 @@ func (f *fileOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol
         }
     }
 
+    indexes := make([][]int, 0, len(f.op.Actions))
+
     for _, action := range f.op.Actions {
         var dt []byte
         var err error
@@ -103,14 +105,21 @@ func (f *fileOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol
         }
 
         actions = append(actions, dt)
+        indexes = append(indexes, []int{int(action.Input), int(action.SecondaryInput), int(action.Output)})
     }
 
+    if isDefaultIndexes(indexes) {
+        indexes = nil
+    }
+
     dt, err := json.Marshal(struct {
         Type string
         Actions [][]byte
+        Indexes [][]int `json:"indexes,omitempty"`
     }{
         Type: fileCacheType,
         Actions: actions,
+        Indexes: indexes,
     })
     if err != nil {
         return nil, false, err
@@ -421,7 +430,6 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp
             if cerr == nil {
                 outputRes[idx-len(inputs)] = worker.NewWorkerRefResult(ref.(cache.ImmutableRef), s.w)
             }
-            inpMount.Release(context.TODO())
         }
 
         // If the action has a secondary input, commit it and set the ref on
@@ -611,3 +619,39 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp
     }
     return inp.(input), err
 }
+
+func isDefaultIndexes(idxs [][]int) bool {
+    // Older version of checksum did not contain indexes for actions resulting in possibility for a wrong cache match.
+    // We detect the most common pattern for indexes and maintain old checksum for that case to minimize cache misses on upgrade.
+    // If a future change causes braking changes in instruction cache consider removing this exception.
+    if len(idxs) == 0 {
+        return false
+    }
+
+    for i, idx := range idxs {
+        if len(idx) != 3 {
+            return false
+        }
+        // input for first action is first input
+        if i == 0 && idx[0] != 0 {
+            return false
+        }
+        // input for other actions is previous action
+        if i != 0 && idx[0] != len(idxs)+(i-1) {
+            return false
+        }
+        // secondary input is second input or -1
+        if idx[1] != -1 && idx[1] != 1 {
+            return false
+        }
+        // last action creates output
+        if i == len(idxs)-1 && idx[2] != 0 {
+            return false
+        }
+        // other actions do not create an output
+        if i != len(idxs)-1 && idx[2] != -1 {
+            return false
+        }
+    }
+    return true
+}
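
To see what the check accepts, take a typical two-action FileOp with two op inputs (for example a mkdir followed by a copy): its index triples follow exactly the pattern above, so the checksum keeps the legacy shape and caches produced by older versions still match. A worked example against the function above; the mkdir/copy labelling is illustrative:

idxs := [][]int{
    {0, -1, -1}, // first action: reads input 0, no secondary input, not exported as an output
    {2, 1, 0},   // second action: reads the previous action's result (index 2), secondary input 1, exported as output 0
}
fmt.Println(isDefaultIndexes(idxs)) // true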

@@ -47,7 +47,7 @@ type splitResult struct {
 func (r *splitResult) Release(ctx context.Context) error {
     if atomic.AddInt64(&r.released, 1) > 1 {
-        err := errors.Errorf("releasing already released reference")
+        err := errors.Errorf("releasing already released reference %+v", r.Result.ID())
         logrus.Error(err)
         return err
     }
@@ -78,10 +78,14 @@ func NewSharedCachedResult(res CachedResult) *SharedCachedResult {
     }
 }
 
-func (r *SharedCachedResult) Clone() CachedResult {
+func (r *SharedCachedResult) CloneCachedResult() CachedResult {
     return &clonedCachedResult{Result: r.SharedResult.Clone(), cr: r.CachedResult}
 }
 
+func (r *SharedCachedResult) Clone() Result {
+    return r.CloneCachedResult()
+}
+
 func (r *SharedCachedResult) Release(ctx context.Context) error {
     return r.SharedResult.Release(ctx)
 }

@@ -244,7 +244,7 @@ func (s *scheduler) build(ctx context.Context, edge Edge) (CachedResult, error)
     if err := p.Receiver.Status().Err; err != nil {
         return nil, err
     }
-    return p.Receiver.Status().Value.(*edgeState).result.Clone(), nil
+    return p.Receiver.Status().Value.(*edgeState).result.CloneCachedResult(), nil
 }
 
 // newPipe creates a new request pipe between two edges

@@ -61,6 +61,7 @@ type Result interface {
     ID() string
     Release(context.Context) error
     Sys() interface{}
+    Clone() Result
 }
 
 // CachedResult is a result connected with its cache key

@@ -231,7 +231,7 @@ func (gs *gitSourceHandler) getAuthToken(ctx context.Context, g session.Group) e
         if s.token {
             dt = []byte("basic " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("x-access-token:%s", dt))))
         }
-        gs.auth = []string{"-c", "http.extraheader=Authorization: " + string(dt)}
+        gs.auth = []string{"-c", "http." + tokenScope(gs.src.Remote) + ".extraheader=Authorization: " + string(dt)}
         break
     }
     return nil
@@ -631,3 +631,14 @@ func argsNoDepth(args []string) []string {
     }
     return out
 }
+
+func tokenScope(remote string) string {
+    // generally we can only use the token for fetching main remote but in case of github.com we do best effort
+    // to try reuse same token for all github.com remotes. This is the same behavior actions/checkout uses
+    for _, pfx := range []string{"https://github.com/", "https://www.github.com/"} {
+        if strings.HasPrefix(remote, pfx) {
+            return pfx
+        }
+    }
+    return remote
+}
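
The Authorization header is now attached through git's scoped http.<url>.extraheader option instead of the global http.extraheader, so the token is only sent to the remote it was issued for (or to the github.com prefix, mirroring actions/checkout) and no longer leaks to submodule hosts. A short illustration of the resulting git arguments, with a placeholder credential:

remote := "https://github.com/moby/buildkit.git"
args := []string{"-c", "http." + tokenScope(remote) + ".extraheader=Authorization: basic <redacted>"}
fmt.Println(args[1]) // http.https://github.com/.extraheader=Authorization: basic <redacted>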

@@ -220,15 +220,13 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R
 // authResult is used to control limit rate.
 type authResult struct {
-    sync.WaitGroup
     token string
-    err error
     expires time.Time
 }
 
 // authHandler is used to handle auth request per registry server.
 type authHandler struct {
-    sync.Mutex
+    g flightcontrol.Group
 
     client *http.Client
@@ -240,7 +238,8 @@ type authHandler struct {
     // scopedTokens caches token indexed by scopes, which used in
     // bearer auth case
     scopedTokens map[string]*authResult
+    scopedTokensMu sync.Mutex
 
     lastUsed time.Time
@@ -292,46 +291,44 @@ func (ah *authHandler) doBearerAuth(ctx context.Context, sm *session.Manager, g
     // Docs: https://docs.docker.com/registry/spec/auth/scope
     scoped := strings.Join(to.Scopes, " ")
 
-    ah.Lock()
-    for {
+    res, err := ah.g.Do(ctx, scoped, func(ctx context.Context) (interface{}, error) {
+        ah.scopedTokensMu.Lock()
         r, exist := ah.scopedTokens[scoped]
-        if !exist {
-            // no entry cached
-            break
-        }
-        ah.Unlock()
-        r.Wait()
-        if r.err != nil {
-            select {
-            case <-ctx.Done():
-                return "", r.err
-            default:
+        ah.scopedTokensMu.Unlock()
+        if exist {
+            if r.expires.IsZero() || r.expires.After(time.Now()) {
+                return r, nil
             }
         }
-        if !errors.Is(r.err, context.Canceled) &&
-            (r.expires.IsZero() || r.expires.After(time.Now())) {
-            return r.token, r.err
-        }
-        // r.err is canceled or token expired. Get rid of it and try again
-        ah.Lock()
-        r2, exist := ah.scopedTokens[scoped]
-        if exist && r == r2 {
-            delete(ah.scopedTokens, scoped)
+        r, err := ah.fetchToken(ctx, sm, g, to)
+        if err != nil {
+            return nil, err
         }
+        ah.scopedTokensMu.Lock()
+        ah.scopedTokens[scoped] = r
+        ah.scopedTokensMu.Unlock()
+        return r, nil
+    })
+    if err != nil || res == nil {
+        return "", err
     }
+    r := res.(*authResult)
+    if r == nil {
+        return "", nil
+    }
+    return r.token, nil
+}
 
-    // only one fetch token job
-    r := new(authResult)
-    r.Add(1)
-    ah.scopedTokens[scoped] = r
-    ah.Unlock()
-
+func (ah *authHandler) fetchToken(ctx context.Context, sm *session.Manager, g session.Group, to auth.TokenOptions) (r *authResult, err error) {
     var issuedAt time.Time
     var expires int
+    var token string
     defer func() {
         token = fmt.Sprintf("Bearer %s", token)
-        r.token, r.err = token, err
 
         if err == nil {
+            r = &authResult{token: token}
             if issuedAt.IsZero() {
                 issuedAt = time.Now()
             }
@@ -339,7 +336,6 @@ func (ah *authHandler) doBearerAuth(ctx context.Context, sm *session.Manager, g
                 r.expires = exp
             }
         }
-        r.Done()
     }()
 
     if ah.authority != nil {
@@ -351,10 +347,11 @@ func (ah *authHandler) doBearerAuth(ctx context.Context, sm *session.Manager, g
             Scopes: to.Scopes,
         }, sm, g)
         if err != nil {
-            return "", err
+            return nil, err
         }
        issuedAt, expires = time.Unix(resp.IssuedAt, 0), int(resp.ExpiresIn)
-        return resp.Token, nil
+        token = resp.Token
+        return nil, nil
     }
 
     // fetch token for the resource scope
@@ -374,29 +371,32 @@ func (ah *authHandler) doBearerAuth(ctx context.Context, sm *session.Manager, g
                 if (errStatus.StatusCode == 405 && to.Username != "") || errStatus.StatusCode == 404 || errStatus.StatusCode == 401 {
                     resp, err := auth.FetchTokenWithOAuth(ctx, ah.client, nil, "buildkit-client", to)
                     if err != nil {
-                        return "", err
+                        return nil, err
                     }
                     issuedAt, expires = resp.IssuedAt, resp.ExpiresIn
-                    return resp.AccessToken, nil
+                    token = resp.AccessToken
+                    return nil, nil
                 }
                 log.G(ctx).WithFields(logrus.Fields{
                     "status": errStatus.Status,
                     "body": string(errStatus.Body),
                 }).Debugf("token request failed")
             }
-            return "", err
+            return nil, err
         }
         issuedAt, expires = resp.IssuedAt, resp.ExpiresIn
-        return resp.Token, nil
+        token = resp.Token
+        return nil, nil
     }
 
     // do request anonymously
     resp, err := auth.FetchToken(ctx, ah.client, nil, to)
     if err != nil {
-        return "", errors.Wrap(err, "failed to fetch anonymous token")
+        return nil, errors.Wrap(err, "failed to fetch anonymous token")
     }
     issuedAt, expires = resp.IssuedAt, resp.ExpiresIn
-    return resp.Token, nil
+    token = resp.Token
+    return nil, nil
 }
 
 func invalidAuthorization(c auth.Challenge, responses []*http.Response) error {
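
The WaitGroup-and-error fields are replaced by a flightcontrol.Group keyed on the scope: concurrent callers share a single in-flight fetch, and because only successful authResults are written to scopedTokens, a transient failure such as an i/o timeout is reported to the current callers but never cached for later ones. A minimal sketch of that call pattern, assuming buildkit's util/flightcontrol API; fetch is a hypothetical helper:

import (
    "context"

    "github.com/moby/buildkit/util/flightcontrol"
)

// getToken deduplicates concurrent fetches for one scope without caching errors.
func getToken(ctx context.Context, g *flightcontrol.Group, scope string, fetch func(context.Context) (string, error)) (string, error) {
    v, err := g.Do(ctx, scope, func(ctx context.Context) (interface{}, error) {
        // The error is propagated to every waiter of this call and then forgotten.
        return fetch(ctx)
    })
    if err != nil {
        return "", err
    }
    return v.(string), nil
}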

@@ -52,3 +52,11 @@ func (r *workerRefResult) Release(ctx context.Context) error {
 func (r *workerRefResult) Sys() interface{} {
     return r.WorkerRef
 }
+
+func (r *workerRefResult) Clone() solver.Result {
+    r2 := *r
+    if r.ImmutableRef != nil {
+        r.ImmutableRef = r.ImmutableRef.Clone()
+    }
+    return &r2
+}