Merge pull request #39983 from tiborvass/rm-legacy-build-session
builder: remove legacy build's session handling
Commit 3c548254a2

10 changed files with 15 additions and 1009 deletions
@@ -9,11 +9,9 @@ import (
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/builder"
 	buildkit "github.com/docker/docker/builder/builder-next"
-	"github.com/docker/docker/builder/fscache"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/pkg/errors"
-	"golang.org/x/sync/errgroup"
 	"google.golang.org/grpc"
 )

@@ -31,14 +29,13 @@ type Builder interface {
 // Backend provides build functionality to the API router
 type Backend struct {
 	builder        Builder
-	fsCache        *fscache.FSCache
 	imageComponent ImageComponent
 	buildkit       *buildkit.Builder
 }

 // NewBackend creates a new build backend from components
-func NewBackend(components ImageComponent, builder Builder, fsCache *fscache.FSCache, buildkit *buildkit.Builder) (*Backend, error) {
-	return &Backend{imageComponent: components, builder: builder, fsCache: fsCache, buildkit: buildkit}, nil
+func NewBackend(components ImageComponent, builder Builder, buildkit *buildkit.Builder) (*Backend, error) {
+	return &Backend{imageComponent: components, builder: builder, buildkit: buildkit}, nil
 }

 // RegisterGRPC registers buildkit controller to the grpc server.

@@ -99,34 +96,11 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string

 // PruneCache removes all cached build sources
 func (b *Backend) PruneCache(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) {
-	eg, ctx := errgroup.WithContext(ctx)
-
-	var fsCacheSize uint64
-	eg.Go(func() error {
-		var err error
-		fsCacheSize, err = b.fsCache.Prune(ctx)
-		if err != nil {
-			return errors.Wrap(err, "failed to prune fscache")
-		}
-		return nil
-	})
-
-	var buildCacheSize int64
-	var cacheIDs []string
-	eg.Go(func() error {
-		var err error
-		buildCacheSize, cacheIDs, err = b.buildkit.Prune(ctx, opts)
-		if err != nil {
-			return errors.Wrap(err, "failed to prune build cache")
-		}
-		return nil
-	})
-
-	if err := eg.Wait(); err != nil {
-		return nil, err
+	buildCacheSize, cacheIDs, err := b.buildkit.Prune(ctx, opts)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to prune build cache")
 	}

-	return &types.BuildCachePruneReport{SpaceReclaimed: fsCacheSize + uint64(buildCacheSize), CachesDeleted: cacheIDs}, nil
+	return &types.BuildCachePruneReport{SpaceReclaimed: uint64(buildCacheSize), CachesDeleted: cacheIDs}, nil
 }

 // Cancel cancels the build by ID
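With fscache gone, PruneCache reduces to a single BuildKit prune, so SpaceReclaimed no longer includes an fscache term and the wire API is unchanged. A minimal sketch of exercising the same endpoint through the Go client (a hypothetical standalone caller, assuming a reachable daemon; not code from this PR):

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	// Maps to POST /build/prune, i.e. Backend.PruneCache above; the report is
	// now populated from BuildKit's prune result alone.
	report, err := cli.BuildCachePrune(context.Background(), types.BuildCachePruneOptions{All: true})
	if err != nil {
		panic(err)
	}
	fmt.Printf("reclaimed %d bytes, deleted: %v\n", report.SpaceReclaimed, report.CachesDeleted)
}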
@@ -3,7 +3,6 @@ package system // import "github.com/docker/docker/api/server/router/system"
 import (
 	"github.com/docker/docker/api/server/router"
 	buildkit "github.com/docker/docker/builder/builder-next"
-	"github.com/docker/docker/builder/fscache"
 )

 // systemRouter provides information about the Docker system overall.

@@ -12,17 +11,15 @@ type systemRouter struct {
 	backend  Backend
 	cluster  ClusterBackend
 	routes   []router.Route
-	fscache  *fscache.FSCache // legacy
 	builder  *buildkit.Builder
 	features *map[string]bool
 }

 // NewRouter initializes a new system router
-func NewRouter(b Backend, c ClusterBackend, fscache *fscache.FSCache, builder *buildkit.Builder, features *map[string]bool) router.Router {
+func NewRouter(b Backend, c ClusterBackend, builder *buildkit.Builder, features *map[string]bool) router.Router {
 	r := &systemRouter{
 		backend:  b,
 		cluster:  c,
-		fscache:  fscache,
 		builder:  builder,
 		features: features,
 	}
@@ -101,16 +101,6 @@ func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter,
 		return err
 	})

-	var builderSize int64 // legacy
-	eg.Go(func() error {
-		var err error
-		builderSize, err = s.fscache.DiskUsage(ctx)
-		if err != nil {
-			return pkgerrors.Wrap(err, "error getting fscache build cache usage")
-		}
-		return nil
-	})
-
 	var buildCache []*types.BuildCache
 	eg.Go(func() error {
 		var err error

@@ -125,6 +115,7 @@ func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter,
 		return err
 	}

+	var builderSize int64
 	for _, b := range buildCache {
 		builderSize += b.Size
 	}
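getDiskUsage now derives builderSize by summing the Size of BuildKit's build-cache records rather than asking fscache, so clients observe the same BuilderSize field as before. A sketch of reading it via the Go client (hypothetical caller, assuming a running daemon):

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	// GET /system/df; BuilderSize is now the sum of BuildKit cache record
	// sizes computed server-side in the hunk above.
	du, err := cli.DiskUsage(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("builder cache size:", du.BuilderSize)
}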
@@ -8,14 +8,12 @@ import (
 	"io/ioutil"
 	"sort"
 	"strings"
-	"time"

 	"github.com/containerd/containerd/platforms"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/builder"
-	"github.com/docker/docker/builder/fscache"
 	"github.com/docker/docker/builder/remotecontext"
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/pkg/idtools"

@@ -25,7 +23,6 @@ import (
 	"github.com/moby/buildkit/frontend/dockerfile/instructions"
 	"github.com/moby/buildkit/frontend/dockerfile/parser"
 	"github.com/moby/buildkit/frontend/dockerfile/shell"
-	"github.com/moby/buildkit/session"
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"

@@ -49,31 +46,19 @@ const (
 	stepFormat = "Step %d/%d : %v"
 )

-// SessionGetter is object used to get access to a session by uuid
-type SessionGetter interface {
-	Get(ctx context.Context, uuid string) (session.Caller, error)
-}
-
 // BuildManager is shared across all Builder objects
 type BuildManager struct {
 	idMapping *idtools.IdentityMapping
 	backend   builder.Backend
 	pathCache pathCache // TODO: make this persistent
-	sg        SessionGetter
-	fsCache   *fscache.FSCache
 }

 // NewBuildManager creates a BuildManager
-func NewBuildManager(b builder.Backend, sg SessionGetter, fsCache *fscache.FSCache, identityMapping *idtools.IdentityMapping) (*BuildManager, error) {
+func NewBuildManager(b builder.Backend, identityMapping *idtools.IdentityMapping) (*BuildManager, error) {
 	bm := &BuildManager{
 		backend:   b,
 		pathCache: &syncmap.Map{},
-		sg:        sg,
 		idMapping: identityMapping,
-		fsCache:   fsCache,
 	}
-	if err := fsCache.RegisterTransport(remotecontext.ClientSessionRemote, NewClientSessionTransport()); err != nil {
-		return nil, err
-	}
 	return bm, nil
 }

@@ -100,10 +85,8 @@ func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()

-	if src, err := bm.initializeClientSession(ctx, cancel, config.Options); err != nil {
-		return nil, err
-	} else if src != nil {
-		source = src
+	if config.Options.SessionID != "" {
+		return nil, errors.New("experimental session with v1 builder is no longer supported, use builder version v2 (BuildKit) instead")
 	}

 	builderOptions := builderOptions{

@@ -120,39 +103,6 @@ func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (
 	return b.build(source, dockerfile)
 }

-func (bm *BuildManager) initializeClientSession(ctx context.Context, cancel func(), options *types.ImageBuildOptions) (builder.Source, error) {
-	if options.SessionID == "" || bm.sg == nil {
-		return nil, nil
-	}
-	logrus.Debug("client is session enabled")
-
-	connectCtx, cancelCtx := context.WithTimeout(ctx, sessionConnectTimeout)
-	defer cancelCtx()
-
-	c, err := bm.sg.Get(connectCtx, options.SessionID)
-	if err != nil {
-		return nil, err
-	}
-	go func() {
-		<-c.Context().Done()
-		cancel()
-	}()
-	if options.RemoteContext == remotecontext.ClientSessionRemote {
-		st := time.Now()
-		csi, err := NewClientSessionSourceIdentifier(ctx, bm.sg, options.SessionID)
-		if err != nil {
-			return nil, err
-		}
-		src, err := bm.fsCache.SyncFrom(ctx, csi)
-		if err != nil {
-			return nil, err
-		}
-		logrus.Debugf("sync-time: %v", time.Since(st))
-		return src, nil
-	}
-	return nil, nil
-}
-
 // builderOptions are the dependencies required by the builder
 type builderOptions struct {
 	Options        *types.ImageBuildOptions
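BuildManager.Build now rejects any request carrying a SessionID outright, so callers that used the experimental session feature must select BuildKit instead. A hedged sketch of the client-side switch (the Version field and constants are from the API types; a real BuildKit build also attaches a session for the context transfer, elided here):

package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	buildCtx, err := os.Open("context.tar") // a pre-built tar of the build context
	if err != nil {
		panic(err)
	}
	defer buildCtx.Close()
	// With Version left at BuilderV1 and a SessionID set, the daemon now fails
	// with "experimental session with v1 builder is no longer supported";
	// BuilderBuildKit routes the build to the BuildKit backend instead.
	resp, err := cli.ImageBuild(context.Background(), buildCtx, types.ImageBuildOptions{
		Version: types.BuilderBuildKit,
	})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
}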
@@ -1,76 +0,0 @@
-package dockerfile // import "github.com/docker/docker/builder/dockerfile"
-
-import (
-	"context"
-	"time"
-
-	"github.com/docker/docker/builder/fscache"
-	"github.com/docker/docker/builder/remotecontext"
-	"github.com/moby/buildkit/session"
-	"github.com/moby/buildkit/session/filesync"
-	"github.com/pkg/errors"
-)
-
-const sessionConnectTimeout = 5 * time.Second
-
-// ClientSessionTransport is a transport for copying files from docker client
-// to the daemon.
-type ClientSessionTransport struct{}
-
-// NewClientSessionTransport returns new ClientSessionTransport instance
-func NewClientSessionTransport() *ClientSessionTransport {
-	return &ClientSessionTransport{}
-}
-
-// Copy data from a remote to a destination directory.
-func (cst *ClientSessionTransport) Copy(ctx context.Context, id fscache.RemoteIdentifier, dest string, cu filesync.CacheUpdater) error {
-	csi, ok := id.(*ClientSessionSourceIdentifier)
-	if !ok {
-		return errors.New("invalid identifier for client session")
-	}
-
-	return filesync.FSSync(ctx, csi.caller, filesync.FSSendRequestOpt{
-		IncludePatterns: csi.includePatterns,
-		DestDir:         dest,
-		CacheUpdater:    cu,
-	})
-}
-
-// ClientSessionSourceIdentifier is an identifier that can be used for requesting
-// files from remote client
-type ClientSessionSourceIdentifier struct {
-	includePatterns []string
-	caller          session.Caller
-	uuid            string
-}
-
-// NewClientSessionSourceIdentifier returns new ClientSessionSourceIdentifier instance
-func NewClientSessionSourceIdentifier(ctx context.Context, sg SessionGetter, uuid string) (*ClientSessionSourceIdentifier, error) {
-	csi := &ClientSessionSourceIdentifier{
-		uuid: uuid,
-	}
-	caller, err := sg.Get(ctx, uuid)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to get session for %s", uuid)
-	}
-
-	csi.caller = caller
-	return csi, nil
-}
-
-// Transport returns transport identifier for remote identifier
-func (csi *ClientSessionSourceIdentifier) Transport() string {
-	return remotecontext.ClientSessionRemote
-}
-
-// SharedKey returns shared key for remote identifier. Shared key is used
-// for finding the base for a repeated transfer.
-func (csi *ClientSessionSourceIdentifier) SharedKey() string {
-	return csi.caller.SharedKey()
-}
-
-// Key returns unique key for remote identifier. Requests with same key return
-// same data.
-func (csi *ClientSessionSourceIdentifier) Key() string {
-	return csi.uuid
-}
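The deleted transport was the daemon half of the experimental --stream flow: the CLI kept a gRPC session open and the daemon pulled the build context through it via filesync instead of reading a tar upload. Roughly, the client half looked like the sketch below (buildkit session API of that era; the helper and its wiring are illustrative, not code from this repo):

package main

import (
	"context"

	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/filesync"
)

// startClientSession sketches the client side of the removed flow: expose the
// local build context over a session that the daemon's ClientSessionTransport
// (deleted above) synced from.
func startClientSession(ctx context.Context, contextDir string, dialer session.Dialer) (*session.Session, error) {
	s, err := session.NewSession(ctx, "build", "")
	if err != nil {
		return nil, err
	}
	s.Allow(filesync.NewFSSyncProvider([]filesync.SyncedDir{{Name: "context", Dir: contextDir}}))
	go s.Run(ctx, dialer) // dialer attaches the stream to the daemon's /session endpoint
	// The build request then set RemoteContext to remotecontext.ClientSessionRemote
	// and SessionID to s.ID(); both are rejected by the v1 builder after this PR.
	return s, nil
}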
@@ -1,654 +0,0 @@
-package fscache // import "github.com/docker/docker/builder/fscache"
-
-import (
-	"archive/tar"
-	"context"
-	"crypto/sha256"
-	"encoding/json"
-	"hash"
-	"os"
-	"path/filepath"
-	"sort"
-	"sync"
-	"time"
-
-	"github.com/docker/docker/builder"
-	"github.com/docker/docker/builder/remotecontext"
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/directory"
-	"github.com/docker/docker/pkg/stringid"
-	"github.com/docker/docker/pkg/tarsum"
-	"github.com/moby/buildkit/session/filesync"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-	"github.com/tonistiigi/fsutil"
-	fsutiltypes "github.com/tonistiigi/fsutil/types"
-	bolt "go.etcd.io/bbolt"
-	"golang.org/x/sync/singleflight"
-)
-
-const dbFile = "fscache.db"
-const cacheKey = "cache"
-const metaKey = "meta"
-
-// Backend is a backing implementation for FSCache
-type Backend interface {
-	Get(id string) (string, error)
-	Remove(id string) error
-}
-
-// FSCache allows syncing remote resources to cached snapshots
-type FSCache struct {
-	opt        Opt
-	transports map[string]Transport
-	mu         sync.Mutex
-	g          singleflight.Group
-	store      *fsCacheStore
-}
-
-// Opt defines options for initializing FSCache
-type Opt struct {
-	Backend  Backend
-	Root     string // for storing local metadata
-	GCPolicy GCPolicy
-}
-
-// GCPolicy defines policy for garbage collection
-type GCPolicy struct {
-	MaxSize         uint64
-	MaxKeepDuration time.Duration
-}
-
-// NewFSCache returns new FSCache object
-func NewFSCache(opt Opt) (*FSCache, error) {
-	store, err := newFSCacheStore(opt)
-	if err != nil {
-		return nil, err
-	}
-	return &FSCache{
-		store:      store,
-		opt:        opt,
-		transports: make(map[string]Transport),
-	}, nil
-}
-
-// Transport defines a method for syncing remote data to FSCache
-type Transport interface {
-	Copy(ctx context.Context, id RemoteIdentifier, dest string, cs filesync.CacheUpdater) error
-}
-
-// RemoteIdentifier identifies a transfer request
-type RemoteIdentifier interface {
-	Key() string
-	SharedKey() string
-	Transport() string
-}
-
-// RegisterTransport registers a new transport method
-func (fsc *FSCache) RegisterTransport(id string, transport Transport) error {
-	fsc.mu.Lock()
-	defer fsc.mu.Unlock()
-	if _, ok := fsc.transports[id]; ok {
-		return errors.Errorf("transport %v already exists", id)
-	}
-	fsc.transports[id] = transport
-	return nil
-}
-
-// SyncFrom returns a source based on a remote identifier
-func (fsc *FSCache) SyncFrom(ctx context.Context, id RemoteIdentifier) (builder.Source, error) { // cacheOpt
-	trasportID := id.Transport()
-	fsc.mu.Lock()
-	transport, ok := fsc.transports[id.Transport()]
-	if !ok {
-		fsc.mu.Unlock()
-		return nil, errors.Errorf("invalid transport %s", trasportID)
-	}
-
-	logrus.Debugf("SyncFrom %s %s", id.Key(), id.SharedKey())
-	fsc.mu.Unlock()
-	sourceRef, err, _ := fsc.g.Do(id.Key(), func() (interface{}, error) {
-		var sourceRef *cachedSourceRef
-		sourceRef, err := fsc.store.Get(id.Key())
-		if err == nil {
-			return sourceRef, nil
-		}
-
-		// check for unused shared cache
-		sharedKey := id.SharedKey()
-		if sharedKey != "" {
-			r, err := fsc.store.Rebase(sharedKey, id.Key())
-			if err == nil {
-				sourceRef = r
-			}
-		}
-
-		if sourceRef == nil {
-			var err error
-			sourceRef, err = fsc.store.New(id.Key(), sharedKey)
-			if err != nil {
-				return nil, errors.Wrap(err, "failed to create remote context")
-			}
-		}
-
-		if err := syncFrom(ctx, sourceRef, transport, id); err != nil {
-			sourceRef.Release()
-			return nil, err
-		}
-		if err := sourceRef.resetSize(-1); err != nil {
-			return nil, err
-		}
-		return sourceRef, nil
-	})
-	if err != nil {
-		return nil, err
-	}
-	ref := sourceRef.(*cachedSourceRef)
-	if ref.src == nil { // failsafe
-		return nil, errors.Errorf("invalid empty pull")
-	}
-	wc := &wrappedContext{Source: ref.src, closer: func() error {
-		ref.Release()
-		return nil
-	}}
-	return wc, nil
-}
-
-// DiskUsage reports how much data is allocated by the cache
-func (fsc *FSCache) DiskUsage(ctx context.Context) (int64, error) {
-	return fsc.store.DiskUsage(ctx)
-}
-
-// Prune allows manually cleaning up the cache
-func (fsc *FSCache) Prune(ctx context.Context) (uint64, error) {
-	return fsc.store.Prune(ctx)
-}
-
-// Close stops the gc and closes the persistent db
-func (fsc *FSCache) Close() error {
-	return fsc.store.Close()
-}
-
-func syncFrom(ctx context.Context, cs *cachedSourceRef, transport Transport, id RemoteIdentifier) (retErr error) {
-	src := cs.src
-	if src == nil {
-		src = remotecontext.NewCachableSource(cs.Dir())
-	}
-
-	if !cs.cached {
-		if err := cs.storage.db.View(func(tx *bolt.Tx) error {
-			b := tx.Bucket([]byte(id.Key()))
-			dt := b.Get([]byte(cacheKey))
-			if dt != nil {
-				if err := src.UnmarshalBinary(dt); err != nil {
-					return err
-				}
-			} else {
-				return errors.Wrap(src.Scan(), "failed to scan cache records")
-			}
-			return nil
-		}); err != nil {
-			return err
-		}
-	}
-
-	dc := &detectChanges{f: src.HandleChange}
-
-	// todo: probably send a bucket to `Copy` and let it return source
-	// but need to make sure that tx is safe
-	if err := transport.Copy(ctx, id, cs.Dir(), dc); err != nil {
-		return errors.Wrapf(err, "failed to copy to %s", cs.Dir())
-	}
-
-	if !dc.supported {
-		if err := src.Scan(); err != nil {
-			return errors.Wrap(err, "failed to scan cache records after transfer")
-		}
-	}
-	cs.cached = true
-	cs.src = src
-	return cs.storage.db.Update(func(tx *bolt.Tx) error {
-		dt, err := src.MarshalBinary()
-		if err != nil {
-			return err
-		}
-		b := tx.Bucket([]byte(id.Key()))
-		return b.Put([]byte(cacheKey), dt)
-	})
-}
-
-type fsCacheStore struct {
-	mu       sync.Mutex
-	sources  map[string]*cachedSource
-	db       *bolt.DB
-	fs       Backend
-	gcTimer  *time.Timer
-	gcPolicy GCPolicy
-}
-
-// CachePolicy defines policy for keeping a resource in cache
-type CachePolicy struct {
-	Priority int
-	LastUsed time.Time
-}
-
-func defaultCachePolicy() CachePolicy {
-	return CachePolicy{Priority: 10, LastUsed: time.Now()}
-}
-
-func newFSCacheStore(opt Opt) (*fsCacheStore, error) {
-	if err := os.MkdirAll(opt.Root, 0700); err != nil {
-		return nil, err
-	}
-	p := filepath.Join(opt.Root, dbFile)
-	db, err := bolt.Open(p, 0600, nil)
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to open database file %s")
-	}
-	s := &fsCacheStore{db: db, sources: make(map[string]*cachedSource), fs: opt.Backend, gcPolicy: opt.GCPolicy}
-	db.View(func(tx *bolt.Tx) error {
-		return tx.ForEach(func(name []byte, b *bolt.Bucket) error {
-			dt := b.Get([]byte(metaKey))
-			if dt == nil {
-				return nil
-			}
-			var sm sourceMeta
-			if err := json.Unmarshal(dt, &sm); err != nil {
-				return err
-			}
-			dir, err := s.fs.Get(sm.BackendID)
-			if err != nil {
-				return err // TODO: handle gracefully
-			}
-			source := &cachedSource{
-				refs:       make(map[*cachedSourceRef]struct{}),
-				id:         string(name),
-				dir:        dir,
-				sourceMeta: sm,
-				storage:    s,
-			}
-			s.sources[string(name)] = source
-			return nil
-		})
-	})
-
-	s.gcTimer = s.startPeriodicGC(5 * time.Minute)
-	return s, nil
-}
-
-func (s *fsCacheStore) startPeriodicGC(interval time.Duration) *time.Timer {
-	var t *time.Timer
-	t = time.AfterFunc(interval, func() {
-		if err := s.GC(); err != nil {
-			logrus.Errorf("build gc error: %v", err)
-		}
-		t.Reset(interval)
-	})
-	return t
-}
-
-func (s *fsCacheStore) Close() error {
-	s.gcTimer.Stop()
-	return s.db.Close()
-}
-
-func (s *fsCacheStore) New(id, sharedKey string) (*cachedSourceRef, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	var ret *cachedSource
-	if err := s.db.Update(func(tx *bolt.Tx) error {
-		b, err := tx.CreateBucket([]byte(id))
-		if err != nil {
-			return err
-		}
-		backendID := stringid.GenerateRandomID()
-		dir, err := s.fs.Get(backendID)
-		if err != nil {
-			return err
-		}
-		source := &cachedSource{
-			refs: make(map[*cachedSourceRef]struct{}),
-			id:   id,
-			dir:  dir,
-			sourceMeta: sourceMeta{
-				BackendID:   backendID,
-				SharedKey:   sharedKey,
-				CachePolicy: defaultCachePolicy(),
-			},
-			storage: s,
-		}
-		dt, err := json.Marshal(source.sourceMeta)
-		if err != nil {
-			return err
-		}
-		if err := b.Put([]byte(metaKey), dt); err != nil {
-			return err
-		}
-		s.sources[id] = source
-		ret = source
-		return nil
-	}); err != nil {
-		return nil, err
-	}
-	return ret.getRef(), nil
-}
-
-func (s *fsCacheStore) Rebase(sharedKey, newid string) (*cachedSourceRef, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	var ret *cachedSource
-	for id, snap := range s.sources {
-		if snap.SharedKey == sharedKey && len(snap.refs) == 0 {
-			if err := s.db.Update(func(tx *bolt.Tx) error {
-				if err := tx.DeleteBucket([]byte(id)); err != nil {
-					return err
-				}
-				b, err := tx.CreateBucket([]byte(newid))
-				if err != nil {
-					return err
-				}
-				snap.id = newid
-				snap.CachePolicy = defaultCachePolicy()
-				dt, err := json.Marshal(snap.sourceMeta)
-				if err != nil {
-					return err
-				}
-				if err := b.Put([]byte(metaKey), dt); err != nil {
-					return err
-				}
-				delete(s.sources, id)
-				s.sources[newid] = snap
-				return nil
-			}); err != nil {
-				return nil, err
-			}
-			ret = snap
-			break
-		}
-	}
-	if ret == nil {
-		return nil, errors.Errorf("no candidate for rebase")
-	}
-	return ret.getRef(), nil
-}
-
-func (s *fsCacheStore) Get(id string) (*cachedSourceRef, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	src, ok := s.sources[id]
-	if !ok {
-		return nil, errors.Errorf("not found")
-	}
-	return src.getRef(), nil
-}
-
-// DiskUsage reports how much data is allocated by the cache
-func (s *fsCacheStore) DiskUsage(ctx context.Context) (int64, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	var size int64
-
-	for _, snap := range s.sources {
-		if len(snap.refs) == 0 {
-			ss, err := snap.getSize(ctx)
-			if err != nil {
-				return 0, err
-			}
-			size += ss
-		}
-	}
-	return size, nil
-}
-
-// Prune allows manually cleaning up the cache
-func (s *fsCacheStore) Prune(ctx context.Context) (uint64, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	var size uint64
-
-	for id, snap := range s.sources {
-		select {
-		case <-ctx.Done():
-			logrus.Debugf("Cache prune operation cancelled, pruned size: %d", size)
-			// when the context is cancelled, only return current size and nil
-			return size, nil
-		default:
-		}
-		if len(snap.refs) == 0 {
-			ss, err := snap.getSize(ctx)
-			if err != nil {
-				return size, err
-			}
-			if err := s.delete(id); err != nil {
-				return size, errors.Wrapf(err, "failed to delete %s", id)
-			}
-			size += uint64(ss)
-		}
-	}
-	return size, nil
-}
-
-// GC runs a garbage collector on FSCache
-func (s *fsCacheStore) GC() error {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	var size uint64
-
-	ctx := context.Background()
-	cutoff := time.Now().Add(-s.gcPolicy.MaxKeepDuration)
-	var blacklist []*cachedSource
-
-	for id, snap := range s.sources {
-		if len(snap.refs) == 0 {
-			if cutoff.After(snap.CachePolicy.LastUsed) {
-				if err := s.delete(id); err != nil {
-					return errors.Wrapf(err, "failed to delete %s", id)
-				}
-			} else {
-				ss, err := snap.getSize(ctx)
-				if err != nil {
-					return err
-				}
-				size += uint64(ss)
-				blacklist = append(blacklist, snap)
-			}
-		}
-	}
-
-	sort.Sort(sortableCacheSources(blacklist))
-	for _, snap := range blacklist {
-		if size <= s.gcPolicy.MaxSize {
-			break
-		}
-		ss, err := snap.getSize(ctx)
-		if err != nil {
-			return err
-		}
-		if err := s.delete(snap.id); err != nil {
-			return errors.Wrapf(err, "failed to delete %s", snap.id)
-		}
-		size -= uint64(ss)
-	}
-	return nil
-}
-
-// keep mu while calling this
-func (s *fsCacheStore) delete(id string) error {
-	src, ok := s.sources[id]
-	if !ok {
-		return nil
-	}
-	if len(src.refs) > 0 {
-		return errors.Errorf("can't delete %s because it has active references", id)
-	}
-	delete(s.sources, id)
-	if err := s.db.Update(func(tx *bolt.Tx) error {
-		return tx.DeleteBucket([]byte(id))
-	}); err != nil {
-		return err
-	}
-	return s.fs.Remove(src.BackendID)
-}
-
-type sourceMeta struct {
-	SharedKey   string
-	BackendID   string
-	CachePolicy CachePolicy
-	Size        int64
-}
-
-//nolint:structcheck
-type cachedSource struct {
-	sourceMeta
-	refs    map[*cachedSourceRef]struct{}
-	id      string
-	dir     string
-	src     *remotecontext.CachableSource
-	storage *fsCacheStore
-	cached  bool // keep track if cache is up to date
-}
-
-type cachedSourceRef struct {
-	*cachedSource
-}
-
-func (cs *cachedSource) Dir() string {
-	return cs.dir
-}
-
-// hold storage lock before calling
-func (cs *cachedSource) getRef() *cachedSourceRef {
-	ref := &cachedSourceRef{cachedSource: cs}
-	cs.refs[ref] = struct{}{}
-	return ref
-}
-
-// hold storage lock before calling
-func (cs *cachedSource) getSize(ctx context.Context) (int64, error) {
-	if cs.sourceMeta.Size < 0 {
-		ss, err := directory.Size(ctx, cs.dir)
-		if err != nil {
-			return 0, err
-		}
-		if err := cs.resetSize(ss); err != nil {
-			return 0, err
-		}
-		return ss, nil
-	}
-	return cs.sourceMeta.Size, nil
-}
-
-func (cs *cachedSource) resetSize(val int64) error {
-	cs.sourceMeta.Size = val
-	return cs.saveMeta()
-}
-func (cs *cachedSource) saveMeta() error {
-	return cs.storage.db.Update(func(tx *bolt.Tx) error {
-		b := tx.Bucket([]byte(cs.id))
-		dt, err := json.Marshal(cs.sourceMeta)
-		if err != nil {
-			return err
-		}
-		return b.Put([]byte(metaKey), dt)
-	})
-}
-
-func (csr *cachedSourceRef) Release() error {
-	csr.cachedSource.storage.mu.Lock()
-	defer csr.cachedSource.storage.mu.Unlock()
-	delete(csr.cachedSource.refs, csr)
-	if len(csr.cachedSource.refs) == 0 {
-		go csr.cachedSource.storage.GC()
-	}
-	return nil
-}
-
-type detectChanges struct {
-	f         fsutil.ChangeFunc
-	supported bool
-}
-
-func (dc *detectChanges) HandleChange(kind fsutil.ChangeKind, path string, fi os.FileInfo, err error) error {
-	if dc == nil {
-		return nil
-	}
-	return dc.f(kind, path, fi, err)
-}
-
-func (dc *detectChanges) MarkSupported(v bool) {
-	if dc == nil {
-		return
-	}
-	dc.supported = v
-}
-
-func (dc *detectChanges) ContentHasher() fsutil.ContentHasher {
-	return newTarsumHash
-}
-
-type wrappedContext struct {
-	builder.Source
-	closer func() error
-}
-
-func (wc *wrappedContext) Close() error {
-	if err := wc.Source.Close(); err != nil {
-		return err
-	}
-	return wc.closer()
-}
-
-type sortableCacheSources []*cachedSource
-
-// Len is the number of elements in the collection.
-func (s sortableCacheSources) Len() int {
-	return len(s)
-}
-
-// Less reports whether the element with
-// index i should sort before the element with index j.
-func (s sortableCacheSources) Less(i, j int) bool {
-	return s[i].CachePolicy.LastUsed.Before(s[j].CachePolicy.LastUsed)
-}
-
-// Swap swaps the elements with indexes i and j.
-func (s sortableCacheSources) Swap(i, j int) {
-	s[i], s[j] = s[j], s[i]
-}
-
-func newTarsumHash(stat *fsutiltypes.Stat) (hash.Hash, error) {
-	fi := &fsutil.StatInfo{Stat: stat}
-	p := stat.Path
-	if fi.IsDir() {
-		p += string(os.PathSeparator)
-	}
-	h, err := archive.FileInfoHeader(p, fi, stat.Linkname)
-	if err != nil {
-		return nil, err
-	}
-	h.Name = p
-	h.Uid = int(stat.Uid)
-	h.Gid = int(stat.Gid)
-	h.Linkname = stat.Linkname
-	if stat.Xattrs != nil {
-		h.Xattrs = make(map[string]string)
-		for k, v := range stat.Xattrs {
-			h.Xattrs[k] = string(v)
-		}
-	}
-
-	tsh := &tarsumHash{h: h, Hash: sha256.New()}
-	tsh.Reset()
-	return tsh, nil
-}
-
-// Reset resets the Hash to its initial state.
-func (tsh *tarsumHash) Reset() {
-	tsh.Hash.Reset()
-	tarsum.WriteV1Header(tsh.h, tsh.Hash)
-}
-
-type tarsumHash struct {
-	hash.Hash
-	h *tar.Header
-}
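One detail of the deleted store worth noting: concurrent SyncFrom calls for the same key were collapsed through golang.org/x/sync/singleflight, so a context requested by several builds at once was transferred only once. A self-contained illustration of that pattern (standalone example, not code from this repo):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var wg sync.WaitGroup
	var transfers int32

	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// All in-flight callers of key "ctx1" share one execution, just as
			// FSCache.SyncFrom shared one transfer per remote identifier key.
			v, _, shared := g.Do("ctx1", func() (interface{}, error) {
				atomic.AddInt32(&transfers, 1) // in FSCache: the expensive remote copy
				return "synced-dir", nil
			})
			fmt.Println(v, "shared:", shared)
		}()
	}
	wg.Wait()
	fmt.Println("underlying transfers:", atomic.LoadInt32(&transfers)) // typically 1
}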
@@ -1,132 +0,0 @@
-package fscache // import "github.com/docker/docker/builder/fscache"
-
-import (
-	"context"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"testing"
-	"time"
-
-	"github.com/moby/buildkit/session/filesync"
-	"gotest.tools/assert"
-	is "gotest.tools/assert/cmp"
-)
-
-func TestFSCache(t *testing.T) {
-	tmpDir, err := ioutil.TempDir("", "fscache")
-	assert.Check(t, err)
-	defer os.RemoveAll(tmpDir)
-
-	backend := NewNaiveCacheBackend(filepath.Join(tmpDir, "backend"))
-
-	opt := Opt{
-		Root:     tmpDir,
-		Backend:  backend,
-		GCPolicy: GCPolicy{MaxSize: 15, MaxKeepDuration: time.Hour},
-	}
-
-	fscache, err := NewFSCache(opt)
-	assert.Check(t, err)
-
-	defer fscache.Close()
-
-	err = fscache.RegisterTransport("test", &testTransport{})
-	assert.Check(t, err)
-
-	src1, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data", "bar"})
-	assert.Check(t, err)
-
-	dt, err := ioutil.ReadFile(filepath.Join(src1.Root().Path(), "foo"))
-	assert.Check(t, err)
-	assert.Check(t, is.Equal(string(dt), "data"))
-
-	// same id doesn't recalculate anything
-	src2, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data2", "bar"})
-	assert.Check(t, err)
-	assert.Check(t, is.Equal(src1.Root().Path(), src2.Root().Path()))
-
-	dt, err = ioutil.ReadFile(filepath.Join(src1.Root().Path(), "foo"))
-	assert.Check(t, err)
-	assert.Check(t, is.Equal(string(dt), "data"))
-	assert.Check(t, src2.Close())
-
-	src3, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo2", "data2", "bar"})
-	assert.Check(t, err)
-	assert.Check(t, src1.Root().Path() != src3.Root().Path())
-
-	dt, err = ioutil.ReadFile(filepath.Join(src3.Root().Path(), "foo2"))
-	assert.Check(t, err)
-	assert.Check(t, is.Equal(string(dt), "data2"))
-
-	s, err := fscache.DiskUsage(context.TODO())
-	assert.Check(t, err)
-	assert.Check(t, is.Equal(s, int64(0)))
-
-	assert.Check(t, src3.Close())
-
-	s, err = fscache.DiskUsage(context.TODO())
-	assert.Check(t, err)
-	assert.Check(t, is.Equal(s, int64(5)))
-
-	// new upload with the same shared key shoutl overwrite
-	src4, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo3", "data3", "bar"})
-	assert.Check(t, err)
-	assert.Check(t, src1.Root().Path() != src3.Root().Path())
-
-	dt, err = ioutil.ReadFile(filepath.Join(src3.Root().Path(), "foo3"))
-	assert.Check(t, err)
-	assert.Check(t, is.Equal(string(dt), "data3"))
-	assert.Check(t, is.Equal(src4.Root().Path(), src3.Root().Path()))
-	assert.Check(t, src4.Close())
-
-	s, err = fscache.DiskUsage(context.TODO())
-	assert.Check(t, err)
-	assert.Check(t, is.Equal(s, int64(10)))
-
-	// this one goes over the GC limit
-	src5, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo4", "datadata", "baz"})
-	assert.Check(t, err)
-	assert.Check(t, src5.Close())
-
-	// GC happens async
-	time.Sleep(100 * time.Millisecond)
-
-	// only last insertion after GC
-	s, err = fscache.DiskUsage(context.TODO())
-	assert.Check(t, err)
-	assert.Check(t, is.Equal(s, int64(8)))
-
-	// prune deletes everything
-	released, err := fscache.Prune(context.TODO())
-	assert.Check(t, err)
-	assert.Check(t, is.Equal(released, uint64(8)))
-
-	s, err = fscache.DiskUsage(context.TODO())
-	assert.Check(t, err)
-	assert.Check(t, is.Equal(s, int64(0)))
-}
-
-type testTransport struct {
-}
-
-func (t *testTransport) Copy(ctx context.Context, id RemoteIdentifier, dest string, cs filesync.CacheUpdater) error {
-	testid := id.(*testIdentifier)
-	return ioutil.WriteFile(filepath.Join(dest, testid.filename), []byte(testid.data), 0600)
-}
-
-type testIdentifier struct {
-	filename  string
-	data      string
-	sharedKey string
-}
-
-func (t *testIdentifier) Key() string {
-	return t.filename
-}
-func (t *testIdentifier) SharedKey() string {
-	return t.sharedKey
-}
-func (t *testIdentifier) Transport() string {
-	return "test"
-}
@@ -1,28 +0,0 @@
-package fscache // import "github.com/docker/docker/builder/fscache"
-
-import (
-	"os"
-	"path/filepath"
-
-	"github.com/pkg/errors"
-)
-
-// NewNaiveCacheBackend is a basic backend implementation for fscache
-func NewNaiveCacheBackend(root string) Backend {
-	return &naiveCacheBackend{root: root}
-}
-
-type naiveCacheBackend struct {
-	root string
-}
-
-func (tcb *naiveCacheBackend) Get(id string) (string, error) {
-	d := filepath.Join(tcb.root, id)
-	if err := os.MkdirAll(d, 0700); err != nil {
-		return "", errors.Wrapf(err, "failed to create tmp dir for %s", d)
-	}
-	return d, nil
-}
-func (tcb *naiveCacheBackend) Remove(id string) error {
-	return errors.WithStack(os.RemoveAll(filepath.Join(tcb.root, id)))
-}
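The naive backend's whole contract (per the Backend interface deleted with fscache.go above) was mapping an id to a directory. A short usage sketch against the removed API, with illustrative paths and ids:

// Sketch only: NewNaiveCacheBackend and Backend were removed in this PR.
func demoNaiveBackend() error {
	b := NewNaiveCacheBackend("/var/lib/docker/builder") // illustrative root
	dir, err := b.Get("deadbeef") // creates <root>/deadbeef if missing
	if err != nil {
		return err
	}
	_ = dir // the cache would populate this directory with the synced context
	return b.Remove("deadbeef") // deletes the whole directory tree
}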
@@ -31,7 +31,6 @@ import (
 	"github.com/docker/docker/api/server/router/volume"
 	buildkit "github.com/docker/docker/builder/builder-next"
 	"github.com/docker/docker/builder/dockerfile"
-	"github.com/docker/docker/builder/fscache"
 	"github.com/docker/docker/cli/debug"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/daemon/cluster"

@@ -269,7 +268,6 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 type routerOptions struct {
 	sessionManager *session.Manager
 	buildBackend   *buildbackend.Backend
-	buildCache     *fscache.FSCache // legacy
 	features       *map[string]bool
 	buildkit       *buildkit.Builder
 	daemon         *daemon.Daemon

@@ -284,21 +282,7 @@ func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, e
 		return opts, errors.Wrap(err, "failed to create sessionmanager")
 	}

-	builderStateDir := filepath.Join(config.Root, "builder")
-
-	buildCache, err := fscache.NewFSCache(fscache.Opt{
-		Backend: fscache.NewNaiveCacheBackend(builderStateDir),
-		Root:    builderStateDir,
-		GCPolicy: fscache.GCPolicy{ // TODO: expose this in config
-			MaxSize:         1024 * 1024 * 512,  // 512MB
-			MaxKeepDuration: 7 * 24 * time.Hour, // 1 week
-		},
-	})
-	if err != nil {
-		return opts, errors.Wrap(err, "failed to create fscache")
-	}
-
-	manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), sm, buildCache, d.IdentityMapping())
+	manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping())
 	if err != nil {
 		return opts, err
 	}

@@ -319,14 +303,13 @@ func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, e
 		return opts, err
 	}

-	bb, err := buildbackend.NewBackend(d.ImageService(), manager, buildCache, bk)
+	bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk)
 	if err != nil {
 		return opts, errors.Wrap(err, "failed to create buildmanager")
 	}
 	return routerOptions{
 		sessionManager: sm,
 		buildBackend:   bb,
-		buildCache:     buildCache,
 		buildkit:       bk,
 		features:       d.Features(),
 		daemon:         d,

@@ -494,7 +477,7 @@ func initRouter(opts routerOptions) {
 		checkpointrouter.NewRouter(opts.daemon, decoder),
 		container.NewRouter(opts.daemon, decoder),
 		image.NewRouter(opts.daemon.ImageService()),
-		systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildCache, opts.buildkit, opts.features),
+		systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features),
 		volume.NewRouter(opts.daemon.VolumesService()),
 		build.NewRouter(opts.buildBackend, opts.daemon, opts.features),
 		sessionrouter.NewRouter(opts.sessionManager),
@@ -22,6 +22,7 @@ import (
 )

 func TestBuildWithSession(t *testing.T) {
+	t.Skip("TODO: BuildKit")
 	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
 	skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.39"), "experimental in older versions")