
vendor: update buildkit to 37d53758

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
This commit is contained in:
Tonis Tiigi 2019-06-04 00:34:16 -07:00
parent 3d21b86e0a
commit 85bbbd4495
19 changed files with 347 additions and 31 deletions


@ -135,7 +135,7 @@ func (is *imageSource) resolveRemote(ctx context.Context, ref string, platform *
dt []byte
}
res, err := is.g.Do(ctx, ref, func(ctx context.Context) (interface{}, error) {
dgst, dt, err := imageutil.Config(ctx, ref, is.getResolver(ctx, is.ResolverOpt, ref, sm), is.ContentStore, platform)
dgst, dt, err := imageutil.Config(ctx, ref, is.getResolver(ctx, is.ResolverOpt, ref, sm), is.ContentStore, nil, platform)
if err != nil {
return nil, err
}


@ -27,7 +27,7 @@ github.com/imdario/mergo 7c29201646fa3de8506f70121347
golang.org/x/sync e225da77a7e68af35c70ccbf71af2b83e6acac3c
# buildkit
github.com/moby/buildkit f238f1efb04f00bf0cc147141fda9ddb55c8bc49
github.com/moby/buildkit 37d53758a68d9f5cede1806dbb2da7c3caa8d5bc
github.com/tonistiigi/fsutil 3bbb99cdbd76619ab717299830c60f6f2a533a6b
github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7


@ -184,6 +184,19 @@ The local client will copy the files directly to the client. This is useful if B
buildctl build ... --output type=local,dest=path/to/output-dir
```
To export specific files, use multi-stage builds with a scratch stage and copy the needed files into that stage with `COPY --from`.
```dockerfile
...
FROM scratch as testresult
COPY --from=builder /usr/src/app/testresult.xml .
...
```
```
buildctl build ... --opt target=testresult --output type=local,dest=path/to/output-dir
```
The tar exporter is similar to the local exporter but transfers the files through a tarball.
```
@ -268,6 +281,16 @@ export BUILDKIT_HOST=tcp://0.0.0.0:1234
buildctl build --help
```
To run the client and an ephemeral daemon in a single container ("daemonless mode"):
```
docker run -it --rm --privileged -v /path/to/dir:/tmp/work --entrypoint buildctl-daemonless.sh moby/buildkit:master build --frontend dockerfile.v0 --local context=/tmp/work --local dockerfile=/tmp/work
```
or
```
docker run -it --rm --security-opt seccomp=unconfined --security-opt apparmor=unconfined -e BUILDKITD_FLAGS=--oci-worker-no-process-sandbox -v /path/to/dir:/tmp/work --entrypoint buildctl-daemonless.sh moby/buildkit:master-rootless build --frontend dockerfile.v0 --local context=/tmp/work --local dockerfile=/tmp/work
```
The images can also be built locally using `./hack/dockerfiles/test.Dockerfile` (or `./hack/dockerfiles/test.buildkit.Dockerfile` if you already have BuildKit).
Run `make images` to build the images as `moby/buildkit:local` and `moby/buildkit:local-rootless`.


@ -60,6 +60,10 @@ func SetCacheContext(ctx context.Context, md *metadata.StorageItem, cc CacheCont
return getDefaultManager().SetCacheContext(ctx, md, cc)
}
func ClearCacheContext(md *metadata.StorageItem) {
getDefaultManager().clearCacheContext(md.ID())
}
type CacheContext interface {
Checksum(ctx context.Context, ref cache.Mountable, p string, followLinks bool) (digest.Digest, error)
ChecksumWildcard(ctx context.Context, ref cache.Mountable, p string, followLinks bool) (digest.Digest, error)
@ -142,6 +146,12 @@ func (cm *cacheManager) SetCacheContext(ctx context.Context, md *metadata.Storag
return nil
}
func (cm *cacheManager) clearCacheContext(id string) {
cm.lruMu.Lock()
cm.lru.Remove(id)
cm.lruMu.Unlock()
}
type cacheContext struct {
mu sync.RWMutex
md *metadata.StorageItem


@ -125,7 +125,7 @@ func (cm *cacheManager) GetFromSnapshotter(ctx context.Context, id string, opts
}
// get requires manager lock to be taken
func (cm *cacheManager) get(ctx context.Context, id string, fromSnapshotter bool, opts ...RefOption) (ImmutableRef, error) {
func (cm *cacheManager) get(ctx context.Context, id string, fromSnapshotter bool, opts ...RefOption) (*immutableRef, error) {
rec, err := cm.getRecord(ctx, id, fromSnapshotter, opts...)
if err != nil {
return nil, err
@ -193,7 +193,7 @@ func (cm *cacheManager) getRecord(ctx context.Context, id string, fromSnapshotte
return nil, errors.Wrap(errNotFound, err.Error())
}
var parent ImmutableRef
var parent *immutableRef
if info.Parent != "" {
parent, err = cm.get(ctx, info.Parent, fromSnapshotter, append(opts, NoUpdateLastUsed)...)
if err != nil {
@ -201,7 +201,9 @@ func (cm *cacheManager) getRecord(ctx context.Context, id string, fromSnapshotte
}
defer func() {
if retErr != nil {
parent.Release(context.TODO())
parent.mu.Lock()
parent.release(context.TODO())
parent.mu.Unlock()
}
}()
}
@ -224,9 +226,6 @@ func (cm *cacheManager) getRecord(ctx context.Context, id string, fromSnapshotte
}
if err := initializeMetadata(rec, opts...); err != nil {
if parent != nil {
parent.Release(context.TODO())
}
return nil, err
}
@ -237,18 +236,18 @@ func (cm *cacheManager) getRecord(ctx context.Context, id string, fromSnapshotte
func (cm *cacheManager) New(ctx context.Context, s ImmutableRef, opts ...RefOption) (MutableRef, error) {
id := identity.NewID()
var parent ImmutableRef
var parent *immutableRef
var parentID string
if s != nil {
var err error
parent, err = cm.Get(ctx, s.ID(), NoUpdateLastUsed)
p, err := cm.Get(ctx, s.ID(), NoUpdateLastUsed)
if err != nil {
return nil, err
}
if err := parent.Finalize(ctx, true); err != nil {
if err := p.Finalize(ctx, true); err != nil {
return nil, err
}
parentID = parent.ID()
parentID = p.ID()
parent = p.(*immutableRef)
}
if err := cm.Snapshotter.Prepare(ctx, id, parentID); err != nil {
@ -737,6 +736,10 @@ func CachePolicyRetain(m withMetadata) error {
return queueCachePolicy(m.Metadata(), cachePolicyRetain)
}
func CachePolicyDefault(m withMetadata) error {
return queueCachePolicy(m.Metadata(), cachePolicyDefault)
}
func WithDescription(descr string) RefOption {
return func(m withMetadata) error {
return queueDescription(m.Metadata(), descr)


@ -50,7 +50,7 @@ type cacheRecord struct {
mutable bool
refs map[ref]struct{}
parent ImmutableRef
parent *immutableRef
md *metadata.StorageItem
// dead means record is marked as deleted
@ -126,14 +126,17 @@ func (cr *cacheRecord) Size(ctx context.Context) (int64, error) {
}
func (cr *cacheRecord) Parent() ImmutableRef {
return cr.parentRef(true)
if p := cr.parentRef(true); p != nil { // avoid returning typed nil pointer
return p
}
return nil
}
func (cr *cacheRecord) parentRef(hidden bool) ImmutableRef {
if cr.parent == nil {
func (cr *cacheRecord) parentRef(hidden bool) *immutableRef {
p := cr.parent
if p == nil {
return nil
}
p := cr.parent.(*immutableRef)
p.mu.Lock()
defer p.mu.Unlock()
return p.ref(hidden)
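
The nil check added to `Parent()` above guards against Go's typed-nil pitfall: now that `parentRef` returns a concrete `*immutableRef`, assigning a nil pointer of that type to the `ImmutableRef` interface would produce an interface value that is not `== nil`. A minimal standalone sketch of the pitfall and of the fix pattern (the names below are illustrative, not taken from this diff):

```go
package main

import "fmt"

type Ref interface{ ID() string }

type immutable struct{ id string }

func (r *immutable) ID() string { return r.id }

// lookup returns a typed nil pointer when nothing is found.
func lookup(found bool) *immutable {
	if !found {
		return nil
	}
	return &immutable{id: "abc"}
}

func main() {
	var r Ref = lookup(false) // interface now holds (*immutable)(nil)
	fmt.Println(r == nil)     // false: the interface carries a type, so it is non-nil

	// The fix mirrors the diff: only assign to the interface when the pointer is non-nil.
	var safe Ref
	if p := lookup(false); p != nil {
		safe = p
	}
	fmt.Println(safe == nil) // true
}
```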
@ -181,7 +184,7 @@ func (cr *cacheRecord) Mount(ctx context.Context, readonly bool) (snapshot.Mount
func (cr *cacheRecord) remove(ctx context.Context, removeSnapshot bool) error {
delete(cr.cm.records, cr.ID())
if cr.parent != nil {
if err := cr.parent.(*immutableRef).release(ctx); err != nil {
if err := cr.parent.release(ctx); err != nil {
return err
}
}
@ -315,7 +318,7 @@ func (sr *mutableRef) updateLastUsed() bool {
return sr.triggerLastUsed
}
func (sr *mutableRef) commit(ctx context.Context) (ImmutableRef, error) {
func (sr *mutableRef) commit(ctx context.Context) (*immutableRef, error) {
if !sr.mutable || len(sr.refs) == 0 {
return nil, errors.Wrapf(errInvalid, "invalid mutable ref %p", sr)
}
@ -398,7 +401,7 @@ func (sr *mutableRef) release(ctx context.Context) error {
}
}
if sr.parent != nil {
if err := sr.parent.(*immutableRef).release(ctx); err != nil {
if err := sr.parent.release(ctx); err != nil {
return err
}
}


@ -87,7 +87,7 @@ func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string
return res.dgst, res.config, nil
}
dgst, config, err := imageutil.Config(ctx, ref, imr.resolver, imr.buffer, platform)
dgst, config, err := imageutil.Config(ctx, ref, imr.resolver, imr.buffer, nil, platform)
if err != nil {
return "", nil, err
}


@ -3,6 +3,7 @@ package control
import (
"context"
"sync"
"sync/atomic"
"time"
controlapi "github.com/moby/buildkit/api/services/control"
@ -17,6 +18,7 @@ import (
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/solver/llbsolver"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/imageutil"
"github.com/moby/buildkit/util/throttle"
"github.com/moby/buildkit/worker"
"github.com/pkg/errors"
@ -42,6 +44,7 @@ type Controller struct { // TODO: ControlService
gatewayForwarder *controlgateway.GatewayForwarder
throttledGC func()
gcmu sync.Mutex
buildCount int64
}
func NewController(opt Opt) (*Controller, error) {
@ -110,6 +113,10 @@ func (c *Controller) DiskUsage(ctx context.Context, r *controlapi.DiskUsageReque
}
func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Control_PruneServer) error {
if atomic.LoadInt64(&c.buildCount) == 0 {
imageutil.CancelCacheLeases()
}
ch := make(chan client.UsageInfo)
eg, ctx := errgroup.WithContext(stream.Context())
@ -207,6 +214,9 @@ func translateLegacySolveRequest(req *controlapi.SolveRequest) error {
}
func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*controlapi.SolveResponse, error) {
atomic.AddInt64(&c.buildCount, 1)
defer atomic.AddInt64(&c.buildCount, -1)
if err := translateLegacySolveRequest(req); err != nil {
return nil, err
}
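
Read together with the `Prune` hunk above, these two added lines gate lease cancellation on build activity: each `Solve` call holds an atomic counter for its lifetime, and `Prune` only calls `imageutil.CancelCacheLeases` while that counter reads zero. A minimal sketch of the same pattern in isolation (the types and names here are illustrative, not the controller's):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type controller struct {
	buildCount int64
}

// solve simulates a build: the counter is held for the whole call.
func (c *controller) solve(id int) {
	atomic.AddInt64(&c.buildCount, 1)
	defer atomic.AddInt64(&c.buildCount, -1)
	fmt.Printf("build %d ran with %d build(s) in flight\n", id, atomic.LoadInt64(&c.buildCount))
}

// prune only performs the destructive cleanup when no build is in flight.
func (c *controller) prune() {
	if atomic.LoadInt64(&c.buildCount) == 0 {
		fmt.Println("no active builds: cancelling cached leases")
		return
	}
	fmt.Println("builds in flight: keeping leases")
}

func main() {
	c := &controller{}
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func(i int) { defer wg.Done(); c.solve(i) }(i)
	}
	wg.Wait()
	c.prune() // all builds finished, so cleanup is allowed
}
```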


@ -3,8 +3,12 @@
package dockerfile2llb
import (
"fmt"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
@ -40,6 +44,40 @@ func detectRunMount(cmd *command, allDispatchStates *dispatchStates) bool {
return false
}
func setCacheUIDGIDFileOp(m *instructions.Mount, st llb.State) llb.State {
uid := 0
gid := 0
mode := os.FileMode(0755)
if m.UID != nil {
uid = int(*m.UID)
}
if m.GID != nil {
gid = int(*m.GID)
}
if m.Mode != nil {
mode = os.FileMode(*m.Mode)
}
return st.File(llb.Mkdir("/cache", mode, llb.WithUIDGID(uid, gid)), llb.WithCustomName("[internal] settings cache mount permissions"))
}
func setCacheUIDGID(m *instructions.Mount, st llb.State, fileop bool) llb.State {
if fileop {
return setCacheUIDGIDFileOp(m, st)
}
var b strings.Builder
if m.UID != nil {
b.WriteString(fmt.Sprintf("chown %d /mnt/cache;", *m.UID))
}
if m.GID != nil {
b.WriteString(fmt.Sprintf("chown :%d /mnt/cache;", *m.GID))
}
if m.Mode != nil {
b.WriteString(fmt.Sprintf("chmod %s /mnt/cache;", strconv.FormatUint(*m.Mode, 8)))
}
return llb.Image("busybox").Run(llb.Shlex(fmt.Sprintf("sh -c 'mkdir -p /mnt/cache;%s'", b.String())), llb.WithCustomName("[internal] settings cache mount permissions")).AddMount("/mnt", st)
}
func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*dispatchState, opt dispatchOpt) ([]llb.RunOption, error) {
var out []llb.RunOption
mounts := instructions.GetMounts(c)
@ -97,7 +135,13 @@ func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*
}
if src := path.Join("/", mount.Source); src != "/" {
mountOpts = append(mountOpts, llb.SourcePath(src))
} else {
if mount.UID != nil || mount.GID != nil || mount.Mode != nil {
st = setCacheUIDGID(mount, st, useFileOp(opt.buildArgValues, opt.llbCaps))
mountOpts = append(mountOpts, llb.SourcePath("/cache"))
}
}
out = append(out, llb.AddMount(target, st, mountOpts...))
d.ctxPaths[path.Join("/", filepath.ToSlash(mount.Source))] = struct{}{}


@ -206,18 +206,18 @@ func parseMount(value string) (*Mount, error) {
}
}
fileInfoAllowed := m.Type == MountTypeSecret || m.Type == MountTypeSSH
fileInfoAllowed := m.Type == MountTypeSecret || m.Type == MountTypeSSH || m.Type == MountTypeCache
if m.Mode != nil && !fileInfoAllowed {
return nil, errors.Errorf("mode not allowed for %q type mounts")
return nil, errors.Errorf("mode not allowed for %q type mounts", m.Type)
}
if m.UID != nil && !fileInfoAllowed {
return nil, errors.Errorf("uid not allowed for %q type mounts")
return nil, errors.Errorf("uid not allowed for %q type mounts", m.Type)
}
if m.GID != nil && !fileInfoAllowed {
return nil, errors.Errorf("gid not allowed for %q type mounts")
return nil, errors.Errorf("gid not allowed for %q type mounts", m.Type)
}
if roAuto {


@ -35,7 +35,7 @@ type Node struct {
Original string // original line used before parsing
Flags []string // only top Node should have this set
StartLine int // the line in the original dockerfile where the node begins
endLine int // the line in the original dockerfile where the node ends
EndLine int // the line in the original dockerfile where the node ends
}
// Dump dumps the AST defined by `node` as a list of sexps.
@ -65,7 +65,7 @@ func (node *Node) Dump() string {
func (node *Node) lines(start, end int) {
node.StartLine = start
node.endLine = end
node.EndLine = end
}
// AddChild adds a new child node, and updates line information
@ -74,7 +74,7 @@ func (node *Node) AddChild(child *Node, startLine, endLine int) {
if node.StartLine < 0 {
node.StartLine = startLine
}
node.endLine = endLine
node.EndLine = endLine
node.Children = append(node.Children, child)
}


@ -2,6 +2,7 @@ package blobmapping
import (
"context"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/snapshots"
@ -122,6 +123,16 @@ func (s *Snapshotter) SetBlob(ctx context.Context, key string, diffID, blobsum d
return err
}
}
// update gc.root because the blob might be held by a lease only
if _, err := s.opt.Content.Update(ctx, content.Info{
Digest: blobsum,
Labels: map[string]string{
"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339Nano),
},
}, "labels.containerd.io/gc.root"); err != nil {
return err
}
md, _ := s.opt.MetadataStore.Get(key)
v, err := metadata.NewValue(DiffPair{DiffID: diffID, Blobsum: blobsum})


@ -133,6 +133,12 @@ func (ls *localSourceHandler) Snapshot(ctx context.Context) (out cache.Immutable
defer func() {
if retErr != nil && mutable != nil {
// on error remove the record as checksum update is in undefined state
cache.CachePolicyDefault(mutable)
if err := mutable.Metadata().Commit(); err != nil {
logrus.Errorf("failed to reset mutable cachepolicy: %v", err)
}
contenthash.ClearCacheContext(mutable.Metadata())
go mutable.Release(context.TODO())
}
}()


@ -21,6 +21,9 @@ func SupportedPlatforms() []string {
if p := "linux/arm64"; def != p && arm64Supported() == nil {
arr = append(arr, p)
}
if p := "linux/riscv64"; def != p && riscv64Supported() == nil {
arr = append(arr, p)
}
if !strings.HasPrefix(def, "linux/arm/") && armSupported() == nil {
arr = append(arr, "linux/arm/v7", "linux/arm/v6")
} else if def == "linux/arm/v7" {
@ -47,6 +50,11 @@ func WarnIfUnsupported(pfs []string) {
printPlatfromWarning(p, err)
}
}
if p == "linux/riscv64" {
if err := riscv64Supported(); err != nil {
printPlatfromWarning(p, err)
}
}
if strings.HasPrefix(p, "linux/arm/v6") || strings.HasPrefix(p, "linux/arm/v7") {
if err := armSupported(); err != nil {
printPlatfromWarning(p, err)


@ -0,0 +1,8 @@
// +build !riscv64
package binfmt_misc
// This file is generated by running make inside the binfmt_misc package.
// Do not edit manually.
const Binaryriscv64 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x64\x64\x80\x01\x26\x86\xcf\x0c\x20\x5e\x05\x03\x44\xcc\x01\x2a\x3e\x03\x4a\xb3\x80\xc5\x2c\x18\x18\x19\x1c\x18\x98\x19\x98\xc0\xaa\x58\x19\x90\x01\x23\x0a\xdd\x02\xe5\xc1\x68\x06\x01\x08\x25\xcc\xca\xc0\x30\x99\xe3\x02\x6b\x31\x88\xa3\x57\x9c\x51\x5c\x52\x54\x92\x98\xc4\xa0\x57\x92\x5a\x51\xc2\x40\x05\xc0\x0d\x75\x01\x1b\x94\x0f\xf3\x4f\x05\x94\xcf\x83\xa6\x9e\x05\x8d\x0f\x52\xcd\x8c\xc5\x5c\x98\x3f\x04\xb1\xa8\x47\x06\x80\x00\x00\x00\xff\xff\x39\x41\xdf\xa1\x58\x01\x00\x00"


@ -0,0 +1,7 @@
// +build !riscv64
package binfmt_misc
func riscv64Supported() error {
return check(Binaryriscv64)
}


@ -0,0 +1,7 @@
// +build riscv64
package binfmt_misc
func riscv64Supported() error {
return nil
}


@ -3,12 +3,17 @@ package imageutil
import (
"context"
"encoding/json"
"fmt"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/leases"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/reference"
"github.com/containerd/containerd/remotes"
"github.com/moby/buildkit/util/leaseutil"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
@ -19,7 +24,19 @@ type ContentCache interface {
content.Provider
}
func Config(ctx context.Context, str string, resolver remotes.Resolver, cache ContentCache, p *specs.Platform) (digest.Digest, []byte, error) {
var leasesMu sync.Mutex
var leasesF []func(context.Context) error
func CancelCacheLeases() {
leasesMu.Lock()
for _, f := range leasesF {
f(context.TODO())
}
leasesF = nil
leasesMu.Unlock()
}
func Config(ctx context.Context, str string, resolver remotes.Resolver, cache ContentCache, leaseManager leases.Manager, p *specs.Platform) (digest.Digest, []byte, error) {
// TODO: fix buildkit to take interface instead of struct
var platform platforms.MatchComparer
if p != nil {
@ -32,6 +49,20 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co
return "", nil, errors.WithStack(err)
}
if leaseManager != nil {
ctx2, done, err := leaseutil.WithLease(ctx, leaseManager, leases.WithExpiration(5*time.Minute))
if err != nil {
return "", nil, errors.WithStack(err)
}
ctx = ctx2
defer func() {
// this lease is not deleted so that other components can still access the manifest/config from the cache. It will be deleted after the 5 minute deadline or when pruning an inactive builder
leasesMu.Lock()
leasesF = append(leasesF, done)
leasesMu.Unlock()
}()
}
desc := specs.Descriptor{
Digest: ref.Digest(),
}
@ -62,9 +93,14 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co
return readSchema1Config(ctx, ref.String(), desc, fetcher, cache)
}
children := childrenConfigHandler(cache, platform)
if m, ok := cache.(content.Manager); ok {
children = SetChildrenLabelsNonBlobs(m, children)
}
handlers := []images.Handler{
fetchWithoutRoot(remotes.FetchHandler(cache, fetcher)),
childrenConfigHandler(cache, platform),
children,
}
if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, desc); err != nil {
return "", nil, err
@ -171,3 +207,39 @@ func DetectManifestBlobMediaType(dt []byte) (string, error) {
}
return images.MediaTypeDockerSchema2ManifestList, nil
}
func SetChildrenLabelsNonBlobs(manager content.Manager, f images.HandlerFunc) images.HandlerFunc {
return func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) {
children, err := f(ctx, desc)
if err != nil {
return children, err
}
if len(children) > 0 {
info := content.Info{
Digest: desc.Digest,
Labels: map[string]string{},
}
fields := []string{}
for i, ch := range children {
switch ch.MediaType {
case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip, specs.MediaTypeImageLayer, specs.MediaTypeImageLayerGzip:
continue
default:
}
info.Labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = ch.Digest.String()
fields = append(fields, fmt.Sprintf("labels.containerd.io/gc.ref.content.%d", i))
}
if len(info.Labels) > 0 {
_, err := manager.Update(ctx, info, fields...)
if err != nil {
return nil, err
}
}
}
return children, err
}
}


@ -0,0 +1,104 @@
package leaseutil
import (
"context"
"time"
"github.com/containerd/containerd/leases"
"github.com/containerd/containerd/metadata"
"github.com/containerd/containerd/namespaces"
bolt "go.etcd.io/bbolt"
)
func WithLease(ctx context.Context, ls leases.Manager, opts ...leases.Opt) (context.Context, func(context.Context) error, error) {
_, ok := leases.FromContext(ctx)
if ok {
return ctx, func(context.Context) error {
return nil
}, nil
}
l, err := ls.Create(ctx, append([]leases.Opt{leases.WithRandomID(), leases.WithExpiration(time.Hour)}, opts...)...)
if err != nil {
return nil, nil, err
}
ctx = leases.WithLease(ctx, l.ID)
return ctx, func(ctx context.Context) error {
return ls.Delete(ctx, l)
}, nil
}
func NewManager(mdb *metadata.DB) leases.Manager {
return &local{db: mdb}
}
type local struct {
db *metadata.DB
}
func (l *local) Create(ctx context.Context, opts ...leases.Opt) (leases.Lease, error) {
var lease leases.Lease
if err := l.db.Update(func(tx *bolt.Tx) error {
var err error
lease, err = metadata.NewLeaseManager(tx).Create(ctx, opts...)
return err
}); err != nil {
return leases.Lease{}, err
}
return lease, nil
}
func (l *local) Delete(ctx context.Context, lease leases.Lease, opts ...leases.DeleteOpt) error {
var do leases.DeleteOptions
for _, opt := range opts {
if err := opt(ctx, &do); err != nil {
return err
}
}
if err := l.db.Update(func(tx *bolt.Tx) error {
return metadata.NewLeaseManager(tx).Delete(ctx, lease)
}); err != nil {
return err
}
return nil
}
func (l *local) List(ctx context.Context, filters ...string) ([]leases.Lease, error) {
var ll []leases.Lease
if err := l.db.View(func(tx *bolt.Tx) error {
var err error
ll, err = metadata.NewLeaseManager(tx).List(ctx, filters...)
return err
}); err != nil {
return nil, err
}
return ll, nil
}
func WithNamespace(lm leases.Manager, ns string) leases.Manager {
return &nsLM{Manager: lm, ns: ns}
}
type nsLM struct {
leases.Manager
ns string
}
func (l *nsLM) Create(ctx context.Context, opts ...leases.Opt) (leases.Lease, error) {
ctx = namespaces.WithNamespace(ctx, l.ns)
return l.Manager.Create(ctx, opts...)
}
func (l *nsLM) Delete(ctx context.Context, lease leases.Lease, opts ...leases.DeleteOpt) error {
ctx = namespaces.WithNamespace(ctx, l.ns)
return l.Manager.Delete(ctx, lease, opts...)
}
func (l *nsLM) List(ctx context.Context, filters ...string) ([]leases.Lease, error) {
ctx = namespaces.WithNamespace(ctx, l.ns)
return l.Manager.List(ctx, filters...)
}
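
A hedged usage sketch for the new helper (the caller below is an assumption, not part of this commit): `WithLease` reuses any lease already attached to the context or creates a short-lived one, and hands back a release callback that the caller can defer or, as `imageutil.Config` now does, stash for later cancellation.

```go
package example

import (
	"context"
	"time"

	"github.com/containerd/containerd/leases"
	"github.com/moby/buildkit/util/leaseutil"
)

// fetchWithLease is an assumed caller, not from the commit: it runs the
// supplied work under a 5 minute lease, mirroring what imageutil.Config now
// does so that fetched content is not garbage collected mid-operation.
func fetchWithLease(ctx context.Context, lm leases.Manager, work func(context.Context) error) error {
	ctx, done, err := leaseutil.WithLease(ctx, lm, leases.WithExpiration(5*time.Minute))
	if err != nil {
		return err
	}
	// Releasing the lease lets containerd's GC reclaim the content again;
	// imageutil.Config instead stores this callback for CancelCacheLeases.
	defer func() { _ = done(context.TODO()) }()

	return work(ctx)
}
```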