Mirror of https://github.com/moby/moby.git

Bump containerd to cc969fb42f427a68a8cc6870ef47f17304b83962

Signed-off-by: Michael Crosby <crosbymichael@gmail.com>
Author: Michael Crosby 2017-12-04 14:14:42 -05:00
Parent: 7ef9266ba0
Commit: ad2b34f205
29 changed files with 334 additions and 97 deletions


@ -4,7 +4,7 @@ TOMLV_COMMIT=9baf8a8a9f2ed20a8e54160840c492f937eeaf9a
# When updating RUNC_COMMIT, also update runc in vendor.conf accordingly
RUNC_COMMIT=b2567b37d7b75eb4cf325b77297b140ea686ce8f
CONTAINERD_COMMIT=6bff39c643886dfa3d546e83a90a527b64ddeacf
CONTAINERD_COMMIT=cc969fb42f427a68a8cc6870ef47f17304b83962
TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574
LIBNETWORK_COMMIT=7b2b1feb1de4817d522cc372af149ff48d25028e
VNDR_COMMIT=a6e196d8b4b0cbbdc29aebdb20c59ac6926bb384


@ -103,7 +103,7 @@ github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
# containerd
github.com/containerd/containerd 6bff39c643886dfa3d546e83a90a527b64ddeacf
github.com/containerd/containerd cc969fb42f427a68a8cc6870ef47f17304b83962
github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6
github.com/containerd/continuity 35d55c5e8dd23b32037d56cf97174aff3efdfa83
github.com/containerd/cgroups 29da22c6171a4316169f9205ab6c49f59b5b852f
@ -111,7 +111,7 @@ github.com/containerd/console 84eeaae905fa414d03e07bcd6c8d3f19e7cf180e
github.com/containerd/go-runc ed1cbe1fc31f5fb2359d3a54b6330d1a097858b7
github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
github.com/dmcgowan/go-tar go1.10
github.com/stevvooe/ttrpc 8c92e22ce0c492875ccaac3ab06143a77d8ed0c1
github.com/stevvooe/ttrpc 76e68349ad9ab4d03d764c713826d31216715e4f
# cluster
github.com/docker/swarmkit de950a7ed842c7b7e47e9451cde9bf8f96031894


@ -15,6 +15,28 @@ containerd is designed to be embedded into a larger system, rather than being us
If you are interested in trying out containerd please see our [Getting Started Guide](docs/getting-started.md).
## Runtime Requirements
Runtime requirements for containerd are very minimal. Most interactions with
the Linux and Windows container feature sets are handled via [runc](https://github.com/opencontainers/runc) and/or
OS-specific libraries (e.g. [hcsshim](https://github.com/Microsoft/hcsshim) for Microsoft). There are specific features
used by containerd core code and snapshotters that will require a minimum kernel
version on Linux. With the understood caveat of distro kernel versioning, a
reasonable starting point for Linux is a minimum 4.x kernel version.
The overlay filesystem snapshotter, used by default, uses features that were
finalized in the 4.x kernel series. If you choose to use btrfs, there may
be more flexibility in kernel version (minimum recommended is 3.18), but it will
require the btrfs kernel module and btrfs tools to be installed on your Linux
distribution.
To use Linux checkpoint and restore features, you will need `criu` installed on
your system. See more details in [Checkpoint and Restore](#checkpoint-and-restore).
The current required version of runc is always listed in [RUNC.md](/RUNC.md).
Build requirements for developers are listed in the [Developer Quick-Start](#developer-quick-start) section.
## Features
### Client
@ -93,7 +115,6 @@ image, err := client.Pull(context, "docker.io/library/redis:latest", containerd.
redis, err := client.NewContainer(context, "redis-master",
containerd.WithNewSnapshot("redis-rootfs", image),
containerd.WithNewSpec(oci.WithImageConfig(image)),
)
// use a readonly filesystem with multiple containers
@ -150,7 +171,7 @@ defer task.Delete(context)
err := task.Start(context)
```
## Developer Quick-Start
## Developer Quick Start
To build the daemon and `ctr` simple test client, the following build system dependencies are required:
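Note: the README fragments above come from containerd's client walkthrough. As a rough end-to-end sketch of how those calls fit together at this vendored version (the socket path, namespace, and the `cio.Stdio` helper are assumptions inferred from the `cio.Creation` signature in the container.go hunk below, not confirmed by this diff):

```go
package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/cio"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/oci"
)

func main() {
	// Connect to the daemon; socket path and namespace are assumptions.
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "example")

	// Pull and unpack the image, as in the README snippet above.
	image, err := client.Pull(ctx, "docker.io/library/redis:latest", containerd.WithPullUnpack)
	if err != nil {
		log.Fatal(err)
	}

	// Create a container with a fresh snapshot and an image-derived spec.
	redis, err := client.NewContainer(ctx, "redis-master",
		containerd.WithNewSnapshot("redis-rootfs", image),
		containerd.WithNewSpec(oci.WithImageConfig(image)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer redis.Delete(ctx, containerd.WithSnapshotCleanup)

	// Create and start the task; cio.Stdio is assumed to match the
	// cio.Creation signature seen in the container.go hunk below.
	task, err := redis.NewTask(ctx, cio.Stdio)
	if err != nil {
		log.Fatal(err)
	}
	defer task.Delete(ctx)

	if err := task.Start(ctx); err != nil {
		log.Fatal(err)
	}
}
```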


@ -162,11 +162,17 @@ func (c *container) Image(ctx context.Context) (Image, error) {
}, nil
}
func (c *container) NewTask(ctx context.Context, ioCreate cio.Creation, opts ...NewTaskOpts) (Task, error) {
func (c *container) NewTask(ctx context.Context, ioCreate cio.Creation, opts ...NewTaskOpts) (_ Task, err error) {
i, err := ioCreate(c.id)
if err != nil {
return nil, err
}
defer func() {
if err != nil && i != nil {
i.Cancel()
i.Close()
}
}()
cfg := i.Config()
request := &tasks.CreateTaskRequest{
ContainerID: c.id,
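Note: the hunk above switches NewTask to a named return error so a deferred closure can cancel and close the just-created IO when any later step fails. A minimal standalone sketch of that idiom, with a file standing in for the task IO (hypothetical example, not containerd code):

```go
package main

import (
	"fmt"
	"os"
)

// openAndCheck mirrors the idiom above: with a named return error, the
// deferred closure can tell whether the function is exiting on a
// failure, and releases the freshly acquired resource only on that path.
func openAndCheck(path string) (_ *os.File, err error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			f.Close() // only runs when a later step failed
		}
	}()

	if _, err := f.Stat(); err != nil {
		return nil, err // the deferred cleanup fires here
	}
	return f, nil
}

func main() {
	f, err := openAndCheck("/etc/hosts")
	if err != nil {
		fmt.Println("failed:", err)
		return
	}
	defer f.Close()
	fmt.Println("opened", f.Name())
}
```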


@ -24,7 +24,6 @@ import (
"github.com/opencontainers/image-spec/identity"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
// WithCheckpoint allows a container to be created from the checkpointed information
@ -193,14 +192,17 @@ func remapRootFS(mounts []mount.Mount, uid, gid uint32) error {
if err != nil {
return err
}
defer os.RemoveAll(root)
defer os.Remove(root)
for _, m := range mounts {
if err := m.Mount(root); err != nil {
return err
}
}
defer unix.Unmount(root, 0)
return filepath.Walk(root, incrementFS(root, uid, gid))
err = filepath.Walk(root, incrementFS(root, uid, gid))
if uerr := mount.Unmount(root, 0); err == nil {
err = uerr
}
return err
}
func incrementFS(root string, uidInc, gidInc uint32) filepath.WalkFunc {
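Note: replacing the bare `defer unix.Unmount` with an explicit unmount lets remapRootFS report the walk error first and fall back to the unmount error. A self-contained sketch of that first-error-wins shape (op and cleanup are hypothetical stand-ins):

```go
package main

import (
	"errors"
	"fmt"
)

// runThenCleanup mirrors the shape used in remapRootFS above: run the
// main operation, always attempt cleanup, and surface the cleanup
// error only when the operation itself succeeded (first error wins).
func runThenCleanup(op, cleanup func() error) error {
	err := op()
	if cerr := cleanup(); err == nil {
		err = cerr
	}
	return err
}

func main() {
	err := runThenCleanup(
		func() error { return errors.New("walk failed") },
		func() error { return errors.New("unmount failed") },
	)
	fmt.Println(err) // "walk failed": the walk error is preserved
}
```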


@ -62,7 +62,7 @@ func NewStore(root string) (content.Store, error) {
// require labels and should use `NewStore`. `NewLabeledStore` is primarily
// useful for tests or standalone implementations.
func NewLabeledStore(root string, ls LabelStore) (content.Store, error) {
if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil && !os.IsExist(err) {
if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil {
return nil, err
}
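Note: the dropped os.IsExist guard above was dead code, since os.MkdirAll (unlike os.Mkdir) already treats an existing directory as success; any error it returns is a real failure. A tiny illustration (the path is illustrative):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	dir := "/tmp/example-ingest" // illustrative path

	// os.Mkdir fails with EEXIST on the second call...
	_ = os.Mkdir(dir, 0777)
	err := os.Mkdir(dir, 0777)
	fmt.Println(os.IsExist(err)) // true

	// ...but os.MkdirAll treats an existing directory as success, so
	// the IsExist guard removed above could never fire usefully.
	fmt.Println(os.MkdirAll(dir, 0777)) // <nil>
}
```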


@ -147,7 +147,7 @@ func (i *image) getLayers(ctx context.Context, platform string) ([]rootfs.Layer,
manifest, err := images.Manifest(ctx, cs, i.i.Target, platform)
if err != nil {
return nil, errors.Wrap(err, "")
return nil, err
}
diffIDs, err := i.i.RootFS(ctx, cs, platform)


@ -187,13 +187,13 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
return descs, nil
}
return nil, errors.Wrap(errdefs.ErrNotFound, "could not resolve manifest")
return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest)
}), image); err != nil {
return ocispec.Manifest{}, err
}
if m == nil {
return ocispec.Manifest{}, errors.Wrap(errdefs.ErrNotFound, "manifest not found")
return ocispec.Manifest{}, errors.Wrapf(errdefs.ErrNotFound, "manifest %v", image.Digest)
}
return *m, nil
@ -257,7 +257,7 @@ func Check(ctx context.Context, provider content.Provider, image ocispec.Descrip
return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil
}
return false, nil, nil, nil, errors.Wrap(err, "image check failed")
return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", image.Digest)
}
// TODO(stevvooe): It is possible that referenced components could have
@ -272,7 +272,7 @@ func Check(ctx context.Context, provider content.Provider, image ocispec.Descrip
missing = append(missing, desc)
continue
} else {
return false, nil, nil, nil, err
return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", desc.Digest)
}
}
ra.Close()


@ -75,10 +75,10 @@ type bundle struct {
type ShimOpt func(*bundle, string, *runctypes.RuncOptions) (shim.Config, client.Opt)
// ShimRemote is a ShimOpt for connecting and starting a remote shim
func ShimRemote(shimBinary, daemonAddress, cgroup string, nonewns, debug bool, exitHandler func()) ShimOpt {
func ShimRemote(shimBinary, daemonAddress, cgroup string, debug bool, exitHandler func()) ShimOpt {
return func(b *bundle, ns string, ropts *runctypes.RuncOptions) (shim.Config, client.Opt) {
return b.shimConfig(ns, ropts),
client.WithStart(shimBinary, b.shimAddress(ns), daemonAddress, cgroup, nonewns, debug, exitHandler)
client.WithStart(shimBinary, b.shimAddress(ns), daemonAddress, cgroup, debug, exitHandler)
}
}


@ -22,6 +22,7 @@ import (
shim "github.com/containerd/containerd/linux/shim/v1"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/metadata"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/plugin"
@ -78,17 +79,6 @@ type Config struct {
NoShim bool `toml:"no_shim"`
// Debug enable debug on the shim
ShimDebug bool `toml:"shim_debug"`
// ShimNoMountNS prevents the runtime from putting shims into their own mount namespace.
//
// Putting the shim in its own mount namespace ensure that any mounts made
// by it in order to get the task rootfs ready will be undone regardless
// on how the shim dies.
//
// NOTE: This should only be used in kernel older than 3.18 to avoid shims
// from causing a DoS in their parent namespace due to having a copy of
// mounts previously there which would prevent unlink, rename and remove
// operations on those mountpoints.
ShimNoMountNS bool `toml:"shim_no_newns"`
}
// New returns a configured runtime
@ -226,8 +216,7 @@ func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts
}).Warn("failed to clean up after killed shim")
}
}
shimopt = ShimRemote(r.config.Shim, r.address, cgroup,
r.config.ShimNoMountNS, r.config.ShimDebug, exitHandler)
shimopt = ShimRemote(r.config.Shim, r.address, cgroup, r.config.ShimDebug, exitHandler)
}
s, err := bundle.NewShimClient(ctx, namespace, shimopt, ropts)
@ -486,7 +475,7 @@ func (r *Runtime) terminate(ctx context.Context, bundle *bundle, ns, id string)
}); err != nil {
log.G(ctx).WithError(err).Warnf("delete runtime state %s", id)
}
if err := unix.Unmount(filepath.Join(bundle.path, "rootfs"), 0); err != nil {
if err := mount.Unmount(filepath.Join(bundle.path, "rootfs"), 0); err != nil {
log.G(ctx).WithError(err).WithFields(logrus.Fields{
"path": bundle.path,
"id": id,


@ -34,7 +34,7 @@ var empty = &ptypes.Empty{}
type Opt func(context.Context, shim.Config) (shimapi.ShimService, io.Closer, error)
// WithStart executes a new shim process
func WithStart(binary, address, daemonAddress, cgroup string, nonewns, debug bool, exitHandler func()) Opt {
func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHandler func()) Opt {
return func(ctx context.Context, config shim.Config) (_ shimapi.ShimService, _ io.Closer, err error) {
socket, err := newSocket(address)
if err != nil {
@ -47,7 +47,7 @@ func WithStart(binary, address, daemonAddress, cgroup string, nonewns, debug boo
}
defer f.Close()
cmd := newCommand(binary, daemonAddress, nonewns, debug, config, f)
cmd := newCommand(binary, daemonAddress, debug, config, f)
ec, err := reaper.Default.Start(cmd)
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to start shim")
@ -87,7 +87,7 @@ func WithStart(binary, address, daemonAddress, cgroup string, nonewns, debug boo
}
}
func newCommand(binary, daemonAddress string, nonewns, debug bool, config shim.Config, socket *os.File) *exec.Cmd {
func newCommand(binary, daemonAddress string, debug bool, config shim.Config, socket *os.File) *exec.Cmd {
selfExe, err := os.Executable()
if err != nil {
panic(err)
@ -117,7 +117,7 @@ func newCommand(binary, daemonAddress string, nonewns, debug bool, config shim.C
// make sure the shim can be re-parented to system init
// and is cloned in a new mount namespace because the overlay/filesystems
// will be mounted by the shim
cmd.SysProcAttr = getSysProcAttr(nonewns)
cmd.SysProcAttr = getSysProcAttr()
cmd.ExtraFiles = append(cmd.ExtraFiles, socket)
if debug {
cmd.Stdout = os.Stdout


@ -10,14 +10,10 @@ import (
"github.com/pkg/errors"
)
func getSysProcAttr(nonewns bool) *syscall.SysProcAttr {
attr := syscall.SysProcAttr{
func getSysProcAttr() *syscall.SysProcAttr {
return &syscall.SysProcAttr{
Setpgid: true,
}
if !nonewns {
attr.Cloneflags = syscall.CLONE_NEWNS
}
return &attr
}
func setCgroup(cgroupPath string, cmd *exec.Cmd) error {
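Note: with the shim_no_newns option gone, the shim is no longer cloned into a private mount namespace at all; leftover rootfs mounts are instead cleaned up explicitly via the new mount.Unmount helper (see the mount hunk below). A sketch of the two SysProcAttr shapes this hunk switches between (newShimAttr is a hypothetical helper, not containerd code):

```go
package main

import (
	"os/exec"
	"syscall"
)

// newShimAttr contrasts the two behaviors this hunk switches between:
// before the change the shim could be cloned into a private mount
// namespace; after it, only Setpgid is set.
func newShimAttr(privateMounts bool) *syscall.SysProcAttr {
	attr := &syscall.SysProcAttr{
		Setpgid: true, // own process group so the shim survives reparenting
	}
	if privateMounts {
		attr.Cloneflags = syscall.CLONE_NEWNS // behavior removed by this commit
	}
	return attr
}

func main() {
	cmd := exec.Command("/bin/true")
	cmd.SysProcAttr = newShimAttr(false) // the post-change default
	_ = cmd.Run()
}
```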


@ -7,7 +7,7 @@ import (
"syscall"
)
func getSysProcAttr(nonewns bool) *syscall.SysProcAttr {
func getSysProcAttr() *syscall.SysProcAttr {
return &syscall.SysProcAttr{
Setpgid: true,
}


@ -7,8 +7,8 @@ import (
"path/filepath"
shimapi "github.com/containerd/containerd/linux/shim/v1"
"github.com/containerd/containerd/mount"
ptypes "github.com/gogo/protobuf/types"
"golang.org/x/sys/unix"
)
// NewLocal returns a shim client implementation for issuing commands to a shim
@ -32,7 +32,7 @@ func (c *local) Start(ctx context.Context, in *shimapi.StartRequest) (*shimapi.S
func (c *local) Delete(ctx context.Context, in *ptypes.Empty) (*shimapi.DeleteResponse, error) {
// make sure we unmount the container's rootfs for this local
if err := unix.Unmount(filepath.Join(c.s.config.Path, "rootfs"), 0); err != nil {
if err := mount.Unmount(filepath.Join(c.s.config.Path, "rootfs"), 0); err != nil {
return nil, err
}
return c.s.Delete(ctx, in)


@ -37,12 +37,12 @@ func (s *containerStore) Get(ctx context.Context, id string) (containers.Contain
bkt := getContainerBucket(s.tx, namespace, id)
if bkt == nil {
return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "bucket name %q:%q", namespace, id)
return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "container %q in namespace %q", id, namespace)
}
container := containers.Container{ID: id}
if err := readContainer(&container, bkt); err != nil {
return containers.Container{}, errors.Wrapf(err, "failed to read container %v", id)
return containers.Container{}, errors.Wrapf(err, "failed to read container %q", id)
}
return container, nil
@ -61,7 +61,7 @@ func (s *containerStore) List(ctx context.Context, fs ...string) ([]containers.C
bkt := getContainersBucket(s.tx, namespace)
if bkt == nil {
return nil, nil
return nil, nil // empty store
}
var m []containers.Container
@ -73,7 +73,7 @@ func (s *containerStore) List(ctx context.Context, fs ...string) ([]containers.C
container := containers.Container{ID: string(k)}
if err := readContainer(&container, cbkt); err != nil {
return errors.Wrap(err, "failed to read container")
return errors.Wrapf(err, "failed to read container %q", string(k))
}
if filter.Match(adaptContainer(container)) {
@ -113,7 +113,7 @@ func (s *containerStore) Create(ctx context.Context, container containers.Contai
container.CreatedAt = time.Now().UTC()
container.UpdatedAt = container.CreatedAt
if err := writeContainer(cbkt, &container); err != nil {
return containers.Container{}, errors.Wrap(err, "failed to write container")
return containers.Container{}, errors.Wrapf(err, "failed to write container %q", container.ID)
}
return container, nil
@ -131,7 +131,7 @@ func (s *containerStore) Update(ctx context.Context, container containers.Contai
bkt := getContainersBucket(s.tx, namespace)
if bkt == nil {
return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "container %q", container.ID)
return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "cannot update container %q in namespace %q", container.ID, namespace)
}
cbkt := bkt.Bucket([]byte(container.ID))
@ -141,7 +141,7 @@ func (s *containerStore) Update(ctx context.Context, container containers.Contai
var updated containers.Container
if err := readContainer(&updated, cbkt); err != nil {
return updated, errors.Wrapf(err, "failed to read container from bucket")
return updated, errors.Wrapf(err, "failed to read container %q", container.ID)
}
createdat := updated.CreatedAt
updated.ID = container.ID
@ -211,7 +211,7 @@ func (s *containerStore) Update(ctx context.Context, container containers.Contai
updated.CreatedAt = createdat
updated.UpdatedAt = time.Now().UTC()
if err := writeContainer(cbkt, &updated); err != nil {
return containers.Container{}, errors.Wrap(err, "failed to write container")
return containers.Container{}, errors.Wrapf(err, "failed to write container %q", container.ID)
}
return updated, nil
@ -225,7 +225,7 @@ func (s *containerStore) Delete(ctx context.Context, id string) error {
bkt := getContainersBucket(s.tx, namespace)
if bkt == nil {
return errors.Wrapf(errdefs.ErrNotFound, "cannot delete container %v, bucket not present", id)
return errors.Wrapf(errdefs.ErrNotFound, "cannot delete container %q in namespace %q", id, namespace)
}
if err := bkt.DeleteBucket([]byte(id)); err == bolt.ErrBucketNotFound {
@ -236,7 +236,7 @@ func (s *containerStore) Delete(ctx context.Context, id string) error {
func validateContainer(container *containers.Container) error {
if err := identifiers.Validate(container.ID); err != nil {
return errors.Wrapf(err, "container.ID validation error")
return errors.Wrap(err, "container.ID")
}
for k := range container.Extensions {
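Note: the rewrapped messages above keep the errdefs sentinel at the root of the error chain, so callers can still classify failures after the extra context is added. A minimal sketch of that pattern using github.com/pkg/errors as in the diff (errNotFound is a stand-in for errdefs.ErrNotFound):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// errNotFound stands in for errdefs.ErrNotFound.
var errNotFound = errors.New("not found")

func getContainer(id, namespace string) error {
	// Wrapf adds context while keeping the sentinel reachable via Cause.
	return errors.Wrapf(errNotFound, "container %q in namespace %q", id, namespace)
}

func main() {
	err := getContainer("redis", "default")
	fmt.Println(err)                              // container "redis" in namespace "default": not found
	fmt.Println(errors.Cause(err) == errNotFound) // true: classification still works
}
```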


@ -138,7 +138,7 @@ func (m *DB) Init(ctx context.Context) error {
if err := m.migrate(tx); err != nil {
return errors.Wrapf(err, "failed to migrate to %s.%d", m.schema, m.version)
}
log.G(ctx).WithField("d", time.Now().Sub(t0)).Debugf("database migration to %s.%d finished", m.schema, m.version)
log.G(ctx).WithField("d", time.Now().Sub(t0)).Debugf("finished database migration to %s.%d", m.schema, m.version)
}
}
@ -269,7 +269,7 @@ func (m *DB) GarbageCollect(ctx context.Context) (stats GCStats, err error) {
stats.SnapshotD = map[string]time.Duration{}
wg.Add(len(m.dirtySS))
for snapshotterName := range m.dirtySS {
log.G(ctx).WithField("snapshotter", snapshotterName).Debug("scheduling snapshotter cleanup")
log.G(ctx).WithField("snapshotter", snapshotterName).Debug("schedule snapshotter cleanup")
go func(snapshotterName string) {
st1 := time.Now()
m.cleanupSnapshotter(snapshotterName)
@ -286,7 +286,7 @@ func (m *DB) GarbageCollect(ctx context.Context) (stats GCStats, err error) {
if m.dirtyCS {
wg.Add(1)
log.G(ctx).Debug("scheduling content cleanup")
log.G(ctx).Debug("schedule content cleanup")
go func() {
ct1 := time.Now()
m.cleanupContent()


@ -301,7 +301,7 @@ func remove(ctx context.Context, tx *bolt.Tx, node gc.Node) error {
cbkt = cbkt.Bucket(bucketKeyObjectBlob)
}
if cbkt != nil {
log.G(ctx).WithField("key", node.Key).Debug("delete content")
log.G(ctx).WithField("key", node.Key).Debug("remove content")
return cbkt.DeleteBucket([]byte(node.Key))
}
case ResourceSnapshot:
@ -313,7 +313,7 @@ func remove(ctx context.Context, tx *bolt.Tx, node gc.Node) error {
}
ssbkt := sbkt.Bucket([]byte(parts[0]))
if ssbkt != nil {
log.G(ctx).WithField("key", parts[1]).WithField("snapshotter", parts[0]).Debug("delete snapshot")
log.G(ctx).WithField("key", parts[1]).WithField("snapshotter", parts[0]).Debug("remove snapshot")
return ssbkt.DeleteBucket([]byte(parts[1]))
}
}


@ -359,7 +359,8 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap
return update(ctx, s.db, func(tx *bolt.Tx) error {
bkt := getSnapshotterBucket(tx, ns, s.name)
if bkt == nil {
return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key)
return errors.Wrapf(errdefs.ErrNotFound,
"can not find snapshotter %q", s.name)
}
bbkt, err := bkt.CreateBucket([]byte(name))
@ -722,7 +723,7 @@ func (s *snapshotter) pruneBranch(ctx context.Context, node *treeNode) error {
if !errdefs.IsFailedPrecondition(err) {
return err
}
logger.WithError(err).WithField("key", node.info.Name).Warnf("snapshot removal failed")
logger.WithError(err).WithField("key", node.info.Name).Warnf("failed to remove snapshot")
} else {
logger.WithField("key", node.info.Name).Debug("removed snapshot")
}


@ -2,7 +2,9 @@ package mount
import (
"strings"
"time"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
@ -42,8 +44,27 @@ func (m *Mount) Mount(target string) error {
}
// Unmount the provided mount path with the flags
func Unmount(mount string, flags int) error {
return unix.Unmount(mount, flags)
func Unmount(target string, flags int) error {
if err := unmount(target, flags); err != nil && err != unix.EINVAL {
return err
}
return nil
}
func unmount(target string, flags int) error {
for i := 0; i < 50; i++ {
if err := unix.Unmount(target, flags); err != nil {
switch err {
case unix.EBUSY:
time.Sleep(50 * time.Millisecond)
continue
default:
return err
}
}
return nil
}
return errors.Wrapf(unix.EBUSY, "failed to unmount target %s", target)
}
// UnmountAll repeatedly unmounts the given mount point until there
@ -51,7 +72,7 @@ func Unmount(mount string, flags int) error {
// useful for undoing a stack of mounts on the same mount point.
func UnmountAll(mount string, flags int) error {
for {
if err := Unmount(mount, flags); err != nil {
if err := unmount(mount, flags); err != nil {
// EINVAL is returned if the target is not a
// mount point, indicating that we are
// done. It can also indicate a few other
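Note: the new unmount helper above retries for up to 50 attempts at 50ms intervals while the target reports EBUSY, then gives up with a wrapped EBUSY. The same retry shape in a runnable, self-contained form (errBusy and the fake op are stand-ins for unix.EBUSY and unix.Unmount):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// errBusy stands in for unix.EBUSY.
var errBusy = errors.New("device or resource busy")

// retryBusy mirrors the helper above: retry a bounded number of times
// while the target is busy, sleeping briefly between attempts, then
// give up and report the busy condition.
func retryBusy(op func() error) error {
	for i := 0; i < 50; i++ {
		if err := op(); err != nil {
			if err == errBusy {
				time.Sleep(50 * time.Millisecond)
				continue
			}
			return err
		}
		return nil
	}
	return fmt.Errorf("failed to unmount target: %v", errBusy)
}

func main() {
	attempts := 0
	err := retryBusy(func() error {
		attempts++
		if attempts < 3 {
			return errBusy // busy on the first two attempts
		}
		return nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}
```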


@ -12,12 +12,11 @@ import (
"strconv"
"strings"
"golang.org/x/sys/unix"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/fs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/namespaces"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runc/libcontainer/user"
@ -101,7 +100,7 @@ func WithImageConfig(image Image) SpecOpts {
parts := strings.Split(config.User, ":")
switch len(parts) {
case 1:
v, err := strconv.ParseUint(parts[0], 0, 10)
v, err := strconv.Atoi(parts[0])
if err != nil {
// if we cannot parse it as a number, then try to see if it is a username
if err := WithUsername(config.User)(ctx, client, c, s); err != nil {
@ -113,13 +112,13 @@ func WithImageConfig(image Image) SpecOpts {
return err
}
case 2:
v, err := strconv.ParseUint(parts[0], 0, 10)
v, err := strconv.Atoi(parts[0])
if err != nil {
return err
return errors.Wrapf(err, "parse uid %s", parts[0])
}
uid := uint32(v)
if v, err = strconv.ParseUint(parts[1], 0, 10); err != nil {
return err
if v, err = strconv.Atoi(parts[1]); err != nil {
return errors.Wrapf(err, "parse gid %s", parts[1])
}
gid := uint32(v)
s.Process.User.UID, s.Process.User.GID = uid, gid
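Note: the switch from strconv.ParseUint to strconv.Atoi above keeps the same `uid[:gid]` parsing flow while wrapping parse failures with context. A standalone sketch of that logic (parseUser is a hypothetical helper; the real opt falls back to a username lookup when the single value is not numeric):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/pkg/errors"
)

// parseUser splits a Docker-style "uid[:gid]" user string. The real
// opt falls back to a username lookup when the value is not numeric;
// this sketch simply reports the parse error.
func parseUser(user string) (uint32, uint32, error) {
	parts := strings.Split(user, ":")
	switch len(parts) {
	case 1:
		v, err := strconv.Atoi(parts[0])
		if err != nil {
			return 0, 0, errors.Wrapf(err, "parse uid %s", parts[0])
		}
		return uint32(v), uint32(v), nil
	case 2:
		u, err := strconv.Atoi(parts[0])
		if err != nil {
			return 0, 0, errors.Wrapf(err, "parse uid %s", parts[0])
		}
		g, err := strconv.Atoi(parts[1])
		if err != nil {
			return 0, 0, errors.Wrapf(err, "parse gid %s", parts[1])
		}
		return uint32(u), uint32(g), nil
	default:
		return 0, 0, errors.Errorf("invalid user string %q", user)
	}
}

func main() {
	uid, gid, err := parseUser("1000:1000")
	fmt.Println(uid, gid, err) // 1000 1000 <nil>
}
```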
@ -260,7 +259,7 @@ func WithUIDGID(uid, gid uint32) SpecOpts {
// or uid is not found in /etc/passwd, it sets gid to be the same as
// uid and does not return an error.
func WithUserID(uid uint32) SpecOpts {
return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) error {
return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) (err error) {
if c.Snapshotter == "" {
return errors.Errorf("no snapshotter set for container")
}
@ -276,13 +275,19 @@ func WithUserID(uid uint32) SpecOpts {
if err != nil {
return err
}
defer os.RemoveAll(root)
defer os.Remove(root)
for _, m := range mounts {
if err := m.Mount(root); err != nil {
return err
}
}
defer unix.Unmount(root, 0)
defer func() {
if uerr := mount.Unmount(root, 0); uerr != nil {
if err == nil {
err = uerr
}
}
}()
ppath, err := fs.RootPath(root, "/etc/passwd")
if err != nil {
return err
@ -317,7 +322,7 @@ func WithUserID(uid uint32) SpecOpts {
// does not exist, or the username is not found in /etc/passwd,
// it returns error.
func WithUsername(username string) SpecOpts {
return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) error {
return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) (err error) {
if c.Snapshotter == "" {
return errors.Errorf("no snapshotter set for container")
}
@ -333,13 +338,19 @@ func WithUsername(username string) SpecOpts {
if err != nil {
return err
}
defer os.RemoveAll(root)
defer os.Remove(root)
for _, m := range mounts {
if err := m.Mount(root); err != nil {
return err
}
}
defer unix.Unmount(root, 0)
defer func() {
if uerr := mount.Unmount(root, 0); uerr != nil {
if err == nil {
err = uerr
}
}
}()
ppath, err := fs.RootPath(root, "/etc/passwd")
if err != nil {
return err
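Note: both opts above mount the container's snapshot temporarily to resolve the user against the image's /etc/passwd, which is why they require c.Snapshotter to be set. A hedged usage sketch with the client API shown earlier (identifiers are illustrative, not from this diff):

```go
package example

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/oci"
)

// newContainerAsUser sketches how the opts compose: WithUserID resolves
// uid 1000 against the image's /etc/passwd on a temporary mount of the
// snapshot, so the snapshotter must be set by an earlier opt such as
// WithNewSnapshot.
func newContainerAsUser(ctx context.Context, client *containerd.Client, image containerd.Image) (containerd.Container, error) {
	return client.NewContainer(ctx, "example",
		containerd.WithNewSnapshot("example-rootfs", image),
		containerd.WithNewSpec(
			oci.WithImageConfig(image),
			oci.WithUserID(1000), // falls back to gid == uid when no passwd entry exists
		),
	)
}
```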


@ -60,3 +60,11 @@ func WithTTY(width, height int) SpecOpts {
return nil
}
}
// WithUsername sets the username on the process
func WithUsername(username string) SpecOpts {
return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) error {
s.Process.User.Username = username
return nil
}
}


@ -55,10 +55,10 @@ func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snap
_, err := sn.Stat(ctx, chainID.String())
if err == nil {
log.G(ctx).Debugf("Extraction not needed, layer snapshot exists")
log.G(ctx).Debugf("Extraction not needed, layer snapshot %s exists", chainID)
return false, nil
} else if !errdefs.IsNotFound(err) {
return false, errors.Wrap(err, "failed to stat snapshot")
return false, errors.Wrapf(err, "failed to stat snapshot %s", chainID)
}
key := fmt.Sprintf("extract-%s %s", uniquePart(), chainID)
@ -67,7 +67,7 @@ func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snap
mounts, err := sn.Prepare(ctx, key, parent.String(), opts...)
if err != nil {
// TODO: if this is a snapshot-exists error, retry
return false, errors.Wrap(err, "failed to prepare extraction layer")
return false, errors.Wrapf(err, "failed to prepare extraction snapshot %q", key)
}
defer func() {
if err != nil {
@ -89,7 +89,7 @@ func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snap
if err = sn.Commit(ctx, chainID.String(), key, opts...); err != nil {
if !errdefs.IsAlreadyExists(err) {
return false, errors.Wrapf(err, "failed to commit snapshot %s", parent)
return false, errors.Wrapf(err, "failed to commit snapshot %s", key)
}
// Destination already exists, cleanup key and return without error


@ -49,6 +49,8 @@ func (l *TaskList) Get(ctx context.Context, id string) (Task, error) {
// GetAll tasks under a namespace
func (l *TaskList) GetAll(ctx context.Context) ([]Task, error) {
l.mu.Lock()
defer l.mu.Unlock()
namespace, err := namespaces.NamespaceRequired(ctx)
if err != nil {
return nil, err
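Note: the added defer above fixes a lock leak; GetAll previously acquired l.mu and never released it. The standard shape, for reference (hypothetical types, not containerd code):

```go
package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mu    sync.Mutex
	items map[string]int
}

// snapshot copies items under the lock; defer guarantees the unlock
// runs on every return path, including early error returns like the
// NamespaceRequired check above.
func (r *registry) snapshot() []int {
	r.mu.Lock()
	defer r.mu.Unlock()
	out := make([]int, 0, len(r.items))
	for _, v := range r.items {
		out = append(out, v)
	}
	return out
}

func main() {
	r := &registry{items: map[string]int{"a": 1, "b": 2}}
	fmt.Println(len(r.snapshot())) // 2
}
```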


@ -277,7 +277,7 @@ func (t *task) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (*ExitStat
return &ExitStatus{code: r.ExitStatus, exitedAt: r.ExitedAt}, nil
}
func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreate cio.Creation) (Process, error) {
func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreate cio.Creation) (_ Process, err error) {
if id == "" {
return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "exec id must not be empty")
}
@ -285,6 +285,12 @@ func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreat
if err != nil {
return nil, err
}
defer func() {
if err != nil && i != nil {
i.Cancel()
i.Close()
}
}()
any, err := typeurl.MarshalAny(spec)
if err != nil {
return nil, err


@ -41,4 +41,4 @@ github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
github.com/dmcgowan/go-tar go1.10
github.com/stevvooe/ttrpc 8c92e22ce0c492875ccaac3ab06143a77d8ed0c1
github.com/stevvooe/ttrpc 76e68349ad9ab4d03d764c713826d31216715e4f

vendor/github.com/stevvooe/ttrpc/config.go (new vendored file, 23 lines)

@ -0,0 +1,23 @@
package ttrpc
import "github.com/pkg/errors"
type serverConfig struct {
handshaker Handshaker
}
type ServerOpt func(*serverConfig) error
// WithServerHandshaker can be passed to NewServer to ensure that the
// handshaker is called before every connection attempt.
//
// Only one handshaker is allowed per server.
func WithServerHandshaker(handshaker Handshaker) ServerOpt {
return func(c *serverConfig) error {
if c.handshaker != nil {
return errors.New("only one handshaker allowed per server")
}
c.handshaker = handshaker
return nil
}
}

vendor/github.com/stevvooe/ttrpc/handshake.go (new vendored file, 34 lines)

@ -0,0 +1,34 @@
package ttrpc
import (
"context"
"net"
)
// Handshaker defines the interface for connection handshakes performed on the
// server or client when first connecting.
type Handshaker interface {
// Handshake should confirm or decorate a connection that may be incoming
// to a server or outgoing from a client.
//
// If this returns without an error, the caller should use the connection
// in place of the original connection.
//
// The second return value can contain credential specific data, such as
// unix socket credentials or TLS information.
//
// While we currently only have implementations on the server-side, this
// interface should be sufficient to implement similar handshakes on the
// client-side.
Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error)
}
type handshakerFunc func(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error)
func (fn handshakerFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
return fn(ctx, conn)
}
func noopHandshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
return conn, nil, nil
}
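Note: any type with this Handshake method can be passed to WithServerHandshaker. A toy pass-through implementation (logHandshaker is hypothetical, not part of ttrpc):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net"

	"github.com/stevvooe/ttrpc"
)

// logHandshaker is a toy pass-through implementation: it records the
// peer address, leaves the connection unmodified, and returns no
// credential data.
type logHandshaker struct{}

func (logHandshaker) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
	log.Printf("ttrpc: connection from %v", conn.RemoteAddr())
	return conn, nil, nil
}

// Compile-time check that logHandshaker satisfies the interface.
var _ ttrpc.Handshaker = logHandshaker{}

func main() {
	fmt.Println("logHandshaker implements ttrpc.Handshaker")
}
```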


@ -2,6 +2,7 @@ package ttrpc
import (
"context"
"io"
"math/rand"
"net"
"sync"
@ -19,6 +20,7 @@ var (
)
type Server struct {
config *serverConfig
services *serviceSet
codec codec
@ -28,13 +30,21 @@ type Server struct {
done chan struct{} // marks point at which we stop serving requests
}
func NewServer() *Server {
func NewServer(opts ...ServerOpt) (*Server, error) {
config := &serverConfig{}
for _, opt := range opts {
if err := opt(config); err != nil {
return nil, err
}
}
return &Server{
config: config,
services: newServiceSet(),
done: make(chan struct{}),
listeners: make(map[net.Listener]struct{}),
connections: make(map[*serverConn]struct{}),
}
}, nil
}
func (s *Server) Register(name string, methods map[string]Method) {
@ -46,10 +56,15 @@ func (s *Server) Serve(l net.Listener) error {
defer s.closeListener(l)
var (
ctx = context.Background()
backoff time.Duration
ctx = context.Background()
backoff time.Duration
handshaker = s.config.handshaker
)
if handshaker == nil {
handshaker = handshakerFunc(noopHandshake)
}
for {
conn, err := l.Accept()
if err != nil {
@ -82,7 +97,15 @@ func (s *Server) Serve(l net.Listener) error {
}
backoff = 0
sc := s.newConn(conn)
approved, handshake, err := handshaker.Handshake(ctx, conn)
if err != nil {
log.L.WithError(err).Errorf("ttrpc: refusing connection after handshake")
conn.Close()
continue
}
sc := s.newConn(approved, handshake)
go sc.run(ctx)
}
}
@ -205,11 +228,12 @@ func (cs connState) String() string {
}
}
func (s *Server) newConn(conn net.Conn) *serverConn {
func (s *Server) newConn(conn net.Conn, handshake interface{}) *serverConn {
c := &serverConn{
server: s,
conn: conn,
shutdown: make(chan struct{}),
server: s,
conn: conn,
handshake: handshake,
shutdown: make(chan struct{}),
}
c.setState(connStateIdle)
s.addConnection(c)
@ -217,9 +241,10 @@ func (s *Server) newConn(conn net.Conn) *serverConn {
}
type serverConn struct {
server *Server
conn net.Conn
state atomic.Value
server *Server
conn net.Conn
handshake interface{} // data from handshake, not used for now
state atomic.Value
shutdownOnce sync.Once
shutdown chan struct{} // forced shutdown, used by close
@ -406,7 +431,7 @@ func (c *serverConn) run(sctx context.Context) {
// branch. Basically, it means that we are no longer receiving
// requests due to a terminal error.
recvErr = nil // connection is now "closing"
if err != nil {
if err != nil && err != io.EOF {
log.L.WithError(err).Error("error receiving message")
}
case <-shutdown:

vendor/github.com/stevvooe/ttrpc/unixcreds_linux.go (new vendored file, 92 lines)

@ -0,0 +1,92 @@
package ttrpc
import (
"context"
"net"
"os"
"syscall"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
type UnixCredentialsFunc func(*unix.Ucred) error
func (fn UnixCredentialsFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
uc, err := requireUnixSocket(conn)
if err != nil {
return nil, nil, errors.Wrap(err, "ttrpc.UnixCredentialsFunc: require unix socket")
}
rs, err := uc.SyscallConn()
if err != nil {
return nil, nil, errors.Wrap(err, "ttrpc.UnixCredentialsFunc: (net.UnixConn).SyscallConn failed")
}
var (
ucred *unix.Ucred
ucredErr error
)
if err := rs.Control(func(fd uintptr) {
ucred, ucredErr = unix.GetsockoptUcred(int(fd), unix.SOL_SOCKET, unix.SO_PEERCRED)
}); err != nil {
return nil, nil, errors.Wrapf(err, "ttrpc.UnixCredentialsFunc: (*syscall.RawConn).Control failed")
}
if ucredErr != nil {
return nil, nil, errors.Wrapf(ucredErr, "ttrpc.UnixCredentialsFunc: failed to retrieve socket peer credentials")
}
if err := fn(ucred); err != nil {
return nil, nil, errors.Wrapf(err, "ttrpc.UnixCredentialsFunc: credential check failed")
}
return uc, ucred, nil
}
// UnixSocketRequireUidGid requires specific *effective* UID/GID, rather than the real UID/GID.
//
// For example, if a daemon binary is owned by the root (UID 0) with SUID bit but running as an
// unprivileged user (UID 1001), the effective UID becomes 0, and the real UID becomes 1001.
// So calling this function with uid=0 allows a connection from effective UID 0 but rejects
// a connection from effective UID 1001.
//
// See socket(7), SO_PEERCRED: "The returned credentials are those that were in effect at the time of the call to connect(2) or socketpair(2)."
func UnixSocketRequireUidGid(uid, gid int) UnixCredentialsFunc {
return func(ucred *unix.Ucred) error {
return requireUidGid(ucred, uid, gid)
}
}
func UnixSocketRequireRoot() UnixCredentialsFunc {
return UnixSocketRequireUidGid(0, 0)
}
// UnixSocketRequireSameUser resolves the current effective unix user and returns a
// UnixCredentialsFunc that will validate incoming unix connections against the
// current credentials.
//
// This is useful when using abstract sockets that are accessible by all users.
func UnixSocketRequireSameUser() UnixCredentialsFunc {
euid, egid := os.Geteuid(), os.Getegid()
return UnixSocketRequireUidGid(euid, egid)
}
func requireRoot(ucred *unix.Ucred) error {
return requireUidGid(ucred, 0, 0)
}
func requireUidGid(ucred *unix.Ucred, uid, gid int) error {
if (uid != -1 && uint32(uid) != ucred.Uid) || (gid != -1 && uint32(gid) != ucred.Gid) {
return errors.Wrap(syscall.EPERM, "ttrpc: invalid credentials")
}
return nil
}
func requireUnixSocket(conn net.Conn) (*net.UnixConn, error) {
uc, ok := conn.(*net.UnixConn)
if !ok {
return nil, errors.New("a unix socket connection is required")
}
return uc, nil
}
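Note: tying the new ttrpc pieces together: NewServer now takes options and returns an error, and UnixSocketRequireSameUser (above) yields a Handshaker that rejects unix-socket peers whose effective uid/gid differ from the server's. A minimal wiring sketch (the socket path is illustrative, and no services are registered):

```go
package main

import (
	"log"
	"net"

	"github.com/stevvooe/ttrpc"
)

func main() {
	// NewServer now takes options and returns an error; the handshaker
	// rejects unix-socket peers whose effective uid/gid differ from ours.
	s, err := ttrpc.NewServer(ttrpc.WithServerHandshaker(
		ttrpc.UnixSocketRequireSameUser(),
	))
	if err != nil {
		log.Fatal(err)
	}

	// Socket path is illustrative; real services would be registered
	// with s.Register before serving.
	l, err := net.Listen("unix", "/tmp/example-shim.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	log.Fatal(s.Serve(l))
}
```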