// +build !windows

package libcontainerd // import "github.com/docker/docker/libcontainerd"

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"syscall"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/api/events"
	eventsapi "github.com/containerd/containerd/api/services/events/v1"
	"github.com/containerd/containerd/api/types"
	"github.com/containerd/containerd/archive"
	"github.com/containerd/containerd/cio"
	"github.com/containerd/containerd/content"
	containerderrors "github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/linux/runctypes"
	"github.com/containerd/typeurl"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// InitProcessName is the name given to the first process of a
// container
const InitProcessName = "init"
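
// container tracks the containerd resources that back a single Docker
// container: its bundle directory, containerd container and task, and any
// exec processes. mu guards the task, exec, and OOM-killed state.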
type container struct {
	mu sync.Mutex

	bundleDir string
	ctr       containerd.Container
	task      containerd.Task
	execs     map[string]containerd.Process
	oomKilled bool
}

func (c *container) setTask(t containerd.Task) {
	c.mu.Lock()
	c.task = t
	c.mu.Unlock()
}

func (c *container) getTask() containerd.Task {
	c.mu.Lock()
	t := c.task
	c.mu.Unlock()
	return t
}

func (c *container) addProcess(id string, p containerd.Process) {
	c.mu.Lock()
	if c.execs == nil {
		c.execs = make(map[string]containerd.Process)
	}
	c.execs[id] = p
	c.mu.Unlock()
}

func (c *container) deleteProcess(id string) {
	c.mu.Lock()
	delete(c.execs, id)
	c.mu.Unlock()
}

func (c *container) getProcess(id string) containerd.Process {
	c.mu.Lock()
	p := c.execs[id]
	c.mu.Unlock()
	return p
}

func (c *container) setOOMKilled(killed bool) {
	c.mu.Lock()
	c.oomKilled = killed
	c.mu.Unlock()
}

func (c *container) getOOMKilled() bool {
	c.mu.Lock()
	killed := c.oomKilled
	c.mu.Unlock()
	return killed
}
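
// client is the libcontainerd client backed by a containerd gRPC connection.
// It tracks every container created through it in the containers map, which
// is protected by the embedded RWMutex.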
type client struct {
	sync.RWMutex // protects containers map

	remote   *containerd.Client
	stateDir string
	logger   *logrus.Entry

	namespace  string
	backend    Backend
	eventQ     queue
	containers map[string]*container
}

func (c *client) reconnect() error {
	c.Lock()
	err := c.remote.Reconnect()
	c.Unlock()
	return err
}

func (c *client) setRemote(remote *containerd.Client) {
	c.Lock()
	c.remote = remote
	c.Unlock()
}

func (c *client) getRemote() *containerd.Client {
	c.RLock()
	remote := c.remote
	c.RUnlock()
	return remote
}
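
// Version returns the version information of the containerd daemon.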
func (c *client) Version(ctx context.Context) (containerd.Version, error) {
	return c.getRemote().Version(ctx)
}

// Restore loads the containerd container.
// It should not be called concurrently with any other operation for the given ID.
func (c *client) Restore(ctx context.Context, id string, attachStdio StdioCallback) (alive bool, pid int, err error) {
	c.Lock()
	_, ok := c.containers[id]
	if ok {
		c.Unlock()
		return false, 0, errors.WithStack(newConflictError("id already in use"))
	}

	cntr := &container{}
	c.containers[id] = cntr
	cntr.mu.Lock()
	defer cntr.mu.Unlock()

	c.Unlock()

	defer func() {
		if err != nil {
			c.Lock()
			delete(c.containers, id)
			c.Unlock()
		}
	}()

	var dio *cio.DirectIO
	defer func() {
		if err != nil && dio != nil {
			dio.Cancel()
			dio.Close()
		}
		err = wrapError(err)
	}()

	ctr, err := c.getRemote().LoadContainer(ctx, id)
	if err != nil {
		return false, -1, errors.WithStack(wrapError(err))
	}

	attachIO := func(fifos *cio.FIFOSet) (cio.IO, error) {
		// dio must be assigned to the previously defined dio for the defer above
		// to handle cleanup
		dio, err = cio.NewDirectIO(ctx, fifos)
		if err != nil {
			return nil, err
		}
		return attachStdio(dio)
	}
	t, err := ctr.Task(ctx, attachIO)
	if err != nil && !containerderrors.IsNotFound(err) {
		return false, -1, errors.Wrap(wrapError(err), "error getting containerd task for container")
	}

	if t != nil {
		s, err := t.Status(ctx)
		if err != nil {
			return false, -1, errors.Wrap(wrapError(err), "error getting task status")
		}

		alive = s.Status != containerd.Stopped
		pid = int(t.Pid())
	}

	cntr.bundleDir = filepath.Join(c.stateDir, id)
	cntr.ctr = ctr
	cntr.task = t
	// TODO(mlaventure): load execs

	c.logger.WithFields(logrus.Fields{
		"container": id,
		"alive":     alive,
		"pid":       pid,
	}).Debug("restored container")

	return alive, pid, nil
}
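
// Create creates a new containerd container for the given id from the OCI
// spec and runtime options. The container is registered with the client, but
// no task is created until Start is called.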
func (c *client) Create(ctx context.Context, id string, ociSpec *specs.Spec, runtimeOptions interface{}) error {
	if ctr := c.getContainer(id); ctr != nil {
		return errors.WithStack(newConflictError("id already in use"))
	}

	bdir, err := prepareBundleDir(filepath.Join(c.stateDir, id), ociSpec)
	if err != nil {
		return errdefs.System(errors.Wrap(err, "prepare bundle dir failed"))
	}

	c.logger.WithField("bundle", bdir).WithField("root", ociSpec.Root.Path).Debug("bundle dir created")

	cdCtr, err := c.getRemote().NewContainer(ctx, id,
		containerd.WithSpec(ociSpec),
		// TODO(mlaventure): when containerd supports lcow, revisit runtime value
		containerd.WithRuntime(fmt.Sprintf("io.containerd.runtime.v1.%s", runtime.GOOS), runtimeOptions))
	if err != nil {
		return wrapError(err)
	}

	c.Lock()
	c.containers[id] = &container{
		bundleDir: bdir,
		ctr:       cdCtr,
	}
	c.Unlock()

	return nil
}

// Start creates and starts a task for the specified containerd id
func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin bool, attachStdio StdioCallback) (int, error) {
	ctr := c.getContainer(id)
	if ctr == nil {
		return -1, errors.WithStack(newNotFoundError("no such container"))
	}
	if t := ctr.getTask(); t != nil {
		return -1, errors.WithStack(newConflictError("container already started"))
	}

	var (
		cp             *types.Descriptor
		t              containerd.Task
		rio            cio.IO
		err            error
		stdinCloseSync = make(chan struct{})
	)

	if checkpointDir != "" {
		// write checkpoint to the content store
		tar := archive.Diff(ctx, "", checkpointDir)
		cp, err = c.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, checkpointDir, tar)
		// remove the checkpoint when we're done
		defer func() {
			if cp != nil {
				err := c.getRemote().ContentStore().Delete(context.Background(), cp.Digest)
				if err != nil {
					c.logger.WithError(err).WithFields(logrus.Fields{
						"ref":    checkpointDir,
						"digest": cp.Digest,
					}).Warnf("failed to delete temporary checkpoint entry")
				}
			}
		}()
		if err := tar.Close(); err != nil {
			return -1, errors.Wrap(err, "failed to close checkpoint tar stream")
		}
		if err != nil {
			return -1, errors.Wrapf(err, "failed to upload checkpoint to containerd")
		}
	}

	spec, err := ctr.ctr.Spec(ctx)
	if err != nil {
		return -1, errors.Wrap(err, "failed to retrieve spec")
	}
	uid, gid := getSpecUser(spec)
	t, err = ctr.ctr.NewTask(ctx,
		func(id string) (cio.IO, error) {
			fifos := newFIFOSet(ctr.bundleDir, InitProcessName, withStdin, spec.Process.Terminal)
			rio, err = c.createIO(fifos, id, InitProcessName, stdinCloseSync, attachStdio)
			return rio, err
		},
		func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
			info.Checkpoint = cp
			info.Options = &runctypes.CreateOptions{
				IoUid:       uint32(uid),
				IoGid:       uint32(gid),
				NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "",
			}
			return nil
		})
	if err != nil {
		close(stdinCloseSync)
		if rio != nil {
			rio.Cancel()
			rio.Close()
		}
		return -1, wrapError(err)
	}

	ctr.setTask(t)

	// Signal c.createIO that it can call CloseIO
	close(stdinCloseSync)

	if err := t.Start(ctx); err != nil {
		if _, err := t.Delete(ctx); err != nil {
			c.logger.WithError(err).WithField("container", id).
				Error("failed to delete task after fail start")
		}
		ctr.setTask(nil)
		return -1, wrapError(err)
	}

	return int(t.Pid()), nil
}
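
// Exec creates and starts an additional process inside the container. The
// container's init task must already be running.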
func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio StdioCallback) (int, error) {
	ctr := c.getContainer(containerID)
	if ctr == nil {
		return -1, errors.WithStack(newNotFoundError("no such container"))
	}
	t := ctr.getTask()
	if t == nil {
		return -1, errors.WithStack(newInvalidParameterError("container is not running"))
	}

	if p := ctr.getProcess(processID); p != nil {
		return -1, errors.WithStack(newConflictError("id already in use"))
	}

	var (
		p              containerd.Process
		rio            cio.IO
		err            error
		stdinCloseSync = make(chan struct{})
	)

	fifos := newFIFOSet(ctr.bundleDir, processID, withStdin, spec.Terminal)

	defer func() {
		if err != nil {
			if rio != nil {
				rio.Cancel()
				rio.Close()
			}
		}
	}()

	p, err = t.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
		rio, err = c.createIO(fifos, containerID, processID, stdinCloseSync, attachStdio)
		return rio, err
	})
	if err != nil {
		close(stdinCloseSync)
		return -1, wrapError(err)
	}

	ctr.addProcess(processID, p)

	// Signal c.createIO that it can call CloseIO
	close(stdinCloseSync)

	if err = p.Start(ctx); err != nil {
		p.Delete(context.Background())
		ctr.deleteProcess(processID)
		return -1, wrapError(err)
	}

	return int(p.Pid()), nil
}
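
// SignalProcess sends the given signal to the specified process of a container.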
func (c *client) SignalProcess(ctx context.Context, containerID, processID string, signal int) error {
	p, err := c.getProcess(containerID, processID)
	if err != nil {
		return err
	}
	return wrapError(p.Kill(ctx, syscall.Signal(signal)))
}
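
// ResizeTerminal resizes the terminal attached to the given process.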
func (c *client) ResizeTerminal(ctx context.Context, containerID, processID string, width, height int) error {
	p, err := c.getProcess(containerID, processID)
	if err != nil {
		return err
	}

	return p.Resize(ctx, uint32(width), uint32(height))
}
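
// CloseStdin closes the stdin stream of the given process.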
func (c *client) CloseStdin(ctx context.Context, containerID, processID string) error {
	p, err := c.getProcess(containerID, processID)
	if err != nil {
		return err
	}

	return p.CloseIO(ctx, containerd.WithStdinCloser)
}
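
// Pause pauses the container's init task.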
func (c *client) Pause(ctx context.Context, containerID string) error {
	p, err := c.getProcess(containerID, InitProcessName)
	if err != nil {
		return err
	}

	return wrapError(p.(containerd.Task).Pause(ctx))
}
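
// Resume resumes a previously paused container.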
func (c *client) Resume(ctx context.Context, containerID string) error {
	p, err := c.getProcess(containerID, InitProcessName)
	if err != nil {
		return err
	}

	return p.(containerd.Task).Resume(ctx)
}
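
// Stats returns the metrics reported by containerd for the container's init task.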
func (c *client) Stats(ctx context.Context, containerID string) (*Stats, error) {
	p, err := c.getProcess(containerID, InitProcessName)
	if err != nil {
		return nil, err
	}

	m, err := p.(containerd.Task).Metrics(ctx)
	if err != nil {
		return nil, err
	}

	v, err := typeurl.UnmarshalAny(m.Data)
	if err != nil {
		return nil, err
	}
	return interfaceToStats(m.Timestamp, v), nil
}
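
// ListPids returns the PIDs of all processes running inside the container.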
func (c *client) ListPids(ctx context.Context, containerID string) ([]uint32, error) {
	p, err := c.getProcess(containerID, InitProcessName)
	if err != nil {
		return nil, err
	}

	pis, err := p.(containerd.Task).Pids(ctx)
	if err != nil {
		return nil, err
	}

	var pids []uint32
	for _, i := range pis {
		pids = append(pids, i.Pid)
	}

	return pids, nil
}
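
// Summary returns a summary of every process running inside the container.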
func (c *client) Summary(ctx context.Context, containerID string) ([]Summary, error) {
	p, err := c.getProcess(containerID, InitProcessName)
	if err != nil {
		return nil, err
	}

	pis, err := p.(containerd.Task).Pids(ctx)
	if err != nil {
		return nil, err
	}

	var infos []Summary
	for _, pi := range pis {
		i, err := typeurl.UnmarshalAny(pi.Info)
		if err != nil {
			return nil, errors.Wrap(err, "unable to decode process details")
		}
		s, err := summaryFromInterface(i)
		if err != nil {
			return nil, err
		}
		infos = append(infos, *s)
	}

	return infos, nil
}
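
// DeleteTask deletes the container's task and returns its exit code and exit
// time. If the task cannot be found or deleted, an exit code of 255 and the
// current time are returned instead.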
func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) {
	p, err := c.getProcess(containerID, InitProcessName)
	if err != nil {
		return 255, time.Now(), nil
	}

	status, err := p.(containerd.Task).Delete(ctx)
	if err != nil {
		return 255, time.Now(), nil
	}

	if ctr := c.getContainer(containerID); ctr != nil {
		ctr.setTask(nil)
	}
	return status.ExitCode(), status.ExitTime(), nil
}
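
// Delete removes the containerd container and, unless LIBCONTAINERD_NOCLEAN=1
// is set, its bundle directory on disk, then drops it from the client.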
func (c *client) Delete(ctx context.Context, containerID string) error {
	ctr := c.getContainer(containerID)
	if ctr == nil {
		return errors.WithStack(newNotFoundError("no such container"))
	}

	if err := ctr.ctr.Delete(ctx); err != nil {
		return wrapError(err)
	}

	if os.Getenv("LIBCONTAINERD_NOCLEAN") != "1" {
		if err := os.RemoveAll(ctr.bundleDir); err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container": containerID,
				"bundle":    ctr.bundleDir,
			}).Error("failed to remove state dir")
		}
	}

	c.removeContainer(containerID)

	return nil
}
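
// Status returns the current status of the container's task as reported by containerd.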
func (c *client) Status(ctx context.Context, containerID string) (Status, error) {
	ctr := c.getContainer(containerID)
	if ctr == nil {
		return StatusUnknown, errors.WithStack(newNotFoundError("no such container"))
	}

	t := ctr.getTask()
	if t == nil {
		return StatusUnknown, errors.WithStack(newNotFoundError("no such task"))
	}

	s, err := t.Status(ctx)
	if err != nil {
		return StatusUnknown, wrapError(err)
	}

	return Status(s.Status), nil
}
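
// CreateCheckpoint checkpoints the container's init task and unpacks the
// resulting checkpoint data into checkpointDir.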
func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error {
	p, err := c.getProcess(containerID, InitProcessName)
	if err != nil {
		return err
	}

	img, err := p.(containerd.Task).Checkpoint(ctx)
	if err != nil {
		return wrapError(err)
	}
	// Whatever happens, delete the checkpoint from containerd
	defer func() {
		err := c.getRemote().ImageService().Delete(context.Background(), img.Name())
		if err != nil {
			c.logger.WithError(err).WithField("digest", img.Target().Digest).
				Warnf("failed to delete checkpoint image")
		}
	}()

	b, err := content.ReadBlob(ctx, c.getRemote().ContentStore(), img.Target().Digest)
	if err != nil {
		return errdefs.System(errors.Wrapf(err, "failed to retrieve checkpoint data"))
	}
	var index v1.Index
	if err := json.Unmarshal(b, &index); err != nil {
		return errdefs.System(errors.Wrapf(err, "failed to decode checkpoint data"))
	}

	var cpDesc *v1.Descriptor
	for _, m := range index.Manifests {
		if m.MediaType == images.MediaTypeContainerd1Checkpoint {
			cpDesc = &m
			break
		}
	}
	if cpDesc == nil {
		// err is nil at this point; wrapping it would yield a nil error, so
		// build a fresh one instead.
		return errdefs.System(errors.New("invalid checkpoint"))
	}

	rat, err := c.getRemote().ContentStore().ReaderAt(ctx, cpDesc.Digest)
	if err != nil {
		return errdefs.System(errors.Wrapf(err, "failed to get checkpoint reader"))
	}
	defer rat.Close()
	_, err = archive.Apply(ctx, checkpointDir, content.NewReader(rat))
	if err != nil {
		return errdefs.System(errors.Wrapf(err, "failed to read checkpoint reader"))
	}

	return err
}

func (c *client) getContainer(id string) *container {
	c.RLock()
	ctr := c.containers[id]
	c.RUnlock()

	return ctr
}

func (c *client) removeContainer(id string) {
	c.Lock()
	delete(c.containers, id)
	c.Unlock()
}
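
// getProcess returns the containerd process matching processID within the
// given container. For InitProcessName the container's task is returned.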
func (c *client) getProcess(containerID, processID string) (containerd.Process, error) {
	ctr := c.getContainer(containerID)
	if ctr == nil {
		return nil, errors.WithStack(newNotFoundError("no such container"))
	}

	t := ctr.getTask()
	if t == nil {
		return nil, errors.WithStack(newNotFoundError("container is not running"))
	}
	if processID == InitProcessName {
		return t, nil
	}

	p := ctr.getProcess(processID)
	if p == nil {
		return nil, errors.WithStack(newNotFoundError("no such exec"))
	}
	return p, nil
}

// createIO creates the io to be used by a process.
// The process is looked up by container and process ID because, by the time
// the stdin-closing closure below runs, the process may not have been
// registered with the container yet.
func (c *client) createIO(fifos *cio.FIFOSet, containerID, processID string, stdinCloseSync chan struct{}, attachStdio StdioCallback) (cio.IO, error) {
	io, err := cio.NewDirectIO(context.Background(), fifos)
	if err != nil {
		return nil, err
	}

	if io.Stdin != nil {
		var (
			err       error
			stdinOnce sync.Once
		)
		pipe := io.Stdin
		io.Stdin = ioutils.NewWriteCloserWrapper(pipe, func() error {
			stdinOnce.Do(func() {
				err = pipe.Close()
				// Do the rest in a new routine to avoid a deadlock if the
				// Exec/Start call failed.
				go func() {
					<-stdinCloseSync
					p, err := c.getProcess(containerID, processID)
					if err == nil {
						err = p.CloseIO(context.Background(), containerd.WithStdinCloser)
						if err != nil && strings.Contains(err.Error(), "transport is closing") {
							err = nil
						}
					}
				}()
			})
			return err
		})
	}

	rio, err := attachStdio(io)
	if err != nil {
		io.Cancel()
		io.Close()
	}
	return rio, err
}
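
// processEvent queues delivery of the event to the backend. For exit events of
// exec processes it also deletes the process from containerd, drops it from
// the container, and closes its FIFO set.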
func (c *client) processEvent(ctr *container, et EventType, ei EventInfo) {
	c.eventQ.append(ei.ContainerID, func() {
		err := c.backend.ProcessEvent(ei.ContainerID, et, ei)
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container":  ei.ContainerID,
				"event":      et,
				"event-info": ei,
			}).Error("failed to process event")
		}

		if et == EventExit && ei.ProcessID != ei.ContainerID {
			p := ctr.getProcess(ei.ProcessID)
			if p == nil {
				c.logger.WithError(errors.New("no such process")).
					WithFields(logrus.Fields{
						"container": ei.ContainerID,
						"process":   ei.ProcessID,
					}).Error("exit event")
				return
			}
			_, err = p.Delete(context.Background())
			if err != nil {
				c.logger.WithError(err).WithFields(logrus.Fields{
					"container": ei.ContainerID,
					"process":   ei.ProcessID,
				}).Warn("failed to delete process")
			}
			ctr.deleteProcess(ei.ProcessID)

			ctr := c.getContainer(ei.ContainerID)
			if ctr == nil {
				c.logger.WithFields(logrus.Fields{
					"container": ei.ContainerID,
				}).Error("failed to find container")
			} else {
				newFIFOSet(ctr.bundleDir, ei.ProcessID, true, false).Close()
			}
		}
	})
}
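
// processEventStream subscribes to containerd's task events for the client's
// namespace and forwards them to the backend. If the stream terminates for any
// reason other than a graceful shutdown, a new subscription is started.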
func (c *client) processEventStream(ctx context.Context) {
	var (
		err         error
		eventStream eventsapi.Events_SubscribeClient
		ev          *eventsapi.Envelope
		et          EventType
		ei          EventInfo
		ctr         *container
	)
	defer func() {
		if err != nil {
			select {
			case <-ctx.Done():
				c.logger.WithError(ctx.Err()).
					Info("stopping event stream following graceful shutdown")
			default:
				go c.processEventStream(ctx)
			}
		}
	}()

	eventStream, err = c.getRemote().EventService().Subscribe(ctx, &eventsapi.SubscribeRequest{
		Filters: []string{
			// Filter on both namespace *and* topic. To create an "and" filter,
			// this must be a single, comma-separated string
			"namespace==" + c.namespace + ",topic~=|^/tasks/|",
		},
	}, grpc.FailFast(false))
	if err != nil {
		return
	}

	c.logger.WithField("namespace", c.namespace).Debug("processing event stream")

	var oomKilled bool
	for {
		ev, err = eventStream.Recv()
		if err != nil {
			errStatus, ok := status.FromError(err)
			if !ok || errStatus.Code() != codes.Canceled {
				c.logger.WithError(err).Error("failed to get event")
			}
			return
		}

		if ev.Event == nil {
			c.logger.WithField("event", ev).Warn("invalid event")
			continue
		}

		v, err := typeurl.UnmarshalAny(ev.Event)
		if err != nil {
			c.logger.WithError(err).WithField("event", ev).Warn("failed to unmarshal event")
			continue
		}

		c.logger.WithField("topic", ev.Topic).Debug("event")

		switch t := v.(type) {
		case *events.TaskCreate:
			et = EventCreate
			ei = EventInfo{
				ContainerID: t.ContainerID,
				ProcessID:   t.ContainerID,
				Pid:         t.Pid,
			}
		case *events.TaskStart:
			et = EventStart
			ei = EventInfo{
				ContainerID: t.ContainerID,
				ProcessID:   t.ContainerID,
				Pid:         t.Pid,
			}
		case *events.TaskExit:
			et = EventExit
			ei = EventInfo{
				ContainerID: t.ContainerID,
				ProcessID:   t.ID,
				Pid:         t.Pid,
				ExitCode:    t.ExitStatus,
				ExitedAt:    t.ExitedAt,
			}
		case *events.TaskOOM:
			et = EventOOM
			ei = EventInfo{
				ContainerID: t.ContainerID,
				OOMKilled:   true,
			}
			oomKilled = true
		case *events.TaskExecAdded:
			et = EventExecAdded
			ei = EventInfo{
				ContainerID: t.ContainerID,
				ProcessID:   t.ExecID,
			}
		case *events.TaskExecStarted:
			et = EventExecStarted
			ei = EventInfo{
				ContainerID: t.ContainerID,
				ProcessID:   t.ExecID,
				Pid:         t.Pid,
			}
		case *events.TaskPaused:
			et = EventPaused
			ei = EventInfo{
				ContainerID: t.ContainerID,
			}
		case *events.TaskResumed:
			et = EventResumed
			ei = EventInfo{
				ContainerID: t.ContainerID,
			}
		default:
			c.logger.WithFields(logrus.Fields{
				"topic": ev.Topic,
				"type":  reflect.TypeOf(t)},
			).Info("ignoring event")
			continue
		}

		ctr = c.getContainer(ei.ContainerID)
		if ctr == nil {
			c.logger.WithField("container", ei.ContainerID).Warn("unknown container")
			continue
		}

		if oomKilled {
			ctr.setOOMKilled(true)
			oomKilled = false
		}
		ei.OOMKilled = ctr.getOOMKilled()

		c.processEvent(ctr, et, ei)
	}
}
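
// writeContent streams r into the containerd content store under ref and
// returns a descriptor for the stored blob.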
func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) {
	writer, err := c.getRemote().ContentStore().Writer(ctx, ref, 0, "")
	if err != nil {
		return nil, err
	}
	defer writer.Close()
	size, err := io.Copy(writer, r)
	if err != nil {
		return nil, err
	}
	labels := map[string]string{
		"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
	}
	if err := writer.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil {
		return nil, err
	}
	return &types.Descriptor{
		MediaType: mediaType,
		Digest:    writer.Digest(),
		Size_:     size,
	}, nil
}
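
// wrapError converts containerd "not found" errors, and error messages that
// indicate a missing container, into errdefs.NotFound errors; anything else is
// returned unchanged.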
func wrapError(err error) error {
	switch {
	case err == nil:
		return nil
	case containerderrors.IsNotFound(err):
		return errdefs.NotFound(err)
	}

	msg := err.Error()
	for _, s := range []string{"container does not exist", "not found", "no such container"} {
		if strings.Contains(msg, s) {
			return errdefs.NotFound(err)
		}
	}
	return err
}