Windows: Experimental: Allow containerd for runtime
Signed-off-by: John Howard <jhoward@microsoft.com>
This is the first step in refactoring moby (dockerd) to use containerd on Windows.
Similar to the current model in Linux, this adds the option to enable it for runtime.
It does not switch the graphdriver to containerd snapshotters.
- Refactors libcontainerd to a series of subpackages so that either a
"local" containerd (1) or a "remote" (2) containerd can be loaded as opposed
to conditional compile as "local" for Windows and "remote" for Linux.
- Updates libcontainerd such that Windows has an option to allow the use of a
"remote" containerd. Here, it communicates over a named pipe using GRPC.
This is currently guarded behind the experimental flag, an environment variable,
and the providing of a pipename to connect to containerd.
- Infrastructure pieces such as under pkg/system to have helper functions for
determining whether containerd is being used.
(1) "local" containerd is what the daemon on Windows has used since inception.
It's not really containerd at all - it's simply local invocation of HCS APIs
directly in-process from the daemon through the Microsoft/hcsshim library.
(2) "remote" containerd is what docker on Linux uses for it's runtime. It means
that there is a separate containerd service running, and docker communicates over
GRPC to it.
To try this out, you will need to start with something like the following:
Window 1:
containerd --log-level debug
Window 2:
$env:DOCKER_WINDOWS_CONTAINERD=1
dockerd --experimental -D --containerd \\.\pipe\containerd-containerd
You will need the following binary from github.com/containerd/containerd in your path:
- containerd.exe
You will need the following binaries from github.com/Microsoft/hcsshim in your path:
- runhcs.exe
- containerd-shim-runhcs-v1.exe
For LCOW, it will require and initrd.img and kernel in `C:\Program Files\Linux Containers`.
This is no different to the current requirements. However, you may need updated binaries,
particularly initrd.img built from Microsoft/opengcs as (at the time of writing), Linuxkit
binaries are somewhat out of date.
Note that containerd and hcsshim for HCS v2 APIs do not yet support all the required
functionality needed for docker. This will come in time - this is a baby (although large)
step to migrating Docker on Windows to containerd.
Note that the HCS v2 APIs are only called on RS5+ builds. RS1..RS4 will still use
HCS v1 APIs as the v2 APIs were not fully developed enough on these builds to be usable.
This abstraction is done in HCSShim. (Referring specifically to runtime)
Note the LCOW graphdriver still uses HCS v1 APIs regardless.
Note also that this does not migrate docker to use containerd snapshotters
rather than graphdrivers. This needs to be done in conjunction with Linux also
doing the same switch.
2019-01-08 17:30:52 -05:00
|
|
|
package remote // import "github.com/docker/docker/libcontainerd/remote"
|
2017-09-22 09:52:41 -04:00
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"encoding/json"
|
|
|
|
"io"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
|
|
|
"reflect"
|
|
|
|
"runtime"
|
|
|
|
"strings"
|
|
|
|
"sync"
|
|
|
|
"syscall"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/containerd/containerd"
|
2018-04-18 17:55:50 -04:00
|
|
|
apievents "github.com/containerd/containerd/api/events"
|
2017-09-22 09:52:41 -04:00
|
|
|
"github.com/containerd/containerd/api/types"
|
|
|
|
"github.com/containerd/containerd/archive"
|
2017-11-29 19:15:20 -05:00
|
|
|
"github.com/containerd/containerd/cio"
|
2017-09-22 09:52:41 -04:00
|
|
|
"github.com/containerd/containerd/content"
|
2017-11-28 23:09:37 -05:00
|
|
|
containerderrors "github.com/containerd/containerd/errdefs"
|
2018-04-18 17:55:50 -04:00
|
|
|
"github.com/containerd/containerd/events"
|
2017-09-22 09:52:41 -04:00
|
|
|
"github.com/containerd/containerd/images"
|
2019-11-05 02:10:19 -05:00
|
|
|
v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
|
2017-09-22 09:52:41 -04:00
|
|
|
"github.com/containerd/typeurl"
|
2018-01-11 14:53:06 -05:00
|
|
|
"github.com/docker/docker/errdefs"
|
Windows: Experimental: Allow containerd for runtime
Signed-off-by: John Howard <jhoward@microsoft.com>
This is the first step in refactoring moby (dockerd) to use containerd on Windows.
Similar to the current model in Linux, this adds the option to enable it for runtime.
It does not switch the graphdriver to containerd snapshotters.
- Refactors libcontainerd to a series of subpackages so that either a
"local" containerd (1) or a "remote" (2) containerd can be loaded as opposed
to conditional compile as "local" for Windows and "remote" for Linux.
- Updates libcontainerd such that Windows has an option to allow the use of a
"remote" containerd. Here, it communicates over a named pipe using GRPC.
This is currently guarded behind the experimental flag, an environment variable,
and the providing of a pipename to connect to containerd.
- Infrastructure pieces such as under pkg/system to have helper functions for
determining whether containerd is being used.
(1) "local" containerd is what the daemon on Windows has used since inception.
It's not really containerd at all - it's simply local invocation of HCS APIs
directly in-process from the daemon through the Microsoft/hcsshim library.
(2) "remote" containerd is what docker on Linux uses for it's runtime. It means
that there is a separate containerd service running, and docker communicates over
GRPC to it.
To try this out, you will need to start with something like the following:
Window 1:
containerd --log-level debug
Window 2:
$env:DOCKER_WINDOWS_CONTAINERD=1
dockerd --experimental -D --containerd \\.\pipe\containerd-containerd
You will need the following binary from github.com/containerd/containerd in your path:
- containerd.exe
You will need the following binaries from github.com/Microsoft/hcsshim in your path:
- runhcs.exe
- containerd-shim-runhcs-v1.exe
For LCOW, it will require and initrd.img and kernel in `C:\Program Files\Linux Containers`.
This is no different to the current requirements. However, you may need updated binaries,
particularly initrd.img built from Microsoft/opengcs as (at the time of writing), Linuxkit
binaries are somewhat out of date.
Note that containerd and hcsshim for HCS v2 APIs do not yet support all the required
functionality needed for docker. This will come in time - this is a baby (although large)
step to migrating Docker on Windows to containerd.
Note that the HCS v2 APIs are only called on RS5+ builds. RS1..RS4 will still use
HCS v1 APIs as the v2 APIs were not fully developed enough on these builds to be usable.
This abstraction is done in HCSShim. (Referring specifically to runtime)
Note the LCOW graphdriver still uses HCS v1 APIs regardless.
Note also that this does not migrate docker to use containerd snapshotters
rather than graphdrivers. This needs to be done in conjunction with Linux also
doing the same switch.
2019-01-08 17:30:52 -05:00
|
|
|
"github.com/docker/docker/libcontainerd/queue"
|
|
|
|
libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
|
2017-09-22 09:52:41 -04:00
|
|
|
"github.com/docker/docker/pkg/ioutils"
|
2018-12-17 05:22:37 -05:00
|
|
|
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
2018-06-08 20:39:07 -04:00
|
|
|
specs "github.com/opencontainers/runtime-spec/specs-go"
|
2017-09-22 09:52:41 -04:00
|
|
|
"github.com/pkg/errors"
|
|
|
|
"github.com/sirupsen/logrus"
|
2018-06-08 20:39:07 -04:00
|
|
|
"google.golang.org/grpc/codes"
|
|
|
|
"google.golang.org/grpc/status"
|
2017-09-22 09:52:41 -04:00
|
|
|
)
|
|
|
|
|
2019-03-25 16:17:17 -04:00
|
|
|
// DockerContainerBundlePath is the label key pointing to the container's bundle path.
// It is stored as a containerd container label at creation time so that later
// operations (Start, Exec, Delete) can locate the on-disk state directory
// without re-deriving it.
const DockerContainerBundlePath = "com.docker/engine.bundle.path"
|
2017-12-15 11:32:08 -05:00
|
|
|
|
2017-09-22 09:52:41 -04:00
|
|
|
// client is the "remote" containerd implementation of the libcontainerd
// client: it talks to a separate containerd daemon over gRPC, scoping all
// requests to a single namespace.
type client struct {
	client   *containerd.Client // shared gRPC connection to the containerd daemon
	stateDir string             // root directory under which per-container bundle dirs live
	logger   *logrus.Entry      // pre-tagged with module=libcontainerd and the namespace
	ns       string             // containerd namespace used for all requests

	backend libcontainerdtypes.Backend // receives event callbacks (exit, OOM, ...)
	eventQ  queue.Queue                // serializes event delivery (see processEventStream)
}
|
|
|
|
|
|
|
|
// container pairs a containerd.Container handle with the owning client and
// any runc v2 runtime options supplied when the container was created.
type container struct {
	client *client
	c8dCtr containerd.Container

	// v2runcoptions holds the *v2runcoptions.Options passed to NewContainer,
	// if any; Start and checkpointing read it to propagate IO uid/gid and
	// checkpoint options to the runtime.
	v2runcoptions *v2runcoptions.Options
}
|
|
|
|
|
|
|
|
// task embeds a containerd.Task and keeps a back-reference to the owning
// container so task-level operations can reach container metadata and the
// client connection.
type task struct {
	containerd.Task
	ctr *container
}
|
|
|
|
|
|
|
|
// process embeds a containerd.Process to adapt it to the libcontainerd
// process interface; its methods wrap containerd errors via wrapError.
type process struct {
	containerd.Process
}
|
|
|
|
|
2018-05-23 15:15:21 -04:00
|
|
|
// NewClient creates a new libcontainerd client from a containerd client
|
2020-07-07 16:33:46 -04:00
|
|
|
func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
|
2018-05-23 15:15:21 -04:00
|
|
|
c := &client{
|
2022-05-10 15:59:00 -04:00
|
|
|
client: cli,
|
|
|
|
stateDir: stateDir,
|
|
|
|
logger: logrus.WithField("module", "libcontainerd").WithField("namespace", ns),
|
|
|
|
ns: ns,
|
|
|
|
backend: b,
|
2018-05-23 15:15:21 -04:00
|
|
|
}
|
2018-03-23 14:25:53 -04:00
|
|
|
|
2018-05-23 15:15:21 -04:00
|
|
|
go c.processEventStream(ctx, ns)
|
2018-01-31 17:32:40 -05:00
|
|
|
|
2018-05-23 15:15:21 -04:00
|
|
|
return c, nil
|
2018-01-31 17:32:40 -05:00
|
|
|
}
|
|
|
|
|
2017-11-02 20:21:18 -04:00
|
|
|
func (c *client) Version(ctx context.Context) (containerd.Version, error) {
|
2018-05-23 15:15:21 -04:00
|
|
|
return c.client.Version(ctx)
|
2017-11-02 20:21:18 -04:00
|
|
|
}
|
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
func (c *container) newTask(t containerd.Task) *task {
|
|
|
|
return &task{Task: t, ctr: c}
|
|
|
|
}
|
|
|
|
|
|
|
|
// AttachTask looks up the already-running containerd task for this container
// and re-attaches the daemon's stdio streams to it (used when the daemon
// reconnects to live containers). On any failure after the FIFOs were opened,
// the deferred cleanup cancels and closes them so pipes are not leaked.
func (c *container) AttachTask(ctx context.Context, attachStdio libcontainerdtypes.StdioCallback) (_ libcontainerdtypes.Task, err error) {
	var dio *cio.DirectIO
	defer func() {
		// Clean up the direct IO if anything below failed after it was opened.
		if err != nil && dio != nil {
			dio.Cancel()
			dio.Close()
		}
	}()

	attachIO := func(fifos *cio.FIFOSet) (cio.IO, error) {
		// dio must be assigned to the previously defined dio for the defer above
		// to handle cleanup
		dio, err = c.client.newDirectIO(ctx, fifos)
		if err != nil {
			return nil, err
		}
		return attachStdio(dio)
	}
	t, err := c.c8dCtr.Task(ctx, attachIO)
	if err != nil {
		return nil, errors.Wrap(wrapError(err), "error getting containerd task for container")
	}
	return c.newTask(t), nil
}
|
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
// NewContainer registers a new container with containerd using the given OCI
// spec, shim (runtime) name and runtime options. The bundle directory is
// recorded via the WithBundle option so later calls can locate on-disk state.
// Returns an errdefs.Conflict error when the id is already in use.
func (c *client) NewContainer(ctx context.Context, id string, ociSpec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) (libcontainerdtypes.Container, error) {
	bdir := c.bundleDir(id)
	c.logger.WithField("bundle", bdir).WithField("root", ociSpec.Root.Path).Debug("bundle dir created")

	newOpts := []containerd.NewContainerOpts{
		containerd.WithSpec(ociSpec),
		containerd.WithRuntime(shim, runtimeOptions),
		WithBundle(bdir, ociSpec),
	}
	// Caller-supplied opts run first; the fixed options above are appended.
	opts = append(opts, newOpts...)

	ctr, err := c.client.NewContainer(ctx, id, opts...)
	if err != nil {
		if containerderrors.IsAlreadyExists(err) {
			return nil, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
		}
		return nil, wrapError(err)
	}

	created := container{
		client: c,
		c8dCtr: ctr,
	}
	// Remember runc v2 options, if supplied, so Start/checkpointing can
	// propagate them to the task.
	if x, ok := runtimeOptions.(*v2runcoptions.Options); ok {
		created.v2runcoptions = x
	}
	return &created, nil
}
|
|
|
|
|
|
|
|
// Start create and start a task for the specified containerd id
|
2022-05-10 15:59:00 -04:00
|
|
|
func (c *container) Start(ctx context.Context, checkpointDir string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Task, error) {
|
2017-09-22 09:52:41 -04:00
|
|
|
var (
|
|
|
|
cp *types.Descriptor
|
|
|
|
t containerd.Task
|
2017-11-29 19:15:20 -05:00
|
|
|
rio cio.IO
|
2022-05-10 15:59:00 -04:00
|
|
|
stdinCloseSync = make(chan containerd.Process, 1)
|
2017-09-22 09:52:41 -04:00
|
|
|
)
|
|
|
|
|
|
|
|
if checkpointDir != "" {
|
|
|
|
// write checkpoint to the content store
|
|
|
|
tar := archive.Diff(ctx, "", checkpointDir)
|
2022-05-10 15:59:00 -04:00
|
|
|
cp, err := c.client.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, checkpointDir, tar)
|
2017-09-22 09:52:41 -04:00
|
|
|
// remove the checkpoint when we're done
|
|
|
|
defer func() {
|
|
|
|
if cp != nil {
|
2022-05-10 15:59:00 -04:00
|
|
|
err := c.client.client.ContentStore().Delete(ctx, cp.Digest)
|
2017-09-22 09:52:41 -04:00
|
|
|
if err != nil {
|
2022-05-10 15:59:00 -04:00
|
|
|
c.client.logger.WithError(err).WithFields(logrus.Fields{
|
2017-09-22 09:52:41 -04:00
|
|
|
"ref": checkpointDir,
|
|
|
|
"digest": cp.Digest,
|
|
|
|
}).Warnf("failed to delete temporary checkpoint entry")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
if err := tar.Close(); err != nil {
|
2022-05-10 15:59:00 -04:00
|
|
|
return nil, errors.Wrap(err, "failed to close checkpoint tar stream")
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
if err != nil {
|
2022-05-10 15:59:00 -04:00
|
|
|
return nil, errors.Wrapf(err, "failed to upload checkpoint to containerd")
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
// Optimization: assume the relevant metadata has not changed in the
|
|
|
|
// moment since the container was created. Elide redundant RPC requests
|
|
|
|
// to refresh the metadata separately for spec and labels.
|
|
|
|
md, err := c.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
|
2017-09-22 09:52:41 -04:00
|
|
|
if err != nil {
|
2022-05-10 15:59:00 -04:00
|
|
|
return nil, errors.Wrap(err, "failed to retrieve metadata")
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
2022-05-10 15:59:00 -04:00
|
|
|
bundle := md.Labels[DockerContainerBundlePath]
|
|
|
|
|
|
|
|
var spec specs.Spec
|
|
|
|
if err := json.Unmarshal(md.Spec.GetValue(), &spec); err != nil {
|
|
|
|
return nil, errors.Wrap(err, "failed to retrieve spec")
|
2019-03-25 16:17:17 -04:00
|
|
|
}
|
2022-05-10 15:59:00 -04:00
|
|
|
uid, gid := getSpecUser(&spec)
|
2019-10-28 13:30:13 -04:00
|
|
|
|
|
|
|
taskOpts := []containerd.NewTaskOpts{
|
|
|
|
func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
|
|
|
|
info.Checkpoint = cp
|
|
|
|
return nil
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
if runtime.GOOS != "windows" {
|
|
|
|
taskOpts = append(taskOpts, func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
|
2022-05-10 15:59:00 -04:00
|
|
|
if c.v2runcoptions != nil {
|
|
|
|
opts := *c.v2runcoptions
|
2019-11-05 02:10:19 -05:00
|
|
|
opts.IoUid = uint32(uid)
|
|
|
|
opts.IoGid = uint32(gid)
|
|
|
|
info.Options = &opts
|
2019-10-28 13:30:13 -04:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
} else {
|
2022-05-10 15:59:00 -04:00
|
|
|
taskOpts = append(taskOpts, withLogLevel(c.client.logger.Level))
|
2019-10-28 13:30:13 -04:00
|
|
|
}
|
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
t, err = c.c8dCtr.NewTask(ctx,
|
2017-11-29 19:15:20 -05:00
|
|
|
func(id string) (cio.IO, error) {
|
2019-03-25 16:17:17 -04:00
|
|
|
fifos := newFIFOSet(bundle, libcontainerdtypes.InitProcessName, withStdin, spec.Process.Terminal)
|
2018-05-31 20:03:28 -04:00
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
rio, err = c.createIO(fifos, libcontainerdtypes.InitProcessName, stdinCloseSync, attachStdio)
|
2017-11-29 19:15:20 -05:00
|
|
|
return rio, err
|
2017-09-22 09:52:41 -04:00
|
|
|
},
|
2019-10-28 13:30:13 -04:00
|
|
|
taskOpts...,
|
|
|
|
)
|
2017-09-22 09:52:41 -04:00
|
|
|
if err != nil {
|
|
|
|
close(stdinCloseSync)
|
2017-11-29 19:15:20 -05:00
|
|
|
if rio != nil {
|
|
|
|
rio.Cancel()
|
|
|
|
rio.Close()
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
2022-05-10 15:59:00 -04:00
|
|
|
return nil, errors.Wrap(wrapError(err), "failed to create task for container")
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Signal c.createIO that it can call CloseIO
|
2022-05-10 15:59:00 -04:00
|
|
|
stdinCloseSync <- t
|
2017-09-22 09:52:41 -04:00
|
|
|
|
|
|
|
if err := t.Start(ctx); err != nil {
|
|
|
|
if _, err := t.Delete(ctx); err != nil {
|
2022-05-10 15:59:00 -04:00
|
|
|
c.client.logger.WithError(err).WithField("container", c.c8dCtr.ID()).
|
2017-09-22 09:52:41 -04:00
|
|
|
Error("failed to delete task after fail start")
|
|
|
|
}
|
2022-05-10 15:59:00 -04:00
|
|
|
return nil, wrapError(err)
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
return c.newTask(t), nil
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
|
2018-10-10 02:54:00 -04:00
|
|
|
// Exec creates exec process.
//
// The containerd client calls Exec to register the exec config in the shim side.
// When the client calls Start, the shim will create stdin fifo if needs. But
// for the container main process, the stdin fifo will be created in Create not
// the Start call. stdinCloseSync channel should be closed after Start exec
// process.
func (t *task) Exec(ctx context.Context, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Process, error) {
	var (
		p              containerd.Process
		rio            cio.IO
		stdinCloseSync = make(chan containerd.Process, 1)
	)

	// Optimization: assume the DockerContainerBundlePath label has not been
	// updated since the container metadata was last loaded/refreshed.
	md, err := t.ctr.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
	if err != nil {
		return nil, wrapError(err)
	}

	fifos := newFIFOSet(md.Labels[DockerContainerBundlePath], processID, withStdin, spec.Terminal)

	defer func() {
		// Tear down the IO streams if anything below failed after they
		// were created inside the Exec callback.
		if err != nil {
			if rio != nil {
				rio.Cancel()
				rio.Close()
			}
		}
	}()

	p, err = t.Task.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
		// rio is assigned to the outer variable for the deferred cleanup.
		rio, err = t.ctr.createIO(fifos, processID, stdinCloseSync, attachStdio)
		return rio, err
	})
	if err != nil {
		close(stdinCloseSync)
		if containerderrors.IsAlreadyExists(err) {
			return nil, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
		}
		return nil, wrapError(err)
	}

	// Signal c.createIO that it can call CloseIO
	//
	// the stdin of exec process will be created after p.Start in containerd
	defer func() { stdinCloseSync <- p }()

	if err = p.Start(ctx); err != nil {
		// use new context for cleanup because old one may be cancelled by user, but leave a timeout to make sure
		// we are not waiting forever if containerd is unresponsive or to work around fifo cancelling issues in
		// older containerd-shim
		ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
		defer cancel()
		p.Delete(ctx)
		return nil, wrapError(err)
	}
	return process{p}, nil
}
|
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
func (t *task) Kill(ctx context.Context, signal syscall.Signal) error {
|
|
|
|
return wrapError(t.Task.Kill(ctx, signal))
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
func (p process) Kill(ctx context.Context, signal syscall.Signal) error {
|
|
|
|
return wrapError(p.Process.Kill(ctx, signal))
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
func (t *task) Pause(ctx context.Context) error {
|
|
|
|
return wrapError(t.Task.Pause(ctx))
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
func (t *task) Resume(ctx context.Context) error {
|
|
|
|
return wrapError(t.Task.Resume(ctx))
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
func (t *task) Stats(ctx context.Context) (*libcontainerdtypes.Stats, error) {
|
|
|
|
m, err := t.Metrics(ctx)
|
2017-09-22 09:52:41 -04:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
v, err := typeurl.UnmarshalAny(m.Data)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
Windows: Experimental: Allow containerd for runtime
Signed-off-by: John Howard <jhoward@microsoft.com>
This is the first step in refactoring moby (dockerd) to use containerd on Windows.
Similar to the current model in Linux, this adds the option to enable it for runtime.
It does not switch the graphdriver to containerd snapshotters.
- Refactors libcontainerd to a series of subpackages so that either a
"local" containerd (1) or a "remote" (2) containerd can be loaded as opposed
to conditional compile as "local" for Windows and "remote" for Linux.
- Updates libcontainerd such that Windows has an option to allow the use of a
"remote" containerd. Here, it communicates over a named pipe using GRPC.
This is currently guarded behind the experimental flag, an environment variable,
and the providing of a pipename to connect to containerd.
- Infrastructure pieces such as under pkg/system to have helper functions for
determining whether containerd is being used.
(1) "local" containerd is what the daemon on Windows has used since inception.
It's not really containerd at all - it's simply local invocation of HCS APIs
directly in-process from the daemon through the Microsoft/hcsshim library.
(2) "remote" containerd is what docker on Linux uses for it's runtime. It means
that there is a separate containerd service running, and docker communicates over
GRPC to it.
To try this out, you will need to start with something like the following:
Window 1:
containerd --log-level debug
Window 2:
$env:DOCKER_WINDOWS_CONTAINERD=1
dockerd --experimental -D --containerd \\.\pipe\containerd-containerd
You will need the following binary from github.com/containerd/containerd in your path:
- containerd.exe
You will need the following binaries from github.com/Microsoft/hcsshim in your path:
- runhcs.exe
- containerd-shim-runhcs-v1.exe
For LCOW, it will require and initrd.img and kernel in `C:\Program Files\Linux Containers`.
This is no different to the current requirements. However, you may need updated binaries,
particularly initrd.img built from Microsoft/opengcs as (at the time of writing), Linuxkit
binaries are somewhat out of date.
Note that containerd and hcsshim for HCS v2 APIs do not yet support all the required
functionality needed for docker. This will come in time - this is a baby (although large)
step to migrating Docker on Windows to containerd.
Note that the HCS v2 APIs are only called on RS5+ builds. RS1..RS4 will still use
HCS v1 APIs as the v2 APIs were not fully developed enough on these builds to be usable.
This abstraction is done in HCSShim. (Referring specifically to runtime)
Note the LCOW graphdriver still uses HCS v1 APIs regardless.
Note also that this does not migrate docker to use containerd snapshotters
rather than graphdrivers. This needs to be done in conjunction with Linux also
doing the same switch.
2019-01-08 17:30:52 -05:00
|
|
|
return libcontainerdtypes.InterfaceToStats(m.Timestamp, v), nil
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
func (t *task) Summary(ctx context.Context) ([]libcontainerdtypes.Summary, error) {
|
|
|
|
pis, err := t.Pids(ctx)
|
2017-09-22 09:52:41 -04:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
Windows: Experimental: Allow containerd for runtime
Signed-off-by: John Howard <jhoward@microsoft.com>
This is the first step in refactoring moby (dockerd) to use containerd on Windows.
Similar to the current model in Linux, this adds the option to enable it for runtime.
It does not switch the graphdriver to containerd snapshotters.
- Refactors libcontainerd to a series of subpackages so that either a
"local" containerd (1) or a "remote" (2) containerd can be loaded as opposed
to conditional compile as "local" for Windows and "remote" for Linux.
- Updates libcontainerd such that Windows has an option to allow the use of a
"remote" containerd. Here, it communicates over a named pipe using GRPC.
This is currently guarded behind the experimental flag, an environment variable,
and the providing of a pipename to connect to containerd.
- Infrastructure pieces such as under pkg/system to have helper functions for
determining whether containerd is being used.
(1) "local" containerd is what the daemon on Windows has used since inception.
It's not really containerd at all - it's simply local invocation of HCS APIs
directly in-process from the daemon through the Microsoft/hcsshim library.
(2) "remote" containerd is what docker on Linux uses for it's runtime. It means
that there is a separate containerd service running, and docker communicates over
GRPC to it.
To try this out, you will need to start with something like the following:
Window 1:
containerd --log-level debug
Window 2:
$env:DOCKER_WINDOWS_CONTAINERD=1
dockerd --experimental -D --containerd \\.\pipe\containerd-containerd
You will need the following binary from github.com/containerd/containerd in your path:
- containerd.exe
You will need the following binaries from github.com/Microsoft/hcsshim in your path:
- runhcs.exe
- containerd-shim-runhcs-v1.exe
For LCOW, it will require and initrd.img and kernel in `C:\Program Files\Linux Containers`.
This is no different to the current requirements. However, you may need updated binaries,
particularly initrd.img built from Microsoft/opengcs as (at the time of writing), Linuxkit
binaries are somewhat out of date.
Note that containerd and hcsshim for HCS v2 APIs do not yet support all the required
functionality needed for docker. This will come in time - this is a baby (although large)
step to migrating Docker on Windows to containerd.
Note that the HCS v2 APIs are only called on RS5+ builds. RS1..RS4 will still use
HCS v1 APIs as the v2 APIs were not fully developed enough on these builds to be usable.
This abstraction is done in HCSShim. (Referring specifically to runtime)
Note the LCOW graphdriver still uses HCS v1 APIs regardless.
Note also that this does not migrate docker to use containerd snapshotters
rather than graphdrivers. This needs to be done in conjunction with Linux also
doing the same switch.
2019-01-08 17:30:52 -05:00
|
|
|
var infos []libcontainerdtypes.Summary
|
2017-09-22 09:52:41 -04:00
|
|
|
for _, pi := range pis {
|
|
|
|
i, err := typeurl.UnmarshalAny(pi.Info)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "unable to decode process details")
|
|
|
|
}
|
|
|
|
s, err := summaryFromInterface(i)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
infos = append(infos, *s)
|
|
|
|
}
|
|
|
|
|
|
|
|
return infos, nil
|
|
|
|
}
|
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
func (t *task) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
|
|
|
|
s, err := t.Task.Delete(ctx)
|
|
|
|
return s, wrapError(err)
|
2019-03-25 16:17:17 -04:00
|
|
|
}
|
2017-09-22 09:52:41 -04:00
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
func (p process) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
|
|
|
|
s, err := p.Process.Delete(ctx)
|
|
|
|
return s, wrapError(err)
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
// Delete removes the container from containerd and then deletes its bundle
// directory from disk. Setting LIBCONTAINERD_NOCLEAN=1 keeps the bundle dir
// around for debugging; a failure to remove the dir is logged, not returned.
func (c *container) Delete(ctx context.Context) error {
	// Optimization: assume the DockerContainerBundlePath label has not been
	// updated since the container metadata was last loaded/refreshed.
	md, err := c.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
	if err != nil {
		return err
	}
	bundle := md.Labels[DockerContainerBundlePath]
	if err := c.c8dCtr.Delete(ctx); err != nil {
		return wrapError(err)
	}
	if os.Getenv("LIBCONTAINERD_NOCLEAN") != "1" {
		if err := os.RemoveAll(bundle); err != nil {
			c.client.logger.WithContext(ctx).WithError(err).WithFields(logrus.Fields{
				"container": c.c8dCtr.ID(),
				"bundle":    bundle,
			}).Error("failed to remove state dir")
		}
	}
	return nil
}
|
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
// ForceDelete kills the task's processes and removes the task in a single
// containerd call, wrapping any error. The exit status is discarded.
func (t *task) ForceDelete(ctx context.Context) error {
	_, err := t.Task.Delete(ctx, containerd.WithProcessKill)
	return wrapError(err)
}
|
|
|
|
|
|
|
|
func (t *task) Status(ctx context.Context) (containerd.Status, error) {
|
|
|
|
s, err := t.Task.Status(ctx)
|
|
|
|
return s, wrapError(err)
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
func (p process) Status(ctx context.Context) (containerd.Status, error) {
|
|
|
|
s, err := p.Process.Status(ctx)
|
|
|
|
return s, wrapError(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *container) getCheckpointOptions(exit bool) containerd.CheckpointTaskOpts {
|
2020-10-12 17:44:30 -04:00
|
|
|
return func(r *containerd.CheckpointTaskInfo) error {
|
2022-05-10 15:59:00 -04:00
|
|
|
if r.Options == nil && c.v2runcoptions != nil {
|
|
|
|
r.Options = &v2runcoptions.CheckpointOptions{}
|
2020-10-12 17:44:30 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
switch opts := r.Options.(type) {
|
|
|
|
case *v2runcoptions.CheckpointOptions:
|
|
|
|
opts.Exit = exit
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
func (t *task) CreateCheckpoint(ctx context.Context, checkpointDir string, exit bool) error {
|
|
|
|
img, err := t.Task.Checkpoint(ctx, t.ctr.getCheckpointOptions(exit))
|
2017-09-22 09:52:41 -04:00
|
|
|
if err != nil {
|
2018-03-27 10:03:53 -04:00
|
|
|
return wrapError(err)
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
// Whatever happens, delete the checkpoint from containerd
|
|
|
|
defer func() {
|
2022-05-10 15:59:00 -04:00
|
|
|
err := t.ctr.client.client.ImageService().Delete(ctx, img.Name())
|
2017-09-22 09:52:41 -04:00
|
|
|
if err != nil {
|
2022-05-10 15:59:00 -04:00
|
|
|
t.ctr.client.logger.WithError(err).WithField("digest", img.Target().Digest).
|
2017-09-22 09:52:41 -04:00
|
|
|
Warnf("failed to delete checkpoint image")
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
b, err := content.ReadBlob(ctx, t.ctr.client.client.ContentStore(), img.Target())
|
2017-09-22 09:52:41 -04:00
|
|
|
if err != nil {
|
2017-11-28 23:09:37 -05:00
|
|
|
return errdefs.System(errors.Wrapf(err, "failed to retrieve checkpoint data"))
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
var index v1.Index
|
|
|
|
if err := json.Unmarshal(b, &index); err != nil {
|
2017-11-28 23:09:37 -05:00
|
|
|
return errdefs.System(errors.Wrapf(err, "failed to decode checkpoint data"))
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
var cpDesc *v1.Descriptor
|
|
|
|
for _, m := range index.Manifests {
|
2021-04-16 11:21:26 -04:00
|
|
|
m := m
|
2017-09-22 09:52:41 -04:00
|
|
|
if m.MediaType == images.MediaTypeContainerd1Checkpoint {
|
2022-07-13 16:30:47 -04:00
|
|
|
cpDesc = &m //nolint:gosec
|
2017-09-22 09:52:41 -04:00
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if cpDesc == nil {
|
2017-11-28 23:09:37 -05:00
|
|
|
return errdefs.System(errors.Wrapf(err, "invalid checkpoint"))
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
rat, err := t.ctr.client.client.ContentStore().ReaderAt(ctx, *cpDesc)
|
2017-09-22 09:52:41 -04:00
|
|
|
if err != nil {
|
2017-11-28 23:09:37 -05:00
|
|
|
return errdefs.System(errors.Wrapf(err, "failed to get checkpoint reader"))
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
defer rat.Close()
|
|
|
|
_, err = archive.Apply(ctx, checkpointDir, content.NewReader(rat))
|
|
|
|
if err != nil {
|
2017-11-28 23:09:37 -05:00
|
|
|
return errdefs.System(errors.Wrapf(err, "failed to read checkpoint reader"))
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
// LoadContainer loads the containerd container.
|
|
|
|
func (c *client) LoadContainer(ctx context.Context, id string) (libcontainerdtypes.Container, error) {
|
2019-03-25 16:17:17 -04:00
|
|
|
ctr, err := c.client.LoadContainer(ctx, id)
|
|
|
|
if err != nil {
|
|
|
|
if containerderrors.IsNotFound(err) {
|
|
|
|
return nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
|
|
|
|
}
|
|
|
|
return nil, wrapError(err)
|
|
|
|
}
|
2022-05-10 15:59:00 -04:00
|
|
|
return &container{client: c, c8dCtr: ctr}, nil
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
|
2022-05-10 15:59:00 -04:00
|
|
|
func (c *container) Task(ctx context.Context) (libcontainerdtypes.Task, error) {
|
|
|
|
t, err := c.c8dCtr.Task(ctx, nil)
|
2019-03-25 16:17:17 -04:00
|
|
|
if err != nil {
|
|
|
|
return nil, wrapError(err)
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
2022-05-10 15:59:00 -04:00
|
|
|
return c.newTask(t), nil
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// createIO creates the io to be used by a process
// This needs to get a pointer to interface as upon closure the process may not have yet been registered
//
// The FIFOs are turned into a cio.DirectIO, the stdin end is wrapped so that
// closing it also asks containerd to close the process's stdin (once the
// process becomes available on stdinCloseSync), and the result is handed to
// attachStdio so the daemon can wire up streams. On attachStdio failure the
// DirectIO is cancelled and closed before returning.
func (c *container) createIO(fifos *cio.FIFOSet, processID string, stdinCloseSync chan containerd.Process, attachStdio libcontainerdtypes.StdioCallback) (cio.IO, error) {
	var (
		io  *cio.DirectIO
		err error
	)
	io, err = c.client.newDirectIO(context.Background(), fifos)
	if err != nil {
		return nil, err
	}

	if io.Stdin != nil {
		// err here deliberately shadows the outer err: it accumulates the
		// result of closing the local pipe (and, later, CloseIO) and is what
		// the wrapper's Close returns.
		var (
			err       error
			stdinOnce sync.Once
		)
		pipe := io.Stdin
		// Wrap stdin so Close is idempotent: close the local write end first,
		// then notify containerd that the process's stdin is closed.
		io.Stdin = ioutils.NewWriteCloserWrapper(pipe, func() error {
			stdinOnce.Do(func() {
				err = pipe.Close()
				// Do the rest in a new routine to avoid a deadlock if the
				// Exec/Start call failed.
				go func() {
					// A closed channel means the process never started; there
					// is nothing to CloseIO on.
					p, ok := <-stdinCloseSync
					if !ok {
						return
					}
					err = p.CloseIO(context.Background(), containerd.WithStdinCloser)
					// Ignore the GRPC "transport is closing" error: containerd
					// going away at this point means stdin is closed anyway.
					if err != nil && strings.Contains(err.Error(), "transport is closing") {
						err = nil
					}
					// NOTE(review): this goroutine writes the shadowed err after
					// the wrapper's Close may already have returned it — the
					// CloseIO error can be lost and the write is unsynchronized.
					// Confirm this best-effort behavior is intended.
				}()
			})
			return err
		})
	}

	rio, err := attachStdio(io)
	if err != nil {
		// attachStdio failed: tear down the FIFOs so nothing leaks.
		io.Cancel()
		io.Close()
	}
	return rio, err
}
|
|
|
|
|
2019-03-25 16:17:17 -04:00
|
|
|
func (c *client) processEvent(ctx context.Context, et libcontainerdtypes.EventType, ei libcontainerdtypes.EventInfo) {
|
Windows: Experimental: Allow containerd for runtime
Signed-off-by: John Howard <jhoward@microsoft.com>
This is the first step in refactoring moby (dockerd) to use containerd on Windows.
Similar to the current model in Linux, this adds the option to enable it for runtime.
It does not switch the graphdriver to containerd snapshotters.
- Refactors libcontainerd to a series of subpackages so that either a
"local" containerd (1) or a "remote" (2) containerd can be loaded as opposed
to conditional compile as "local" for Windows and "remote" for Linux.
- Updates libcontainerd such that Windows has an option to allow the use of a
"remote" containerd. Here, it communicates over a named pipe using GRPC.
This is currently guarded behind the experimental flag, an environment variable,
and the providing of a pipename to connect to containerd.
- Infrastructure pieces such as under pkg/system to have helper functions for
determining whether containerd is being used.
(1) "local" containerd is what the daemon on Windows has used since inception.
It's not really containerd at all - it's simply local invocation of HCS APIs
directly in-process from the daemon through the Microsoft/hcsshim library.
(2) "remote" containerd is what docker on Linux uses for it's runtime. It means
that there is a separate containerd service running, and docker communicates over
GRPC to it.
To try this out, you will need to start with something like the following:
Window 1:
containerd --log-level debug
Window 2:
$env:DOCKER_WINDOWS_CONTAINERD=1
dockerd --experimental -D --containerd \\.\pipe\containerd-containerd
You will need the following binary from github.com/containerd/containerd in your path:
- containerd.exe
You will need the following binaries from github.com/Microsoft/hcsshim in your path:
- runhcs.exe
- containerd-shim-runhcs-v1.exe
For LCOW, it will require and initrd.img and kernel in `C:\Program Files\Linux Containers`.
This is no different to the current requirements. However, you may need updated binaries,
particularly initrd.img built from Microsoft/opengcs as (at the time of writing), Linuxkit
binaries are somewhat out of date.
Note that containerd and hcsshim for HCS v2 APIs do not yet support all the required
functionality needed for docker. This will come in time - this is a baby (although large)
step to migrating Docker on Windows to containerd.
Note that the HCS v2 APIs are only called on RS5+ builds. RS1..RS4 will still use
HCS v1 APIs as the v2 APIs were not fully developed enough on these builds to be usable.
This abstraction is done in HCSShim. (Referring specifically to runtime)
Note the LCOW graphdriver still uses HCS v1 APIs regardless.
Note also that this does not migrate docker to use containerd snapshotters
rather than graphdrivers. This needs to be done in conjunction with Linux also
doing the same switch.
2019-01-08 17:30:52 -05:00
|
|
|
c.eventQ.Append(ei.ContainerID, func() {
|
2017-09-22 09:52:41 -04:00
|
|
|
err := c.backend.ProcessEvent(ei.ContainerID, et, ei)
|
|
|
|
if err != nil {
|
2022-05-10 15:59:00 -04:00
|
|
|
c.logger.WithContext(ctx).WithError(err).WithFields(logrus.Fields{
|
2017-09-22 09:52:41 -04:00
|
|
|
"container": ei.ContainerID,
|
|
|
|
"event": et,
|
|
|
|
"event-info": ei,
|
|
|
|
}).Error("failed to process event")
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2020-07-17 16:34:57 -04:00
|
|
|
func (c *client) waitServe(ctx context.Context) bool {
|
|
|
|
t := 100 * time.Millisecond
|
|
|
|
delay := time.NewTimer(t)
|
|
|
|
if !delay.Stop() {
|
|
|
|
<-delay.C
|
|
|
|
}
|
|
|
|
defer delay.Stop()
|
|
|
|
|
|
|
|
// `IsServing` will actually block until the service is ready.
|
|
|
|
// However it can return early, so we'll loop with a delay to handle it.
|
|
|
|
for {
|
2020-07-17 14:39:32 -04:00
|
|
|
serving, err := c.client.IsServing(ctx)
|
|
|
|
if err != nil {
|
|
|
|
if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
logrus.WithError(err).Warn("Error while testing if containerd API is ready")
|
2020-07-17 16:34:57 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
if serving {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
delay.Reset(t)
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
return false
|
|
|
|
case <-delay.C:
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-23 15:15:21 -04:00
|
|
|
func (c *client) processEventStream(ctx context.Context, ns string) {
|
2017-09-22 09:52:41 -04:00
|
|
|
var (
|
2018-04-18 17:55:50 -04:00
|
|
|
err error
|
|
|
|
ev *events.Envelope
|
Windows: Experimental: Allow containerd for runtime
Signed-off-by: John Howard <jhoward@microsoft.com>
This is the first step in refactoring moby (dockerd) to use containerd on Windows.
Similar to the current model in Linux, this adds the option to enable it for runtime.
It does not switch the graphdriver to containerd snapshotters.
- Refactors libcontainerd to a series of subpackages so that either a
"local" containerd (1) or a "remote" (2) containerd can be loaded as opposed
to conditional compile as "local" for Windows and "remote" for Linux.
- Updates libcontainerd such that Windows has an option to allow the use of a
"remote" containerd. Here, it communicates over a named pipe using GRPC.
This is currently guarded behind the experimental flag, an environment variable,
and the providing of a pipename to connect to containerd.
- Infrastructure pieces such as under pkg/system to have helper functions for
determining whether containerd is being used.
(1) "local" containerd is what the daemon on Windows has used since inception.
It's not really containerd at all - it's simply local invocation of HCS APIs
directly in-process from the daemon through the Microsoft/hcsshim library.
(2) "remote" containerd is what docker on Linux uses for it's runtime. It means
that there is a separate containerd service running, and docker communicates over
GRPC to it.
To try this out, you will need to start with something like the following:
Window 1:
containerd --log-level debug
Window 2:
$env:DOCKER_WINDOWS_CONTAINERD=1
dockerd --experimental -D --containerd \\.\pipe\containerd-containerd
You will need the following binary from github.com/containerd/containerd in your path:
- containerd.exe
You will need the following binaries from github.com/Microsoft/hcsshim in your path:
- runhcs.exe
- containerd-shim-runhcs-v1.exe
For LCOW, it will require and initrd.img and kernel in `C:\Program Files\Linux Containers`.
This is no different to the current requirements. However, you may need updated binaries,
particularly initrd.img built from Microsoft/opengcs as (at the time of writing), Linuxkit
binaries are somewhat out of date.
Note that containerd and hcsshim for HCS v2 APIs do not yet support all the required
functionality needed for docker. This will come in time - this is a baby (although large)
step to migrating Docker on Windows to containerd.
Note that the HCS v2 APIs are only called on RS5+ builds. RS1..RS4 will still use
HCS v1 APIs as the v2 APIs were not fully developed enough on these builds to be usable.
This abstraction is done in HCSShim. (Referring specifically to runtime)
Note the LCOW graphdriver still uses HCS v1 APIs regardless.
Note also that this does not migrate docker to use containerd snapshotters
rather than graphdrivers. This needs to be done in conjunction with Linux also
doing the same switch.
2019-01-08 17:30:52 -05:00
|
|
|
et libcontainerdtypes.EventType
|
|
|
|
ei libcontainerdtypes.EventInfo
|
2017-09-22 09:52:41 -04:00
|
|
|
)
|
|
|
|
|
2020-07-17 14:39:32 -04:00
|
|
|
// Create a new context specifically for this subscription.
|
|
|
|
// The context must be cancelled to cancel the subscription.
|
|
|
|
// In cases where we have to restart event stream processing,
|
|
|
|
// we'll need the original context b/c this one will be cancelled
|
|
|
|
subCtx, cancel := context.WithCancel(ctx)
|
|
|
|
defer cancel()
|
|
|
|
|
2018-04-18 17:55:50 -04:00
|
|
|
// Filter on both namespace *and* topic. To create an "and" filter,
|
|
|
|
// this must be a single, comma-separated string
|
2020-07-17 14:39:32 -04:00
|
|
|
eventStream, errC := c.client.EventService().Subscribe(subCtx, "namespace=="+ns+",topic~=|^/tasks/|")
|
2017-09-22 09:52:41 -04:00
|
|
|
|
2018-05-23 15:15:21 -04:00
|
|
|
c.logger.Debug("processing event stream")
|
2018-01-31 17:32:40 -05:00
|
|
|
|
2017-09-22 09:52:41 -04:00
|
|
|
for {
|
2018-04-18 17:55:50 -04:00
|
|
|
select {
|
|
|
|
case err = <-errC:
|
|
|
|
if err != nil {
|
|
|
|
errStatus, ok := status.FromError(err)
|
|
|
|
if !ok || errStatus.Code() != codes.Canceled {
|
2020-07-17 16:34:57 -04:00
|
|
|
c.logger.WithError(err).Error("Failed to get event")
|
|
|
|
c.logger.Info("Waiting for containerd to be ready to restart event processing")
|
|
|
|
if c.waitServe(ctx) {
|
2019-07-12 18:29:57 -04:00
|
|
|
go c.processEventStream(ctx, ns)
|
|
|
|
return
|
|
|
|
}
|
2018-04-18 17:55:50 -04:00
|
|
|
}
|
2019-07-12 18:29:57 -04:00
|
|
|
c.logger.WithError(ctx.Err()).Info("stopping event stream following graceful shutdown")
|
2017-11-14 17:00:47 -05:00
|
|
|
}
|
2017-09-22 09:52:41 -04:00
|
|
|
return
|
2018-04-18 17:55:50 -04:00
|
|
|
case ev = <-eventStream:
|
|
|
|
if ev.Event == nil {
|
|
|
|
c.logger.WithField("event", ev).Warn("invalid event")
|
|
|
|
continue
|
|
|
|
}
|
2017-09-22 09:52:41 -04:00
|
|
|
|
2018-04-18 17:55:50 -04:00
|
|
|
v, err := typeurl.UnmarshalAny(ev.Event)
|
|
|
|
if err != nil {
|
|
|
|
c.logger.WithError(err).WithField("event", ev).Warn("failed to unmarshal event")
|
|
|
|
continue
|
|
|
|
}
|
2017-09-22 09:52:41 -04:00
|
|
|
|
2018-04-18 17:55:50 -04:00
|
|
|
c.logger.WithField("topic", ev.Topic).Debug("event")
|
2017-09-22 09:52:41 -04:00
|
|
|
|
2018-04-18 17:55:50 -04:00
|
|
|
switch t := v.(type) {
|
|
|
|
case *apievents.TaskCreate:
|
Windows: Experimental: Allow containerd for runtime
Signed-off-by: John Howard <jhoward@microsoft.com>
This is the first step in refactoring moby (dockerd) to use containerd on Windows.
Similar to the current model in Linux, this adds the option to enable it for runtime.
It does not switch the graphdriver to containerd snapshotters.
- Refactors libcontainerd to a series of subpackages so that either a
"local" containerd (1) or a "remote" (2) containerd can be loaded as opposed
to conditional compile as "local" for Windows and "remote" for Linux.
- Updates libcontainerd such that Windows has an option to allow the use of a
"remote" containerd. Here, it communicates over a named pipe using GRPC.
This is currently guarded behind the experimental flag, an environment variable,
and the providing of a pipename to connect to containerd.
- Infrastructure pieces such as under pkg/system to have helper functions for
determining whether containerd is being used.
(1) "local" containerd is what the daemon on Windows has used since inception.
It's not really containerd at all - it's simply local invocation of HCS APIs
directly in-process from the daemon through the Microsoft/hcsshim library.
(2) "remote" containerd is what docker on Linux uses for it's runtime. It means
that there is a separate containerd service running, and docker communicates over
GRPC to it.
To try this out, you will need to start with something like the following:
Window 1:
containerd --log-level debug
Window 2:
$env:DOCKER_WINDOWS_CONTAINERD=1
dockerd --experimental -D --containerd \\.\pipe\containerd-containerd
You will need the following binary from github.com/containerd/containerd in your path:
- containerd.exe
You will need the following binaries from github.com/Microsoft/hcsshim in your path:
- runhcs.exe
- containerd-shim-runhcs-v1.exe
For LCOW, it will require and initrd.img and kernel in `C:\Program Files\Linux Containers`.
This is no different to the current requirements. However, you may need updated binaries,
particularly initrd.img built from Microsoft/opengcs as (at the time of writing), Linuxkit
binaries are somewhat out of date.
Note that containerd and hcsshim for HCS v2 APIs do not yet support all the required
functionality needed for docker. This will come in time - this is a baby (although large)
step to migrating Docker on Windows to containerd.
Note that the HCS v2 APIs are only called on RS5+ builds. RS1..RS4 will still use
HCS v1 APIs as the v2 APIs were not fully developed enough on these builds to be usable.
This abstraction is done in HCSShim. (Referring specifically to runtime)
Note the LCOW graphdriver still uses HCS v1 APIs regardless.
Note also that this does not migrate docker to use containerd snapshotters
rather than graphdrivers. This needs to be done in conjunction with Linux also
doing the same switch.
2019-01-08 17:30:52 -05:00
|
|
|
et = libcontainerdtypes.EventCreate
|
|
|
|
ei = libcontainerdtypes.EventInfo{
|
2018-04-18 17:55:50 -04:00
|
|
|
ContainerID: t.ContainerID,
|
|
|
|
ProcessID: t.ContainerID,
|
|
|
|
Pid: t.Pid,
|
|
|
|
}
|
|
|
|
case *apievents.TaskStart:
|
Windows: Experimental: Allow containerd for runtime
Signed-off-by: John Howard <jhoward@microsoft.com>
This is the first step in refactoring moby (dockerd) to use containerd on Windows.
Similar to the current model in Linux, this adds the option to enable it for runtime.
It does not switch the graphdriver to containerd snapshotters.
- Refactors libcontainerd to a series of subpackages so that either a
"local" containerd (1) or a "remote" (2) containerd can be loaded as opposed
to conditional compile as "local" for Windows and "remote" for Linux.
- Updates libcontainerd such that Windows has an option to allow the use of a
"remote" containerd. Here, it communicates over a named pipe using GRPC.
This is currently guarded behind the experimental flag, an environment variable,
and the providing of a pipename to connect to containerd.
- Infrastructure pieces such as under pkg/system to have helper functions for
determining whether containerd is being used.
(1) "local" containerd is what the daemon on Windows has used since inception.
It's not really containerd at all - it's simply local invocation of HCS APIs
directly in-process from the daemon through the Microsoft/hcsshim library.
(2) "remote" containerd is what docker on Linux uses for it's runtime. It means
that there is a separate containerd service running, and docker communicates over
GRPC to it.
To try this out, you will need to start with something like the following:
Window 1:
containerd --log-level debug
Window 2:
$env:DOCKER_WINDOWS_CONTAINERD=1
dockerd --experimental -D --containerd \\.\pipe\containerd-containerd
You will need the following binary from github.com/containerd/containerd in your path:
- containerd.exe
You will need the following binaries from github.com/Microsoft/hcsshim in your path:
- runhcs.exe
- containerd-shim-runhcs-v1.exe
For LCOW, it will require and initrd.img and kernel in `C:\Program Files\Linux Containers`.
This is no different to the current requirements. However, you may need updated binaries,
particularly initrd.img built from Microsoft/opengcs as (at the time of writing), Linuxkit
binaries are somewhat out of date.
Note that containerd and hcsshim for HCS v2 APIs do not yet support all the required
functionality needed for docker. This will come in time - this is a baby (although large)
step to migrating Docker on Windows to containerd.
Note that the HCS v2 APIs are only called on RS5+ builds. RS1..RS4 will still use
HCS v1 APIs as the v2 APIs were not fully developed enough on these builds to be usable.
This abstraction is done in HCSShim. (Referring specifically to runtime)
Note the LCOW graphdriver still uses HCS v1 APIs regardless.
Note also that this does not migrate docker to use containerd snapshotters
rather than graphdrivers. This needs to be done in conjunction with Linux also
doing the same switch.
2019-01-08 17:30:52 -05:00
|
|
|
et = libcontainerdtypes.EventStart
|
|
|
|
ei = libcontainerdtypes.EventInfo{
|
2018-04-18 17:55:50 -04:00
|
|
|
ContainerID: t.ContainerID,
|
|
|
|
ProcessID: t.ContainerID,
|
|
|
|
Pid: t.Pid,
|
|
|
|
}
|
|
|
|
case *apievents.TaskExit:
|
Windows: Experimental: Allow containerd for runtime
Signed-off-by: John Howard <jhoward@microsoft.com>
This is the first step in refactoring moby (dockerd) to use containerd on Windows.
Similar to the current model in Linux, this adds the option to enable it for runtime.
It does not switch the graphdriver to containerd snapshotters.
- Refactors libcontainerd to a series of subpackages so that either a
"local" containerd (1) or a "remote" (2) containerd can be loaded as opposed
to conditional compile as "local" for Windows and "remote" for Linux.
- Updates libcontainerd such that Windows has an option to allow the use of a
"remote" containerd. Here, it communicates over a named pipe using GRPC.
This is currently guarded behind the experimental flag, an environment variable,
and the providing of a pipename to connect to containerd.
- Infrastructure pieces such as under pkg/system to have helper functions for
determining whether containerd is being used.
(1) "local" containerd is what the daemon on Windows has used since inception.
It's not really containerd at all - it's simply local invocation of HCS APIs
directly in-process from the daemon through the Microsoft/hcsshim library.
(2) "remote" containerd is what docker on Linux uses for it's runtime. It means
that there is a separate containerd service running, and docker communicates over
GRPC to it.
To try this out, you will need to start with something like the following:
Window 1:
containerd --log-level debug
Window 2:
$env:DOCKER_WINDOWS_CONTAINERD=1
dockerd --experimental -D --containerd \\.\pipe\containerd-containerd
You will need the following binary from github.com/containerd/containerd in your path:
- containerd.exe
You will need the following binaries from github.com/Microsoft/hcsshim in your path:
- runhcs.exe
- containerd-shim-runhcs-v1.exe
For LCOW, it will require and initrd.img and kernel in `C:\Program Files\Linux Containers`.
This is no different to the current requirements. However, you may need updated binaries,
particularly initrd.img built from Microsoft/opengcs as (at the time of writing), Linuxkit
binaries are somewhat out of date.
Note that containerd and hcsshim for HCS v2 APIs do not yet support all the required
functionality needed for docker. This will come in time - this is a baby (although large)
step to migrating Docker on Windows to containerd.
Note that the HCS v2 APIs are only called on RS5+ builds. RS1..RS4 will still use
HCS v1 APIs as the v2 APIs were not fully developed enough on these builds to be usable.
This abstraction is done in HCSShim. (Referring specifically to runtime)
Note the LCOW graphdriver still uses HCS v1 APIs regardless.
Note also that this does not migrate docker to use containerd snapshotters
rather than graphdrivers. This needs to be done in conjunction with Linux also
doing the same switch.
2019-01-08 17:30:52 -05:00
|
|
|
et = libcontainerdtypes.EventExit
|
|
|
|
ei = libcontainerdtypes.EventInfo{
|
2018-04-18 17:55:50 -04:00
|
|
|
ContainerID: t.ContainerID,
|
|
|
|
ProcessID: t.ID,
|
|
|
|
Pid: t.Pid,
|
|
|
|
ExitCode: t.ExitStatus,
|
|
|
|
ExitedAt: t.ExitedAt,
|
|
|
|
}
|
|
|
|
case *apievents.TaskOOM:
|
Windows: Experimental: Allow containerd for runtime
Signed-off-by: John Howard <jhoward@microsoft.com>
This is the first step in refactoring moby (dockerd) to use containerd on Windows.
Similar to the current model in Linux, this adds the option to enable it for runtime.
It does not switch the graphdriver to containerd snapshotters.
- Refactors libcontainerd to a series of subpackages so that either a
"local" containerd (1) or a "remote" (2) containerd can be loaded as opposed
to conditional compile as "local" for Windows and "remote" for Linux.
- Updates libcontainerd such that Windows has an option to allow the use of a
"remote" containerd. Here, it communicates over a named pipe using GRPC.
This is currently guarded behind the experimental flag, an environment variable,
and the providing of a pipename to connect to containerd.
- Infrastructure pieces such as under pkg/system to have helper functions for
determining whether containerd is being used.
(1) "local" containerd is what the daemon on Windows has used since inception.
It's not really containerd at all - it's simply local invocation of HCS APIs
directly in-process from the daemon through the Microsoft/hcsshim library.
(2) "remote" containerd is what docker on Linux uses for it's runtime. It means
that there is a separate containerd service running, and docker communicates over
GRPC to it.
To try this out, you will need to start with something like the following:
Window 1:
containerd --log-level debug
Window 2:
$env:DOCKER_WINDOWS_CONTAINERD=1
dockerd --experimental -D --containerd \\.\pipe\containerd-containerd
You will need the following binary from github.com/containerd/containerd in your path:
- containerd.exe
You will need the following binaries from github.com/Microsoft/hcsshim in your path:
- runhcs.exe
- containerd-shim-runhcs-v1.exe
For LCOW, it will require and initrd.img and kernel in `C:\Program Files\Linux Containers`.
This is no different to the current requirements. However, you may need updated binaries,
particularly initrd.img built from Microsoft/opengcs as (at the time of writing), Linuxkit
binaries are somewhat out of date.
Note that containerd and hcsshim for HCS v2 APIs do not yet support all the required
functionality needed for docker. This will come in time - this is a baby (although large)
step to migrating Docker on Windows to containerd.
Note that the HCS v2 APIs are only called on RS5+ builds. RS1..RS4 will still use
HCS v1 APIs as the v2 APIs were not fully developed enough on these builds to be usable.
This abstraction is done in HCSShim. (Referring specifically to runtime)
Note the LCOW graphdriver still uses HCS v1 APIs regardless.
Note also that this does not migrate docker to use containerd snapshotters
rather than graphdrivers. This needs to be done in conjunction with Linux also
doing the same switch.
2019-01-08 17:30:52 -05:00
|
|
|
et = libcontainerdtypes.EventOOM
|
|
|
|
ei = libcontainerdtypes.EventInfo{
|
2018-04-18 17:55:50 -04:00
|
|
|
ContainerID: t.ContainerID,
|
|
|
|
}
|
|
|
|
case *apievents.TaskExecAdded:
|
Windows: Experimental: Allow containerd for runtime
Signed-off-by: John Howard <jhoward@microsoft.com>
This is the first step in refactoring moby (dockerd) to use containerd on Windows.
Similar to the current model in Linux, this adds the option to enable it for runtime.
It does not switch the graphdriver to containerd snapshotters.
- Refactors libcontainerd to a series of subpackages so that either a
"local" containerd (1) or a "remote" (2) containerd can be loaded as opposed
to conditional compile as "local" for Windows and "remote" for Linux.
- Updates libcontainerd such that Windows has an option to allow the use of a
"remote" containerd. Here, it communicates over a named pipe using GRPC.
This is currently guarded behind the experimental flag, an environment variable,
and the providing of a pipename to connect to containerd.
- Infrastructure pieces such as under pkg/system to have helper functions for
determining whether containerd is being used.
(1) "local" containerd is what the daemon on Windows has used since inception.
It's not really containerd at all - it's simply local invocation of HCS APIs
directly in-process from the daemon through the Microsoft/hcsshim library.
(2) "remote" containerd is what docker on Linux uses for it's runtime. It means
that there is a separate containerd service running, and docker communicates over
GRPC to it.
To try this out, you will need to start with something like the following:
Window 1:
containerd --log-level debug
Window 2:
$env:DOCKER_WINDOWS_CONTAINERD=1
dockerd --experimental -D --containerd \\.\pipe\containerd-containerd
You will need the following binary from github.com/containerd/containerd in your path:
- containerd.exe
You will need the following binaries from github.com/Microsoft/hcsshim in your path:
- runhcs.exe
- containerd-shim-runhcs-v1.exe
For LCOW, it will require and initrd.img and kernel in `C:\Program Files\Linux Containers`.
This is no different to the current requirements. However, you may need updated binaries,
particularly initrd.img built from Microsoft/opengcs as (at the time of writing), Linuxkit
binaries are somewhat out of date.
Note that containerd and hcsshim for HCS v2 APIs do not yet support all the required
functionality needed for docker. This will come in time - this is a baby (although large)
step to migrating Docker on Windows to containerd.
Note that the HCS v2 APIs are only called on RS5+ builds. RS1..RS4 will still use
HCS v1 APIs as the v2 APIs were not fully developed enough on these builds to be usable.
This abstraction is done in HCSShim. (Referring specifically to runtime)
Note the LCOW graphdriver still uses HCS v1 APIs regardless.
Note also that this does not migrate docker to use containerd snapshotters
rather than graphdrivers. This needs to be done in conjunction with Linux also
doing the same switch.
2019-01-08 17:30:52 -05:00
|
|
|
et = libcontainerdtypes.EventExecAdded
|
|
|
|
ei = libcontainerdtypes.EventInfo{
|
2018-04-18 17:55:50 -04:00
|
|
|
ContainerID: t.ContainerID,
|
|
|
|
ProcessID: t.ExecID,
|
|
|
|
}
|
|
|
|
case *apievents.TaskExecStarted:
|
Windows: Experimental: Allow containerd for runtime
Signed-off-by: John Howard <jhoward@microsoft.com>
This is the first step in refactoring moby (dockerd) to use containerd on Windows.
Similar to the current model in Linux, this adds the option to enable it for runtime.
It does not switch the graphdriver to containerd snapshotters.
- Refactors libcontainerd to a series of subpackages so that either a
"local" containerd (1) or a "remote" (2) containerd can be loaded as opposed
to conditional compile as "local" for Windows and "remote" for Linux.
- Updates libcontainerd such that Windows has an option to allow the use of a
"remote" containerd. Here, it communicates over a named pipe using GRPC.
This is currently guarded behind the experimental flag, an environment variable,
and the providing of a pipename to connect to containerd.
- Infrastructure pieces such as under pkg/system to have helper functions for
determining whether containerd is being used.
(1) "local" containerd is what the daemon on Windows has used since inception.
It's not really containerd at all - it's simply local invocation of HCS APIs
directly in-process from the daemon through the Microsoft/hcsshim library.
(2) "remote" containerd is what docker on Linux uses for it's runtime. It means
that there is a separate containerd service running, and docker communicates over
GRPC to it.
To try this out, you will need to start with something like the following:
Window 1:
containerd --log-level debug
Window 2:
$env:DOCKER_WINDOWS_CONTAINERD=1
dockerd --experimental -D --containerd \\.\pipe\containerd-containerd
You will need the following binary from github.com/containerd/containerd in your path:
- containerd.exe
You will need the following binaries from github.com/Microsoft/hcsshim in your path:
- runhcs.exe
- containerd-shim-runhcs-v1.exe
For LCOW, it will require an initrd.img and kernel in `C:\Program Files\Linux Containers`.
This is no different to the current requirements. However, you may need updated binaries,
particularly initrd.img built from Microsoft/opengcs as (at the time of writing), Linuxkit
binaries are somewhat out of date.
Note that containerd and hcsshim for HCS v2 APIs do not yet support all the required
functionality needed for docker. This will come in time - this is a baby (although large)
step to migrating Docker on Windows to containerd.
Note that the HCS v2 APIs are only called on RS5+ builds. RS1..RS4 will still use
HCS v1 APIs as the v2 APIs were not fully developed enough on these builds to be usable.
This abstraction is done in HCSShim. (Referring specifically to runtime)
Note the LCOW graphdriver still uses HCS v1 APIs regardless.
Note also that this does not migrate docker to use containerd snapshotters
rather than graphdrivers. This needs to be done in conjunction with Linux also
doing the same switch.
2019-01-08 17:30:52 -05:00
|
|
|
et = libcontainerdtypes.EventExecStarted
|
|
|
|
ei = libcontainerdtypes.EventInfo{
|
2018-04-18 17:55:50 -04:00
|
|
|
ContainerID: t.ContainerID,
|
|
|
|
ProcessID: t.ExecID,
|
|
|
|
Pid: t.Pid,
|
|
|
|
}
|
|
|
|
case *apievents.TaskPaused:
|
Windows: Experimental: Allow containerd for runtime
Signed-off-by: John Howard <jhoward@microsoft.com>
This is the first step in refactoring moby (dockerd) to use containerd on Windows.
Similar to the current model in Linux, this adds the option to enable it for runtime.
It does not switch the graphdriver to containerd snapshotters.
- Refactors libcontainerd to a series of subpackages so that either a
"local" containerd (1) or a "remote" (2) containerd can be loaded as opposed
to conditional compile as "local" for Windows and "remote" for Linux.
- Updates libcontainerd such that Windows has an option to allow the use of a
"remote" containerd. Here, it communicates over a named pipe using GRPC.
This is currently guarded behind the experimental flag, an environment variable,
and the providing of a pipename to connect to containerd.
- Infrastructure pieces such as under pkg/system to have helper functions for
determining whether containerd is being used.
(1) "local" containerd is what the daemon on Windows has used since inception.
It's not really containerd at all - it's simply local invocation of HCS APIs
directly in-process from the daemon through the Microsoft/hcsshim library.
(2) "remote" containerd is what docker on Linux uses for its runtime. It means
that there is a separate containerd service running, and docker communicates over
GRPC to it.
To try this out, you will need to start with something like the following:
Window 1:
containerd --log-level debug
Window 2:
$env:DOCKER_WINDOWS_CONTAINERD=1
dockerd --experimental -D --containerd \\.\pipe\containerd-containerd
You will need the following binary from github.com/containerd/containerd in your path:
- containerd.exe
You will need the following binaries from github.com/Microsoft/hcsshim in your path:
- runhcs.exe
- containerd-shim-runhcs-v1.exe
For LCOW, it will require an initrd.img and kernel in `C:\Program Files\Linux Containers`.
This is no different to the current requirements. However, you may need updated binaries,
particularly initrd.img built from Microsoft/opengcs as (at the time of writing), Linuxkit
binaries are somewhat out of date.
Note that containerd and hcsshim for HCS v2 APIs do not yet support all the required
functionality needed for docker. This will come in time - this is a baby (although large)
step to migrating Docker on Windows to containerd.
Note that the HCS v2 APIs are only called on RS5+ builds. RS1..RS4 will still use
HCS v1 APIs as the v2 APIs were not fully developed enough on these builds to be usable.
This abstraction is done in HCSShim. (Referring specifically to runtime)
Note the LCOW graphdriver still uses HCS v1 APIs regardless.
Note also that this does not migrate docker to use containerd snapshotters
rather than graphdrivers. This needs to be done in conjunction with Linux also
doing the same switch.
2019-01-08 17:30:52 -05:00
|
|
|
et = libcontainerdtypes.EventPaused
|
|
|
|
ei = libcontainerdtypes.EventInfo{
|
2018-04-18 17:55:50 -04:00
|
|
|
ContainerID: t.ContainerID,
|
|
|
|
}
|
|
|
|
case *apievents.TaskResumed:
|
Windows: Experimental: Allow containerd for runtime
Signed-off-by: John Howard <jhoward@microsoft.com>
This is the first step in refactoring moby (dockerd) to use containerd on Windows.
Similar to the current model in Linux, this adds the option to enable it for runtime.
It does not switch the graphdriver to containerd snapshotters.
- Refactors libcontainerd to a series of subpackages so that either a
"local" containerd (1) or a "remote" (2) containerd can be loaded as opposed
to conditional compile as "local" for Windows and "remote" for Linux.
- Updates libcontainerd such that Windows has an option to allow the use of a
"remote" containerd. Here, it communicates over a named pipe using GRPC.
This is currently guarded behind the experimental flag, an environment variable,
and the providing of a pipename to connect to containerd.
- Infrastructure pieces such as under pkg/system to have helper functions for
determining whether containerd is being used.
(1) "local" containerd is what the daemon on Windows has used since inception.
It's not really containerd at all - it's simply local invocation of HCS APIs
directly in-process from the daemon through the Microsoft/hcsshim library.
(2) "remote" containerd is what docker on Linux uses for its runtime. It means
that there is a separate containerd service running, and docker communicates over
GRPC to it.
To try this out, you will need to start with something like the following:
Window 1:
containerd --log-level debug
Window 2:
$env:DOCKER_WINDOWS_CONTAINERD=1
dockerd --experimental -D --containerd \\.\pipe\containerd-containerd
You will need the following binary from github.com/containerd/containerd in your path:
- containerd.exe
You will need the following binaries from github.com/Microsoft/hcsshim in your path:
- runhcs.exe
- containerd-shim-runhcs-v1.exe
For LCOW, it will require an initrd.img and kernel in `C:\Program Files\Linux Containers`.
This is no different to the current requirements. However, you may need updated binaries,
particularly initrd.img built from Microsoft/opengcs as (at the time of writing), Linuxkit
binaries are somewhat out of date.
Note that containerd and hcsshim for HCS v2 APIs do not yet support all the required
functionality needed for docker. This will come in time - this is a baby (although large)
step to migrating Docker on Windows to containerd.
Note that the HCS v2 APIs are only called on RS5+ builds. RS1..RS4 will still use
HCS v1 APIs as the v2 APIs were not fully developed enough on these builds to be usable.
This abstraction is done in HCSShim. (Referring specifically to runtime)
Note the LCOW graphdriver still uses HCS v1 APIs regardless.
Note also that this does not migrate docker to use containerd snapshotters
rather than graphdrivers. This needs to be done in conjunction with Linux also
doing the same switch.
2019-01-08 17:30:52 -05:00
|
|
|
et = libcontainerdtypes.EventResumed
|
|
|
|
ei = libcontainerdtypes.EventInfo{
|
2018-04-18 17:55:50 -04:00
|
|
|
ContainerID: t.ContainerID,
|
|
|
|
}
|
2020-10-30 23:58:38 -04:00
|
|
|
case *apievents.TaskDelete:
|
|
|
|
c.logger.WithFields(logrus.Fields{
|
|
|
|
"topic": ev.Topic,
|
|
|
|
"type": reflect.TypeOf(t),
|
|
|
|
"container": t.ContainerID},
|
|
|
|
).Info("ignoring event")
|
|
|
|
continue
|
2018-04-18 17:55:50 -04:00
|
|
|
default:
|
|
|
|
c.logger.WithFields(logrus.Fields{
|
|
|
|
"topic": ev.Topic,
|
|
|
|
"type": reflect.TypeOf(t)},
|
|
|
|
).Info("ignoring event")
|
|
|
|
continue
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
2018-04-18 17:55:50 -04:00
|
|
|
|
2019-03-25 16:17:17 -04:00
|
|
|
c.processEvent(ctx, et, ei)
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) {
|
2018-05-23 15:15:21 -04:00
|
|
|
writer, err := c.client.ContentStore().Writer(ctx, content.WithRef(ref))
|
2017-09-22 09:52:41 -04:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
defer writer.Close()
|
|
|
|
size, err := io.Copy(writer, r)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
labels := map[string]string{
|
|
|
|
"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
|
|
|
|
}
|
|
|
|
if err := writer.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return &types.Descriptor{
|
|
|
|
MediaType: mediaType,
|
|
|
|
Digest: writer.Digest(),
|
|
|
|
Size_: size,
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2019-03-25 16:17:17 -04:00
|
|
|
func (c *client) bundleDir(id string) string {
|
|
|
|
return filepath.Join(c.stateDir, id)
|
|
|
|
}
|
|
|
|
|
2017-09-22 09:52:41 -04:00
|
|
|
func wrapError(err error) error {
|
2017-12-15 10:00:15 -05:00
|
|
|
switch {
|
2018-01-04 16:12:23 -05:00
|
|
|
case err == nil:
|
|
|
|
return nil
|
2017-11-28 23:09:37 -05:00
|
|
|
case containerderrors.IsNotFound(err):
|
|
|
|
return errdefs.NotFound(err)
|
2017-12-15 10:00:15 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
msg := err.Error()
|
|
|
|
for _, s := range []string{"container does not exist", "not found", "no such container"} {
|
|
|
|
if strings.Contains(msg, s) {
|
2017-11-28 23:09:37 -05:00
|
|
|
return errdefs.NotFound(err)
|
2017-09-22 09:52:41 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|