wip: bump containerd and runc version

Signed-off-by: Michael Crosby <crosbymichael@gmail.com>

parent 7af4c904b3
commit d13528c635

18 changed files with 342 additions and 350 deletions
@@ -4,7 +4,7 @@
 # containerd is also pinned in vendor.conf. When updating the binary
 # version you may also need to update the vendor version to pick up bug
 # fixes or new APIs.
-CONTAINERD_COMMIT=8afcade1a6041b8301b6f6d8b88bae909993b989 # v1.2.0-13-g8afcade1
+CONTAINERD_COMMIT=aa537a67b3a9026c423a63dbaf71e4cc0776b2ad # v1.2.0-13-g8afcade1
 
 install_containerd() {
 	echo "Install containerd version $CONTAINERD_COMMIT"

@@ -79,7 +79,7 @@ google.golang.org/grpc v1.12.0
 # the containerd project first, and update both after that is merged.
 # This commit does not need to match RUNC_COMMIT as it is used for helper
 # packages but should be newer or equal.
-github.com/opencontainers/runc 9f1e94488e5e478e084fef997f022565b64b01d9
+github.com/opencontainers/runc 10d38b660a77168360df3522881e2dc2be5056bd
 github.com/opencontainers/runtime-spec 5684b8af48c1ac3b1451fa499724e30e3c20a294 # v1.0.1-49-g5684b8a
 github.com/opencontainers/image-spec v1.0.1
 github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0

@@ -118,7 +118,7 @@ github.com/googleapis/gax-go v2.0.0
 google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9
 
 # containerd
-github.com/containerd/containerd c4446665cb9c30056f4998ed953e6d4ff22c7c39 # v1.2.0
+github.com/containerd/containerd aa537a67b3a9026c423a63dbaf71e4cc0776b2ad # v1.2.0
 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
 github.com/containerd/continuity bd77b46c8352f74eb12c85bdc01f4b90f69d66b4
 github.com/containerd/cgroups 5e610833b72089b37d0e615de9a92dfc043757c2
vendor/github.com/containerd/containerd/README.md | 8 (generated, vendored)

@@ -224,7 +224,8 @@ This will be the best place to discuss design and implementation.
 
 For sync communication we have a community slack with a #containerd channel that everyone is welcome to join and chat about development.
 
-**Slack:** https://join.slack.com/t/dockercommunity/shared_invite/enQtNDM4NjAwNDMyOTUwLWZlMDZmYWRjZjk4Zjc5ZGQ5NWZkOWI1Yjk2NGE3ZWVlYjYxM2VhYjczOWIyZDFhZTE3NTUwZWQzMjhmNGYyZTg
+**Slack:** Catch us in the #containerd and #containerd-dev channels on dockercommunity.slack.com.
+[Click here for an invite to docker community slack.](https://join.slack.com/t/dockercommunity/shared_invite/enQtNDY4MDc1Mzc0MzIwLTgxZDBlMmM4ZGEyNDc1N2FkMzlhODJkYmE1YTVkYjM1MDE3ZjAwZjBkOGFlOTJkZjRmZGYzNjYyY2M3ZTUxYzQ)
 
 ### Reporting security issues

@@ -249,3 +250,8 @@ Please find all these core project documents, including the:
 * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
 
 information in our [`containerd/project`](https://github.com/containerd/project) repository.
+
+## Adoption
+
+Interested to see who is using containerd? Are you using containerd in a project?
+Please add yourself via pull request to our [ADOPTERS.md](./ADOPTERS.md) file.
vendor/github.com/containerd/containerd/remotes/docker/resolver.go | 2 (generated, vendored)

@@ -75,7 +75,7 @@ type ResolverOptions struct {
 
 	// Credentials provides username and secret given a host.
 	// If username is empty but a secret is given, that secret
-	// is interpretted as a long lived token.
+	// is interpreted as a long lived token.
 	// Deprecated: use Authorizer
 	Credentials func(string) (string, string, error)
 
vendor/github.com/containerd/containerd/runtime/proc/proc.go | 5 (generated, vendored)

@@ -40,7 +40,6 @@ func (s Stdio) IsNull() bool {
 
 // Process on a system
 type Process interface {
-	State
 	// ID returns the id for the process
 	ID() string
 	// Pid returns the pid for the process

@@ -57,10 +56,6 @@ type Process interface {
 	Status(context.Context) (string, error)
 	// Wait blocks until the process has exited
 	Wait()
-}
-
-// State of a process
-type State interface {
 	// Resize resizes the process console
 	Resize(ws console.WinSize) error
 	// Start execution of the process
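The net effect of these two hunks is that the separate State interface is folded into Process, so one interface now carries both identity and lifecycle methods. A minimal, hypothetical sketch of the consolidated shape (abbreviated; not the full vendored interface):

    package main

    import "fmt"

    // Process sketches the consolidated interface: lifecycle methods now
    // live directly on Process instead of on an embedded State interface.
    type Process interface {
    	ID() string
    	Start() error
    	Kill(sig uint32) error
    }

    type fakeProc struct{ id string }

    func (p *fakeProc) ID() string            { return p.id }
    func (p *fakeProc) Start() error          { return nil }
    func (p *fakeProc) Kill(sig uint32) error { return nil }

    func main() {
    	var p Process = &fakeProc{id: "demo"}
    	fmt.Println(p.ID(), p.Start(), p.Kill(9)) // one interface, no State embed
    }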
vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec.go | 37 (generated, vendored)

@@ -41,7 +41,7 @@ import (
 type execProcess struct {
 	wg sync.WaitGroup
 
-	proc.State
+	execState execState
 
 	mu      sync.Mutex
 	id      string

@@ -86,6 +86,13 @@ func (e *execProcess) ExitedAt() time.Time {
 	return e.exited
 }
 
+func (e *execProcess) SetExited(status int) {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
+	e.execState.SetExited(status)
+}
+
 func (e *execProcess) setExited(status int) {
 	e.status = status
 	e.exited = time.Now()

@@ -93,6 +100,13 @@ func (e *execProcess) setExited(status int) {
 	close(e.waitBlock)
 }
 
+func (e *execProcess) Delete(ctx context.Context) error {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
+	return e.execState.Delete(ctx)
+}
+
 func (e *execProcess) delete(ctx context.Context) error {
 	e.wg.Wait()
 	if e.io != nil {

@@ -107,6 +121,13 @@ func (e *execProcess) delete(ctx context.Context) error {
 	return nil
 }
 
+func (e *execProcess) Resize(ws console.WinSize) error {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
+	return e.execState.Resize(ws)
+}
+
 func (e *execProcess) resize(ws console.WinSize) error {
 	if e.console == nil {
 		return nil

@@ -114,6 +135,13 @@ func (e *execProcess) resize(ws console.WinSize) error {
 	return e.console.Resize(ws)
 }
 
+func (e *execProcess) Kill(ctx context.Context, sig uint32, _ bool) error {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
+	return e.execState.Kill(ctx, sig, false)
+}
+
 func (e *execProcess) kill(ctx context.Context, sig uint32, _ bool) error {
 	pid := e.pid
 	if pid != 0 {

@@ -132,6 +160,13 @@ func (e *execProcess) Stdio() proc.Stdio {
 	return e.stdio
 }
 
+func (e *execProcess) Start(ctx context.Context) error {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
+	return e.execState.Start(ctx)
+}
+
 func (e *execProcess) start(ctx context.Context) (err error) {
 	var (
 		socket *runc.Socket
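The pattern these hunks introduce: exported methods take the process mutex once and delegate to whatever state the process is currently in, while the lowercase helpers hold the real logic and assume the lock is already held. A condensed, self-contained sketch with hypothetical names:

    package main

    import (
    	"fmt"
    	"sync"
    )

    // state is the internal state-machine interface; implementations assume
    // the owning process mutex is already held by the caller.
    type state interface{ start() error }

    type process struct {
    	mu    sync.Mutex
    	state state
    }

    // Start is the exported wrapper: lock once, then delegate to the
    // current state. States may swap p.state while the lock is held.
    func (p *process) Start() error {
    	p.mu.Lock()
    	defer p.mu.Unlock()
    	return p.state.start()
    }

    type created struct{ p *process }

    func (s *created) start() error {
    	s.p.state = &running{} // transition under the already-held lock
    	return nil
    }

    type running struct{}

    func (s *running) start() error { return fmt.Errorf("already running") }

    func main() {
    	p := &process{}
    	p.state = &created{p: p}
    	fmt.Println(p.Start()) // <nil>
    	fmt.Println(p.Start()) // already running
    }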
vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec_state.go | 59 (generated, vendored)

@@ -25,6 +25,14 @@ import (
 	"github.com/pkg/errors"
 )
 
+type execState interface {
+	Resize(console.WinSize) error
+	Start(context.Context) error
+	Delete(context.Context) error
+	Kill(context.Context, uint32, bool) error
+	SetExited(int)
+}
+
 type execCreatedState struct {
 	p *execProcess
 }

@@ -32,11 +40,11 @@ type execCreatedState struct {
 func (s *execCreatedState) transition(name string) error {
 	switch name {
 	case "running":
-		s.p.State = &execRunningState{p: s.p}
+		s.p.execState = &execRunningState{p: s.p}
 	case "stopped":
-		s.p.State = &execStoppedState{p: s.p}
+		s.p.execState = &execStoppedState{p: s.p}
 	case "deleted":
-		s.p.State = &deletedState{}
+		s.p.execState = &deletedState{}
 	default:
 		return errors.Errorf("invalid state transition %q to %q", stateName(s), name)
 	}

@@ -44,15 +52,10 @@ func (s *execCreatedState) transition(name string) error {
 }
 
 func (s *execCreatedState) Resize(ws console.WinSize) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return s.p.resize(ws)
 }
 
 func (s *execCreatedState) Start(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
 	if err := s.p.start(ctx); err != nil {
 		return err
 	}

@@ -63,22 +66,15 @@ func (s *execCreatedState) Delete(ctx context.Context) error {
 	if err := s.p.delete(ctx); err != nil {
 		return err
 	}
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return s.transition("deleted")
 }
 
 func (s *execCreatedState) Kill(ctx context.Context, sig uint32, all bool) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return s.p.kill(ctx, sig, all)
 }
 
 func (s *execCreatedState) SetExited(status int) {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	s.p.setExited(status)
 
 	if err := s.transition("stopped"); err != nil {

@@ -93,7 +89,7 @@ type execRunningState struct {
 func (s *execRunningState) transition(name string) error {
 	switch name {
 	case "stopped":
-		s.p.State = &execStoppedState{p: s.p}
+		s.p.execState = &execStoppedState{p: s.p}
 	default:
 		return errors.Errorf("invalid state transition %q to %q", stateName(s), name)
 	}

@@ -101,37 +97,22 @@ func (s *execRunningState) transition(name string) error {
 }
 
 func (s *execRunningState) Resize(ws console.WinSize) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return s.p.resize(ws)
 }
 
 func (s *execRunningState) Start(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return errors.Errorf("cannot start a running process")
 }
 
 func (s *execRunningState) Delete(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return errors.Errorf("cannot delete a running process")
 }
 
 func (s *execRunningState) Kill(ctx context.Context, sig uint32, all bool) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return s.p.kill(ctx, sig, all)
 }
 
 func (s *execRunningState) SetExited(status int) {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	s.p.setExited(status)
 
 	if err := s.transition("stopped"); err != nil {

@@ -146,7 +127,7 @@ type execStoppedState struct {
 func (s *execStoppedState) transition(name string) error {
 	switch name {
 	case "deleted":
-		s.p.State = &deletedState{}
+		s.p.execState = &deletedState{}
 	default:
 		return errors.Errorf("invalid state transition %q to %q", stateName(s), name)
 	}

@@ -154,16 +135,10 @@ func (s *execStoppedState) transition(name string) error {
 }
 
 func (s *execStoppedState) Resize(ws console.WinSize) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return errors.Errorf("cannot resize a stopped container")
 }
 
 func (s *execStoppedState) Start(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return errors.Errorf("cannot start a stopped process")
 }
 

@@ -171,15 +146,11 @@ func (s *execStoppedState) Delete(ctx context.Context) error {
 	if err := s.p.delete(ctx); err != nil {
 		return err
 	}
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return s.transition("deleted")
 }
 
 func (s *execStoppedState) Kill(ctx context.Context, sig uint32, all bool) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return s.p.kill(ctx, sig, all)
 }
 
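Each state struct validates transitions through a switch in its transition() method, so an illegal move fails loudly instead of silently corrupting state. The same idea can be expressed as a small lookup table; a sketch (hypothetical helper, not the vendored code):

    package main

    import "fmt"

    // valid maps each state to the transitions it accepts, mirroring the
    // switch-based transition() methods in exec_state.go.
    var valid = map[string][]string{
    	"created": {"running", "stopped", "deleted"},
    	"running": {"stopped"},
    	"stopped": {"deleted"},
    }

    func transition(from, to string) error {
    	for _, t := range valid[from] {
    		if t == to {
    			return nil
    		}
    	}
    	return fmt.Errorf("invalid state transition %q to %q", from, to)
    }

    func main() {
    	fmt.Println(transition("created", "running")) // <nil>
    	fmt.Println(transition("stopped", "running")) // invalid state transition
    }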
vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init.go | 124 (generated, vendored)

@@ -46,8 +46,8 @@ const InitPidFile = "init.pid"
 
 // Init represents an initial process for a container
 type Init struct {
-	wg sync.WaitGroup
-	initState
+	wg        sync.WaitGroup
+	initState initState
 
 	// mu is used to ensure that `Start()` and `Exited()` calls return in
 	// the right order when invoked in separate go routines.

@@ -212,6 +212,7 @@ func (p *Init) Pid() int {
 func (p *Init) ExitStatus() int {
 	p.mu.Lock()
 	defer p.mu.Unlock()
+
 	return p.status
 }
 

@@ -219,6 +220,7 @@ func (p *Init) ExitStatus() int {
 func (p *Init) ExitedAt() time.Time {
 	p.mu.Lock()
 	defer p.mu.Unlock()
+
 	return p.exited
 }
 

@@ -226,6 +228,7 @@ func (p *Init) ExitedAt() time.Time {
 func (p *Init) Status(ctx context.Context) (string, error) {
 	p.mu.Lock()
 	defer p.mu.Unlock()
+
 	c, err := p.runtime.State(ctx, p.id)
 	if err != nil {
 		if strings.Contains(err.Error(), "does not exist") {

@@ -236,11 +239,27 @@ func (p *Init) Status(ctx context.Context) (string, error) {
 	return c.Status, nil
 }
 
-func (p *Init) start(context context.Context) error {
-	err := p.runtime.Start(context, p.id)
+// Start the init process
+func (p *Init) Start(ctx context.Context) error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	return p.initState.Start(ctx)
+}
+
+func (p *Init) start(ctx context.Context) error {
+	err := p.runtime.Start(ctx, p.id)
 	return p.runtimeError(err, "OCI runtime start failed")
 }
 
+// SetExited of the init process with the next status
+func (p *Init) SetExited(status int) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	p.initState.SetExited(status)
+}
+
 func (p *Init) setExited(status int) {
 	p.exited = time.Now()
 	p.status = status

@@ -248,9 +267,17 @@ func (p *Init) setExited(status int) {
 	close(p.waitBlock)
 }
 
-func (p *Init) delete(context context.Context) error {
+// Delete the init process
+func (p *Init) Delete(ctx context.Context) error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	return p.initState.Delete(ctx)
+}
+
+func (p *Init) delete(ctx context.Context) error {
 	p.wg.Wait()
-	err := p.runtime.Delete(context, p.id, nil)
+	err := p.runtime.Delete(ctx, p.id, nil)
 	// ignore errors if a runtime has already deleted the process
 	// but we still hold metadata and pipes
 	//

@@ -270,7 +297,7 @@ func (p *Init) delete(context context.Context) error {
 		p.io.Close()
 	}
 	if err2 := mount.UnmountAll(p.Rootfs, 0); err2 != nil {
-		log.G(context).WithError(err2).Warn("failed to cleanup rootfs mount")
+		log.G(ctx).WithError(err2).Warn("failed to cleanup rootfs mount")
 		if err == nil {
 			err = errors.Wrap(err2, "failed rootfs umount")
 		}

@@ -278,6 +305,17 @@ func (p *Init) delete(context context.Context) error {
 	return err
 }
 
+// Resize the init processes console
+func (p *Init) Resize(ws console.WinSize) error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if p.console == nil {
+		return nil
+	}
+	return p.console.Resize(ws)
+}
+
 func (p *Init) resize(ws console.WinSize) error {
 	if p.console == nil {
 		return nil

@@ -285,26 +323,40 @@ func (p *Init) resize(ws console.WinSize) error {
 	return p.console.Resize(ws)
 }
 
-func (p *Init) pause(context context.Context) error {
-	err := p.runtime.Pause(context, p.id)
-	return p.runtimeError(err, "OCI runtime pause failed")
+// Pause the init process and all its child processes
+func (p *Init) Pause(ctx context.Context) error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	return p.initState.Pause(ctx)
 }
 
-func (p *Init) resume(context context.Context) error {
-	err := p.runtime.Resume(context, p.id)
-	return p.runtimeError(err, "OCI runtime resume failed")
+// Resume the init process and all its child processes
+func (p *Init) Resume(ctx context.Context) error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	return p.initState.Resume(ctx)
 }
 
-func (p *Init) kill(context context.Context, signal uint32, all bool) error {
-	err := p.runtime.Kill(context, p.id, int(signal), &runc.KillOpts{
+// Kill the init process
+func (p *Init) Kill(ctx context.Context, signal uint32, all bool) error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	return p.initState.Kill(ctx, signal, all)
+}
+
+func (p *Init) kill(ctx context.Context, signal uint32, all bool) error {
+	err := p.runtime.Kill(ctx, p.id, int(signal), &runc.KillOpts{
 		All: all,
 	})
 	return checkKillError(err)
 }
 
 // KillAll processes belonging to the init process
-func (p *Init) KillAll(context context.Context) error {
-	err := p.runtime.Kill(context, p.id, int(syscall.SIGKILL), &runc.KillOpts{
+func (p *Init) KillAll(ctx context.Context) error {
+	err := p.runtime.Kill(ctx, p.id, int(syscall.SIGKILL), &runc.KillOpts{
 		All: true,
 	})
 	return p.runtimeError(err, "OCI runtime killall failed")

@@ -320,8 +372,16 @@ func (p *Init) Runtime() *runc.Runc {
 	return p.runtime
 }
 
+// Exec returns a new child process
+func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	return p.initState.Exec(ctx, path, r)
+}
+
 // exec returns a new exec'd process
-func (p *Init) exec(context context.Context, path string, r *ExecConfig) (proc.Process, error) {
+func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
 	// process exec request
 	var spec specs.Process
 	if err := json.Unmarshal(r.Spec.Value, &spec); err != nil {

@@ -342,18 +402,26 @@ func (p *Init) exec(context context.Context, path string, r *ExecConfig) (proc.P
 		},
 		waitBlock: make(chan struct{}),
 	}
-	e.State = &execCreatedState{p: e}
+	e.execState = &execCreatedState{p: e}
 	return e, nil
 }
 
-func (p *Init) checkpoint(context context.Context, r *CheckpointConfig) error {
+// Checkpoint the init process
+func (p *Init) Checkpoint(ctx context.Context, r *CheckpointConfig) error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	return p.initState.Checkpoint(ctx, r)
+}
+
+func (p *Init) checkpoint(ctx context.Context, r *CheckpointConfig) error {
 	var actions []runc.CheckpointAction
 	if !r.Exit {
 		actions = append(actions, runc.LeaveRunning)
 	}
 	work := filepath.Join(p.WorkDir, "criu-work")
 	defer os.RemoveAll(work)
-	if err := p.runtime.Checkpoint(context, p.id, &runc.CheckpointOpts{
+	if err := p.runtime.Checkpoint(ctx, p.id, &runc.CheckpointOpts{
 		WorkDir:      work,
 		ImagePath:    r.Path,
 		AllowOpenTCP: r.AllowOpenTCP,

@@ -364,19 +432,27 @@ func (p *Init) checkpoint(context context.Context, r *CheckpointConfig) error {
 	}, actions...); err != nil {
 		dumpLog := filepath.Join(p.Bundle, "criu-dump.log")
 		if cerr := copyFile(dumpLog, filepath.Join(work, "dump.log")); cerr != nil {
-			log.G(context).Error(err)
+			log.G(ctx).Error(err)
 		}
 		return fmt.Errorf("%s path= %s", criuError(err), dumpLog)
 	}
 	return nil
 }
 
-func (p *Init) update(context context.Context, r *google_protobuf.Any) error {
+// Update the processes resource configuration
+func (p *Init) Update(ctx context.Context, r *google_protobuf.Any) error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	return p.initState.Update(ctx, r)
+}
+
+func (p *Init) update(ctx context.Context, r *google_protobuf.Any) error {
 	var resources specs.LinuxResources
 	if err := json.Unmarshal(r.Value, &resources); err != nil {
 		return err
 	}
-	return p.runtime.Update(context, p.id, &resources)
+	return p.runtime.Update(ctx, p.id, &resources)
 }
 
 // Stdio of the process
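A side effect of these hunks worth noting: every parameter previously named context is renamed to ctx. Beyond style, the old name shadowed the context package inside those functions, which blocks calls like context.Background() (used below in init_state.go). A tiny illustrative sketch, not the vendored code:

    package main

    import (
    	"context"
    	"fmt"
    )

    // If this parameter were named "context", the identifier would shadow
    // the imported package, and the context.Background() call below would
    // not compile. Naming it ctx keeps the package name usable.
    func useCtx(ctx context.Context) {
    	_ = context.Background()
    	fmt.Println("ctx leaves the package name usable:", ctx.Err())
    }

    func main() {
    	useCtx(context.Background())
    }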
vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init_state.go | 180 (generated, vendored)

@@ -30,16 +30,20 @@ import (
 	runc "github.com/containerd/go-runc"
 	google_protobuf "github.com/gogo/protobuf/types"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
 type initState interface {
-	proc.State
-
+	Resize(console.WinSize) error
+	Start(context.Context) error
+	Delete(context.Context) error
 	Pause(context.Context) error
 	Resume(context.Context) error
 	Update(context.Context, *google_protobuf.Any) error
 	Checkpoint(context.Context, *CheckpointConfig) error
 	Exec(context.Context, string, *ExecConfig) (proc.Process, error)
+	Kill(context.Context, uint32, bool) error
+	SetExited(int)
 }
 
 type createdState struct {

@@ -61,43 +65,26 @@ func (s *createdState) transition(name string) error {
 }
 
 func (s *createdState) Pause(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return errors.Errorf("cannot pause task in created state")
 }
 
 func (s *createdState) Resume(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return errors.Errorf("cannot resume task in created state")
 }
 
-func (s *createdState) Update(context context.Context, r *google_protobuf.Any) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
-	return s.p.update(context, r)
+func (s *createdState) Update(ctx context.Context, r *google_protobuf.Any) error {
+	return s.p.update(ctx, r)
 }
 
-func (s *createdState) Checkpoint(context context.Context, r *CheckpointConfig) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
+func (s *createdState) Checkpoint(ctx context.Context, r *CheckpointConfig) error {
 	return errors.Errorf("cannot checkpoint a task in created state")
 }
 
 func (s *createdState) Resize(ws console.WinSize) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return s.p.resize(ws)
 }
 
 func (s *createdState) Start(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
 	if err := s.p.start(ctx); err != nil {
 		return err
 	}

@@ -105,8 +92,6 @@ func (s *createdState) Start(ctx context.Context) error {
 }
 
 func (s *createdState) Delete(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
 	if err := s.p.delete(ctx); err != nil {
 		return err
 	}

@@ -114,16 +99,10 @@ func (s *createdState) Delete(ctx context.Context) error {
 }
 
 func (s *createdState) Kill(ctx context.Context, sig uint32, all bool) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return s.p.kill(ctx, sig, all)
 }
 
 func (s *createdState) SetExited(status int) {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	s.p.setExited(status)
 
 	if err := s.transition("stopped"); err != nil {

@@ -132,8 +111,6 @@ func (s *createdState) SetExited(status int) {
 }
 
 func (s *createdState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
 	return s.p.exec(ctx, path, r)
 }
 

@@ -157,43 +134,26 @@ func (s *createdCheckpointState) transition(name string) error {
 }
 
 func (s *createdCheckpointState) Pause(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return errors.Errorf("cannot pause task in created state")
 }
 
 func (s *createdCheckpointState) Resume(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return errors.Errorf("cannot resume task in created state")
 }
 
-func (s *createdCheckpointState) Update(context context.Context, r *google_protobuf.Any) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
-	return s.p.update(context, r)
+func (s *createdCheckpointState) Update(ctx context.Context, r *google_protobuf.Any) error {
+	return s.p.update(ctx, r)
 }
 
-func (s *createdCheckpointState) Checkpoint(context context.Context, r *CheckpointConfig) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
+func (s *createdCheckpointState) Checkpoint(ctx context.Context, r *CheckpointConfig) error {
 	return errors.Errorf("cannot checkpoint a task in created state")
 }
 
 func (s *createdCheckpointState) Resize(ws console.WinSize) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return s.p.resize(ws)
 }
 
 func (s *createdCheckpointState) Start(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
 	p := s.p
 	sio := p.stdio
 

@@ -247,8 +207,6 @@ func (s *createdCheckpointState) Start(ctx context.Context) error {
 }
 
 func (s *createdCheckpointState) Delete(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
 	if err := s.p.delete(ctx); err != nil {
 		return err
 	}

@@ -256,16 +214,10 @@ func (s *createdCheckpointState) Delete(ctx context.Context) error {
 }
 
 func (s *createdCheckpointState) Kill(ctx context.Context, sig uint32, all bool) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return s.p.kill(ctx, sig, all)
 }
 
 func (s *createdCheckpointState) SetExited(status int) {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	s.p.setExited(status)
 
 	if err := s.transition("stopped"); err != nil {

@@ -274,9 +226,6 @@ func (s *createdCheckpointState) SetExited(status int) {
 }
 
 func (s *createdCheckpointState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return nil, errors.Errorf("cannot exec in a created state")
 }
 

@@ -297,67 +246,42 @@ func (s *runningState) transition(name string) error {
 }
 
 func (s *runningState) Pause(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-	if err := s.p.pause(ctx); err != nil {
-		return err
+	if err := s.p.runtime.Pause(ctx, s.p.id); err != nil {
+		return s.p.runtimeError(err, "OCI runtime pause failed")
 	}
 
 	return s.transition("paused")
 }
 
 func (s *runningState) Resume(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return errors.Errorf("cannot resume a running process")
 }
 
-func (s *runningState) Update(context context.Context, r *google_protobuf.Any) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
-	return s.p.update(context, r)
+func (s *runningState) Update(ctx context.Context, r *google_protobuf.Any) error {
+	return s.p.update(ctx, r)
 }
 
 func (s *runningState) Checkpoint(ctx context.Context, r *CheckpointConfig) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return s.p.checkpoint(ctx, r)
 }
 
 func (s *runningState) Resize(ws console.WinSize) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return s.p.resize(ws)
 }
 
 func (s *runningState) Start(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return errors.Errorf("cannot start a running process")
 }
 
 func (s *runningState) Delete(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return errors.Errorf("cannot delete a running process")
 }
 
 func (s *runningState) Kill(ctx context.Context, sig uint32, all bool) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return s.p.kill(ctx, sig, all)
 }
 
 func (s *runningState) SetExited(status int) {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	s.p.setExited(status)
 
 	if err := s.transition("stopped"); err != nil {

@@ -366,8 +290,6 @@ func (s *runningState) SetExited(status int) {
 }
 
 func (s *runningState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
 	return s.p.exec(ctx, path, r)
 }
 

@@ -388,79 +310,54 @@ func (s *pausedState) transition(name string) error {
 }
 
 func (s *pausedState) Pause(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return errors.Errorf("cannot pause a paused container")
 }
 
 func (s *pausedState) Resume(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
-	if err := s.p.resume(ctx); err != nil {
-		return err
+	if err := s.p.runtime.Resume(ctx, s.p.id); err != nil {
+		return s.p.runtimeError(err, "OCI runtime resume failed")
 	}
 
 	return s.transition("running")
 }
 
-func (s *pausedState) Update(context context.Context, r *google_protobuf.Any) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
-	return s.p.update(context, r)
+func (s *pausedState) Update(ctx context.Context, r *google_protobuf.Any) error {
+	return s.p.update(ctx, r)
 }
 
 func (s *pausedState) Checkpoint(ctx context.Context, r *CheckpointConfig) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return s.p.checkpoint(ctx, r)
 }
 
 func (s *pausedState) Resize(ws console.WinSize) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return s.p.resize(ws)
 }
 
 func (s *pausedState) Start(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return errors.Errorf("cannot start a paused process")
 }
 
 func (s *pausedState) Delete(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return errors.Errorf("cannot delete a paused process")
 }
 
 func (s *pausedState) Kill(ctx context.Context, sig uint32, all bool) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return s.p.kill(ctx, sig, all)
 }
 
 func (s *pausedState) SetExited(status int) {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	s.p.setExited(status)
 
+	if err := s.p.runtime.Resume(context.Background(), s.p.id); err != nil {
+		logrus.WithError(err).Error("resuming exited container from paused state")
+	}
+
 	if err := s.transition("stopped"); err != nil {
 		panic(err)
 	}
 }
 
 func (s *pausedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return nil, errors.Errorf("cannot exec in a paused state")
 }
 

@@ -479,50 +376,30 @@ func (s *stoppedState) transition(name string) error {
 }
 
 func (s *stoppedState) Pause(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return errors.Errorf("cannot pause a stopped container")
 }
 
 func (s *stoppedState) Resume(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return errors.Errorf("cannot resume a stopped container")
 }
 
-func (s *stoppedState) Update(context context.Context, r *google_protobuf.Any) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
+func (s *stoppedState) Update(ctx context.Context, r *google_protobuf.Any) error {
 	return errors.Errorf("cannot update a stopped container")
 }
 
 func (s *stoppedState) Checkpoint(ctx context.Context, r *CheckpointConfig) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return errors.Errorf("cannot checkpoint a stopped container")
 }
 
 func (s *stoppedState) Resize(ws console.WinSize) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return errors.Errorf("cannot resize a stopped container")
 }
 
 func (s *stoppedState) Start(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return errors.Errorf("cannot start a stopped process")
 }
 
 func (s *stoppedState) Delete(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
 	if err := s.p.delete(ctx); err != nil {
 		return err
 	}

@@ -538,8 +415,5 @@ func (s *stoppedState) SetExited(status int) {
 }
 
 func (s *stoppedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
-
 	return nil, errors.Errorf("cannot exec in a stopped state")
 }
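The invariant after this file's changes: the exported Init wrappers take p.mu once, and the state implementations must never lock it again, because Go's sync.Mutex is not reentrant and a second Lock on the same goroutine deadlocks. A minimal sketch of the fixed convention (hypothetical names):

    package main

    import (
    	"fmt"
    	"sync"
    )

    type proc struct {
    	mu sync.Mutex
    }

    // Pause is the only place the mutex is taken; the state logic runs
    // with the lock already held.
    func (p *proc) Pause() error {
    	p.mu.Lock()
    	defer p.mu.Unlock()
    	return p.pauseLocked()
    }

    // pauseLocked must NOT call p.mu.Lock() again; before this change the
    // state methods did exactly that, which self-deadlocks in Go.
    func (p *proc) pauseLocked() error {
    	return nil
    }

    func main() {
    	p := &proc{}
    	fmt.Println(p.Pause())
    }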
vendor/github.com/containerd/containerd/runtime/v1/shim/reaper.go | 3 (generated, vendored)

@@ -31,7 +31,7 @@ import (
 // ErrNoSuchProcess is returned when the process no longer exists
 var ErrNoSuchProcess = errors.New("no such process")
 
-const bufferSize = 32
+const bufferSize = 2048
 
 // Reap should be called when the process receives an SIGCHLD. Reap will reap
 // all exited processes and close their wait channels

@@ -47,7 +47,6 @@ func Reap() error {
 				Status: e.Status,
 			}
 		}
-
 	}
 	Default.Unlock()
 	return err
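Raising bufferSize from 32 to 2048 gives the SIGCHLD reaper far more headroom before a slow subscriber causes a send to block. A sketch of the buffered-channel idea with a stand-in Exit type (the vendored code uses go-runc's Exit):

    package main

    import "fmt"

    // Exit is a stand-in for go-runc's runc.Exit.
    type Exit struct{ Pid, Status int }

    func main() {
    	const bufferSize = 2048
    	subscriber := make(chan Exit, bufferSize)

    	// Publishing stays non-blocking while the buffer has room, so the
    	// reaper is not stalled by subscribers that drain slowly.
    	for pid := 1; pid <= 3; pid++ {
    		subscriber <- Exit{Pid: pid, Status: 0}
    	}
    	close(subscriber)
    	for e := range subscriber {
    		fmt.Printf("reaped pid %d status %d\n", e.Pid, e.Status)
    	}
    }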
vendor/github.com/containerd/containerd/runtime/v1/shim/service.go | 156 (generated, vendored)

@@ -114,9 +114,6 @@ type Service struct {
 
 // Create a new initial process and container with the underlying OCI runtime
 func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ *shimapi.CreateTaskResponse, err error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
 	var mounts []proc.Mount
 	for _, m := range r.Rootfs {
 		mounts = append(mounts, proc.Mount{

@@ -158,6 +155,10 @@ func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ *
 			return nil, errors.Wrapf(err, "failed to mount rootfs component %v", m)
 		}
 	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
 	process, err := newInit(
 		ctx,
 		s.config.Path,

@@ -187,11 +188,9 @@ func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ *
 
 // Start a process
 func (s *Service) Start(ctx context.Context, r *shimapi.StartRequest) (*shimapi.StartResponse, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	p := s.processes[r.ID]
-	if p == nil {
-		return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "process %s", r.ID)
+	p, err := s.getExecProcess(r.ID)
+	if err != nil {
+		return nil, err
 	}
 	if err := p.Start(ctx); err != nil {
 		return nil, err

@@ -204,16 +203,16 @@ func (s *Service) Start(ctx context.Context, r *shimapi.StartRequest) (*shimapi.
 
 // Delete the initial process and container
 func (s *Service) Delete(ctx context.Context, r *ptypes.Empty) (*shimapi.DeleteResponse, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	p := s.processes[s.id]
-	if p == nil {
-		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
+	p, err := s.getInitProcess()
+	if err != nil {
+		return nil, err
 	}
 	if err := p.Delete(ctx); err != nil {
 		return nil, err
 	}
+	s.mu.Lock()
 	delete(s.processes, s.id)
+	s.mu.Unlock()
 	s.platform.Close()
 	return &shimapi.DeleteResponse{
 		ExitStatus: uint32(p.ExitStatus()),

@@ -227,11 +226,9 @@ func (s *Service) DeleteProcess(ctx context.Context, r *shimapi.DeleteProcessReq
 	if r.ID == s.id {
 		return nil, status.Errorf(codes.InvalidArgument, "cannot delete init process with DeleteProcess")
 	}
-	s.mu.Lock()
-	p := s.processes[r.ID]
-	s.mu.Unlock()
-	if p == nil {
-		return nil, errors.Wrapf(errdefs.ErrNotFound, "process %s", r.ID)
+	p, err := s.getExecProcess(r.ID)
+	if err != nil {
+		return nil, err
 	}
 	if err := p.Delete(ctx); err != nil {
 		return nil, err

@@ -249,13 +246,14 @@ func (s *Service) DeleteProcess(ctx context.Context, r *shimapi.DeleteProcessReq
 // Exec an additional process inside the container
 func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*ptypes.Empty, error) {
 	s.mu.Lock()
-	defer s.mu.Unlock()
-
 	if p := s.processes[r.ID]; p != nil {
+		s.mu.Unlock()
 		return nil, errdefs.ToGRPCf(errdefs.ErrAlreadyExists, "id %s", r.ID)
 	}
 
 	p := s.processes[s.id]
+	s.mu.Unlock()
 	if p == nil {
 		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
 	}

@@ -271,14 +269,14 @@ func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*pty
 	if err != nil {
 		return nil, errdefs.ToGRPC(err)
 	}
+	s.mu.Lock()
 	s.processes[r.ID] = process
+	s.mu.Unlock()
 	return empty, nil
 }
 
 // ResizePty of a process
 func (s *Service) ResizePty(ctx context.Context, r *shimapi.ResizePtyRequest) (*ptypes.Empty, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
 	if r.ID == "" {
 		return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "id not provided")
 	}

@@ -286,7 +284,9 @@ func (s *Service) ResizePty(ctx context.Context, r *shimapi.ResizePtyRequest) (*
 		Width:  uint16(r.Width),
 		Height: uint16(r.Height),
 	}
+	s.mu.Lock()
 	p := s.processes[r.ID]
+	s.mu.Unlock()
 	if p == nil {
 		return nil, errors.Errorf("process does not exist %s", r.ID)
 	}

@@ -298,11 +298,9 @@ func (s *Service) ResizePty(ctx context.Context, r *shimapi.ResizePtyRequest) (*
 
 // State returns runtime state information for a process
 func (s *Service) State(ctx context.Context, r *shimapi.StateRequest) (*shimapi.StateResponse, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	p := s.processes[r.ID]
-	if p == nil {
-		return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "process id %s", r.ID)
+	p, err := s.getExecProcess(r.ID)
+	if err != nil {
+		return nil, err
 	}
 	st, err := p.Status(ctx)
 	if err != nil {

@@ -338,11 +336,9 @@ func (s *Service) State(ctx context.Context, r *shimapi.StateRequest) (*shimapi.
 
 // Pause the container
 func (s *Service) Pause(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	p := s.processes[s.id]
-	if p == nil {
-		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
+	p, err := s.getInitProcess()
+	if err != nil {
+		return nil, err
 	}
 	if err := p.(*proc.Init).Pause(ctx); err != nil {
 		return nil, err

@@ -352,11 +348,9 @@ func (s *Service) Pause(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, er
 
 // Resume the container
 func (s *Service) Resume(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	p := s.processes[s.id]
-	if p == nil {
-		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
+	p, err := s.getInitProcess()
+	if err != nil {
+		return nil, err
 	}
 	if err := p.(*proc.Init).Resume(ctx); err != nil {
 		return nil, err

@@ -366,12 +360,10 @@ func (s *Service) Resume(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, e
 
 // Kill a process with the provided signal
 func (s *Service) Kill(ctx context.Context, r *shimapi.KillRequest) (*ptypes.Empty, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
 	if r.ID == "" {
-		p := s.processes[s.id]
-		if p == nil {
-			return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
+		p, err := s.getInitProcess()
+		if err != nil {
+			return nil, err
 		}
 		if err := p.Kill(ctx, r.Signal, r.All); err != nil {
 			return nil, errdefs.ToGRPC(err)

@@ -379,9 +371,9 @@ func (s *Service) Kill(ctx context.Context, r *shimapi.KillRequest) (*ptypes.Emp
 		return empty, nil
 	}
 
-	p := s.processes[r.ID]
-	if p == nil {
-		return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "process id %s not found", r.ID)
+	p, err := s.getExecProcess(r.ID)
+	if err != nil {
+		return nil, err
 	}
 	if err := p.Kill(ctx, r.Signal, r.All); err != nil {
 		return nil, errdefs.ToGRPC(err)

@@ -422,11 +414,9 @@ func (s *Service) ListPids(ctx context.Context, r *shimapi.ListPidsRequest) (*sh
 
 // CloseIO of a process
 func (s *Service) CloseIO(ctx context.Context, r *shimapi.CloseIORequest) (*ptypes.Empty, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	p := s.processes[r.ID]
-	if p == nil {
-		return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "process does not exist %s", r.ID)
+	p, err := s.getExecProcess(r.ID)
+	if err != nil {
+		return nil, err
 	}
 	if stdin := p.Stdin(); stdin != nil {
 		if err := stdin.Close(); err != nil {

@@ -438,11 +428,9 @@ func (s *Service) CloseIO(ctx context.Context, r *shimapi.CloseIORequest) (*ptyp
 
 // Checkpoint the container
 func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) (*ptypes.Empty, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	p := s.processes[s.id]
-	if p == nil {
-		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
+	p, err := s.getInitProcess()
+	if err != nil {
+		return nil, err
 	}
 	var options runctypes.CheckpointOptions
 	if r.Options != nil {

@@ -475,11 +463,9 @@ func (s *Service) ShimInfo(ctx context.Context, r *ptypes.Empty) (*shimapi.ShimI
 
 // Update a running container
 func (s *Service) Update(ctx context.Context, r *shimapi.UpdateTaskRequest) (*ptypes.Empty, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	p := s.processes[s.id]
-	if p == nil {
-		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
+	p, err := s.getInitProcess()
+	if err != nil {
+		return nil, err
 	}
 	if err := p.(*proc.Init).Update(ctx, r.Resources); err != nil {
 		return nil, errdefs.ToGRPC(err)

@@ -489,11 +475,9 @@ func (s *Service) Update(ctx context.Context, r *shimapi.UpdateTaskRequest) (*pt
 
 // Wait for a process to exit
 func (s *Service) Wait(ctx context.Context, r *shimapi.WaitRequest) (*shimapi.WaitResponse, error) {
-	s.mu.Lock()
-	p := s.processes[r.ID]
-	s.mu.Unlock()
-	if p == nil {
-		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
+	p, err := s.getExecProcess(r.ID)
+	if err != nil {
+		return nil, err
 	}
 	p.Wait()
 

@@ -509,16 +493,24 @@ func (s *Service) processExits() {
 	}
 }
 
-func (s *Service) checkProcesses(e runc.Exit) {
+func (s *Service) allProcesses() []rproc.Process {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
+	res := make([]rproc.Process, 0, len(s.processes))
+	for _, p := range s.processes {
+		res = append(res, p)
+	}
+	return res
+}
+
+func (s *Service) checkProcesses(e runc.Exit) {
 	shouldKillAll, err := shouldKillAllOnExit(s.bundle)
 	if err != nil {
 		log.G(s.context).WithError(err).Error("failed to check shouldKillAll")
 	}
 
-	for _, p := range s.processes {
+	for _, p := range s.allProcesses() {
 		if p.Pid() == e.Pid {
 
 			if shouldKillAll {

@@ -563,11 +555,9 @@ func shouldKillAllOnExit(bundlePath string) (bool, error) {
 }
 
 func (s *Service) getContainerPids(ctx context.Context, id string) ([]uint32, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	p := s.processes[s.id]
-	if p == nil {
-		return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "container must be created")
+	p, err := s.getInitProcess()
+	if err != nil {
+		return nil, err
 	}
 
 	ps, err := p.(*proc.Init).Runtime().Ps(ctx, id)

@@ -589,6 +579,30 @@ func (s *Service) forward(publisher events.Publisher) {
 	}
 }
 
+// getInitProcess returns initial process
+func (s *Service) getInitProcess() (rproc.Process, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	p := s.processes[s.id]
+	if p == nil {
+		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
+	}
+	return p, nil
+}
+
+// getExecProcess returns exec process
+func (s *Service) getExecProcess(id string) (rproc.Process, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	p := s.processes[id]
+	if p == nil {
+		return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "process %s does not exist", id)
+	}
+	return p, nil
+}
+
 func getTopic(ctx context.Context, e interface{}) string {
 	switch e.(type) {
 	case *eventstypes.TaskCreate:
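The service changes all follow one idea: instead of holding s.mu across entire RPC handlers, the lock is narrowed to the map operations (getInitProcess, getExecProcess, allProcesses), so process methods that take their own locks are never called with the service lock held. The allProcesses copy-then-iterate pattern in particular is a common way to avoid that; a self-contained sketch with an int stand-in for rproc.Process:

    package main

    import (
    	"fmt"
    	"sync"
    )

    type service struct {
    	mu        sync.Mutex
    	processes map[string]int // id -> pid (stand-in for rproc.Process)
    }

    // snapshot copies the map under the lock so the caller can iterate,
    // and call methods that may themselves lock, without holding s.mu.
    func (s *service) snapshot() []int {
    	s.mu.Lock()
    	defer s.mu.Unlock()
    	res := make([]int, 0, len(s.processes))
    	for _, p := range s.processes {
    		res = append(res, p)
    	}
    	return res
    }

    func main() {
    	s := &service{processes: map[string]int{"init": 1, "exec-1": 42}}
    	for _, pid := range s.snapshot() {
    		fmt.Println("checking pid", pid) // s.mu already released here
    	}
    }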
vendor/github.com/containerd/containerd/vendor.conf | 10 (generated, vendored)

@@ -20,8 +20,8 @@ github.com/gogo/protobuf v1.0.0
 github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
 github.com/golang/protobuf v1.1.0
 github.com/opencontainers/runtime-spec eba862dc2470385a233c7507392675cbeadf7353 # v1.0.1-45-geba862d
-github.com/opencontainers/runc 58592df56734acf62e574865fe40b9e53e967910
-github.com/sirupsen/logrus v1.0.0
+github.com/opencontainers/runc 10d38b660a77168360df3522881e2dc2be5056bd
+github.com/sirupsen/logrus v1.0.3
 github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
 golang.org/x/net b3756b4b77d7b13260a0a2ec658753cf48922eac
 google.golang.org/grpc v1.12.0

@@ -33,7 +33,7 @@ golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
 github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
 github.com/Microsoft/go-winio v0.4.11
-github.com/Microsoft/hcsshim v0.7.12
+github.com/Microsoft/hcsshim v0.8.1
 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
 github.com/containerd/ttrpc 2a805f71863501300ae1976d29f0454ae003e85a

@@ -81,9 +81,9 @@ k8s.io/kubernetes v1.12.0
 k8s.io/utils cd34563cd63c2bd7c6fe88a73c4dcf34ed8a67cb
 
 # zfs dependencies
-github.com/containerd/zfs 9a0b8b8b5982014b729cd34eb7cd7a11062aa6ec
+github.com/containerd/zfs 9f6ef3b1fe5144bd91fe5855b4eba81bc0d17d03
 github.com/mistifyio/go-zfs 166add352731e515512690329794ee593f1aaff2
 github.com/pborman/uuid c65b2f87fee37d1c7854c9164a450713c28d50cd
 
 # aufs dependencies
-github.com/containerd/aufs ffa39970e26ad01d81f540b21e65f9c1841a5f92
+github.com/containerd/aufs da3cf16bfbe68ba8f114f1536a05c01528a25434
vendor/github.com/opencontainers/runc/README.md | 1 (generated, vendored)

@@ -68,6 +68,7 @@ make BUILDTAGS='seccomp apparmor'
 | selinux   | selinux process and mount labeling    | <none>     |
 | apparmor  | apparmor profile support              | <none>     |
 | ambient   | ambient capability support            | kernel 4.3 |
+| nokmem    | disable kernel memory account         | <none>     |
 
 
 ### Running the test suite
vendor/github.com/opencontainers/runc/libcontainer/README.md | 1 (generated, vendored)

@@ -148,6 +148,7 @@ config := &configs.Config{
 		{Type: configs.NEWPID},
 		{Type: configs.NEWUSER},
 		{Type: configs.NEWNET},
+		{Type: configs.NEWCGROUP},
 	}),
 	Cgroups: &configs.Cgroup{
 		Name: "test-container",
vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go | 8 (generated, vendored)

@@ -17,7 +17,7 @@ import (
 )
 
 const (
-	cgroupNamePrefix = "name="
+	CgroupNamePrefix = "name="
 	CgroupProcesses  = "cgroup.procs"
 )
 

@@ -156,8 +156,8 @@ func getCgroupMountsHelper(ss map[string]bool, mi io.Reader, all bool) ([]Mount,
 			continue
 		}
 		ss[opt] = true
-		if strings.HasPrefix(opt, cgroupNamePrefix) {
-			opt = opt[len(cgroupNamePrefix):]
+		if strings.HasPrefix(opt, CgroupNamePrefix) {
+			opt = opt[len(CgroupNamePrefix):]
 		}
 		m.Subsystems = append(m.Subsystems, opt)
 		numFound++

@@ -343,7 +343,7 @@ func getControllerPath(subsystem string, cgroups map[string]string) (string, err
 		return p, nil
 	}
 
-	if p, ok := cgroups[cgroupNamePrefix+subsystem]; ok {
+	if p, ok := cgroups[CgroupNamePrefix+subsystem]; ok {
 		return p, nil
 	}
 
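Exporting the constant (cgroupNamePrefix becomes CgroupNamePrefix) lets code outside the cgroups package strip the "name=" prefix that named hierarchies such as "name=systemd" carry in mountinfo and /proc/self/cgroup. A small illustrative sketch, not the vendored helper:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // CgroupNamePrefix mirrors the newly exported constant: named cgroup
    // hierarchies appear as "name=<x>" rather than a real controller name.
    const CgroupNamePrefix = "name="

    func subsystemName(opt string) string {
    	// "name=systemd" -> "systemd"; real controllers pass through as-is.
    	return strings.TrimPrefix(opt, CgroupNamePrefix)
    }

    func main() {
    	fmt.Println(subsystemName("name=systemd")) // systemd
    	fmt.Println(subsystemName("memory"))       // memory
    }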
vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go | 16 (generated, vendored)

@@ -7,12 +7,13 @@ import (
 )
 
 const (
-	NEWNET  NamespaceType = "NEWNET"
-	NEWPID  NamespaceType = "NEWPID"
-	NEWNS   NamespaceType = "NEWNS"
-	NEWUTS  NamespaceType = "NEWUTS"
-	NEWIPC  NamespaceType = "NEWIPC"
-	NEWUSER NamespaceType = "NEWUSER"
+	NEWNET    NamespaceType = "NEWNET"
+	NEWPID    NamespaceType = "NEWPID"
+	NEWNS     NamespaceType = "NEWNS"
+	NEWUTS    NamespaceType = "NEWUTS"
+	NEWIPC    NamespaceType = "NEWIPC"
+	NEWUSER   NamespaceType = "NEWUSER"
+	NEWCGROUP NamespaceType = "NEWCGROUP"
 )
 
 var (

@@ -35,6 +36,8 @@ func NsName(ns NamespaceType) string {
 		return "user"
 	case NEWUTS:
 		return "uts"
+	case NEWCGROUP:
+		return "cgroup"
 	}
 	return ""
 }

@@ -68,6 +71,7 @@ func NamespaceTypes() []NamespaceType {
 		NEWNET,
 		NEWPID,
 		NEWNS,
+		NEWCGROUP,
 	}
 }
 
vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go | 13 (generated, vendored)

@@ -9,12 +9,13 @@ func (n *Namespace) Syscall() int {
 }
 
 var namespaceInfo = map[NamespaceType]int{
-	NEWNET:  unix.CLONE_NEWNET,
-	NEWNS:   unix.CLONE_NEWNS,
-	NEWUSER: unix.CLONE_NEWUSER,
-	NEWIPC:  unix.CLONE_NEWIPC,
-	NEWUTS:  unix.CLONE_NEWUTS,
-	NEWPID:  unix.CLONE_NEWPID,
+	NEWNET:    unix.CLONE_NEWNET,
+	NEWNS:     unix.CLONE_NEWNS,
+	NEWUSER:   unix.CLONE_NEWUSER,
+	NEWIPC:    unix.CLONE_NEWIPC,
+	NEWUTS:    unix.CLONE_NEWUTS,
+	NEWPID:    unix.CLONE_NEWPID,
+	NEWCGROUP: unix.CLONE_NEWCGROUP,
 }
 
 // CloneFlags parses the container's Namespaces options to set the correct
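The map extension means CloneFlags can now OR in the cgroup-namespace bit alongside the others. A self-contained sketch of how such a flag mask is assembled (stand-in constants mirroring the Linux values; the vendored code uses golang.org/x/sys/unix):

    package main

    import "fmt"

    // Stand-in clone-flag values; on Linux, unix.CLONE_NEWCGROUP is
    // 0x2000000 and unix.CLONE_NEWNET is 0x40000000.
    const (
    	CLONE_NEWNET    = 0x40000000
    	CLONE_NEWCGROUP = 0x02000000
    )

    var namespaceInfo = map[string]int{
    	"NEWNET":    CLONE_NEWNET,
    	"NEWCGROUP": CLONE_NEWCGROUP,
    }

    // cloneFlags mirrors the idea behind configs' CloneFlags: OR together
    // the bit for every namespace the container config requests.
    func cloneFlags(namespaces []string) uintptr {
    	var flag int
    	for _, ns := range namespaces {
    		flag |= namespaceInfo[ns]
    	}
    	return uintptr(flag)
    }

    func main() {
    	fmt.Printf("0x%x\n", cloneFlags([]string{"NEWNET", "NEWCGROUP"}))
    }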
vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c | 63 (generated, vendored)

@@ -42,6 +42,12 @@ enum sync_t {
 	SYNC_ERR = 0xFF, /* Fatal error, no turning back. The error code follows. */
 };
 
+/*
+ * Synchronisation value for cgroup namespace setup.
+ * The same constant is defined in process_linux.go as "createCgroupns".
+ */
+#define CREATECGROUPNS 0x80
+
 /* longjmp() arguments. */
 #define JUMP_PARENT 0x00
 #define JUMP_CHILD 0xA0

@@ -640,7 +646,6 @@ void nsexec(void)
 	case JUMP_PARENT:{
 			int len;
 			pid_t child, first_child = -1;
-			char buf[JSON_MAX];
 			bool ready = false;
 
 			/* For debugging. */

@@ -716,6 +721,18 @@ void nsexec(void)
 					kill(child, SIGKILL);
 					bail("failed to sync with child: write(SYNC_RECVPID_ACK)");
 				}
+
+				/* Send the init_func pid back to our parent.
+				 *
+				 * Send the init_func pid and the pid of the first child back to our parent.
+				 * We need to send both back because we can't reap the first child we created (CLONE_PARENT).
+				 * It becomes the responsibility of our parent to reap the first child.
+				 */
+				len = dprintf(pipenum, "{\"pid\": %d, \"pid_first\": %d}\n", child, first_child);
+				if (len < 0) {
+					kill(child, SIGKILL);
+					bail("unable to generate JSON for child pid");
+				}
 			}
 			break;
 		case SYNC_CHILD_READY:

@@ -759,23 +776,6 @@ void nsexec(void)
 				bail("unexpected sync value: %u", s);
 			}
 		}
-
-		/*
-		 * Send the init_func pid and the pid of the first child back to our parent.
-		 *
-		 * We need to send both back because we can't reap the first child we created (CLONE_PARENT).
-		 * It becomes the responsibility of our parent to reap the first child.
-		 */
-		len = snprintf(buf, JSON_MAX, "{\"pid\": %d, \"pid_first\": %d}\n", child, first_child);
-		if (len < 0) {
-			kill(child, SIGKILL);
-			bail("unable to generate JSON for child pid");
-		}
-		if (write(pipenum, buf, len) != len) {
-			kill(child, SIGKILL);
-			bail("unable to send child pid to bootstrapper");
-		}
-
 		exit(0);
 	}
 

@@ -862,14 +862,17 @@ void nsexec(void)
 		if (setresuid(0, 0, 0) < 0)
 			bail("failed to become root in user namespace");
 		}
 
 		/*
-		 * Unshare all of the namespaces. Note that we don't merge this
-		 * with clone() because there were some old kernel versions where
-		 * clone(CLONE_PARENT | CLONE_NEWPID) was broken, so we'll just do
-		 * it the long way.
+		 * Unshare all of the namespaces. Now, it should be noted that this
+		 * ordering might break in the future (especially with rootless
+		 * containers). But for now, it's not possible to split this into
+		 * CLONE_NEWUSER + [the rest] because of some RHEL SELinux issues.
+		 *
+		 * Note that we don't merge this with clone() because there were
+		 * some old kernel versions where clone(CLONE_PARENT | CLONE_NEWPID)
+		 * was broken, so we'll just do it the long way anyway.
		 */
-		if (unshare(config.cloneflags) < 0)
+		if (unshare(config.cloneflags & ~CLONE_NEWCGROUP) < 0)
 			bail("failed to unshare namespaces");
 
 		/*

@@ -958,6 +961,18 @@ void nsexec(void)
 				bail("setgroups failed");
 		}
 
+		/* ... wait until our topmost parent has finished cgroup setup in p.manager.Apply() ... */
+		if (config.cloneflags & CLONE_NEWCGROUP) {
+			uint8_t value;
+			if (read(pipenum, &value, sizeof(value)) != sizeof(value))
+				bail("read synchronisation value failed");
+			if (value == CREATECGROUPNS) {
+				if (unshare(CLONE_NEWCGROUP) < 0)
+					bail("failed to unshare cgroup namespace");
+			} else
+				bail("received unknown synchronisation value");
+		}
+
 		s = SYNC_CHILD_READY;
 		if (write(syncfd, &s, sizeof(s)) != sizeof(s))
 			bail("failed to sync with patent: write(SYNC_CHILD_READY)");
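The nsexec.c hunks implement a two-phase cgroup namespace setup: the child masks CLONE_NEWCGROUP out of its initial unshare(), blocks reading the sync pipe until the parent has finished cgroup setup, and only then unshares the cgroup namespace after seeing CREATECGROUPNS (0x80). The parent side of that handshake lives in Go (this diff's comment points at process_linux.go); a hedged, self-contained sketch of what writing the sync byte looks like, with hypothetical helper names:

    package main

    import (
    	"fmt"
    	"os"
    )

    // createCgroupns mirrors CREATECGROUPNS in nsexec.c: the parent writes
    // this byte on the sync pipe once cgroups are applied, and only then
    // does the child call unshare(CLONE_NEWCGROUP). Sketch only; the real
    // parent-side logic is in runc's process_linux.go.
    const createCgroupns = 0x80

    func signalCgroupNamespace(syncPipe *os.File) error {
    	if _, err := syncPipe.Write([]byte{createCgroupns}); err != nil {
    		return fmt.Errorf("signal cgroup namespace creation: %w", err)
    	}
    	return nil
    }

    func main() {
    	r, w, _ := os.Pipe() // stand-in for the parent/child sync pipe
    	defer r.Close()
    	if err := signalCgroupNamespace(w); err != nil {
    		panic(err)
    	}
    	w.Close()
    	buf := make([]byte, 1)
    	if _, err := r.Read(buf); err == nil {
    		fmt.Printf("child saw sync value 0x%x\n", buf[0]) // 0x80
    	}
    }

Note also the context line "failed to sync with patent" above: that misspelling is present in the vendored runc source itself and is kept verbatim here.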