package container

import (
	"fmt"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/events"
	executorpkg "github.com/docker/docker/daemon/cluster/executor"
	"github.com/docker/go-connections/nat"
	"github.com/docker/libnetwork"
	"github.com/docker/swarmkit/agent/exec"
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/log"
	gogotypes "github.com/gogo/protobuf/types"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
	"golang.org/x/time/rate"
)

const defaultGossipConvergeDelay = 2 * time.Second

// controller implements agent.Controller against docker's API.
//
// Most operations against docker's API are done through the container name,
// which is unique to the task.
type controller struct {
	task       *api.Task
	adapter    *containerAdapter
	closed     chan struct{}
	err        error
	pulled     chan struct{} // closed after pull
	cancelPull func()        // cancels pull context if not nil
	pullErr    error         // pull error, only read after pulled closed
}

var _ exec.Controller = &controller{}

// newController returns a docker exec runner for the provided task.
func newController(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*controller, error) {
	adapter, err := newContainerAdapter(b, task, dependencies)
	if err != nil {
		return nil, err
	}

	return &controller{
		task:    task,
		adapter: adapter,
		closed:  make(chan struct{}),
	}, nil
}
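
// Task returns the task assigned to this controller.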
func (r *controller) Task() (*api.Task, error) {
	return r.task, nil
}

// ContainerStatus returns the container-specific status for the task.
func (r *controller) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) {
	ctnr, err := r.adapter.inspect(ctx)
	if err != nil {
		if isUnknownContainer(err) {
			return nil, nil
		}
		return nil, err
	}
	return parseContainerStatus(ctnr)
}
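
// PortStatus returns the ports published by the task's container, if any.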
func (r *controller) PortStatus(ctx context.Context) (*api.PortStatus, error) {
	ctnr, err := r.adapter.inspect(ctx)
	if err != nil {
		if isUnknownContainer(err) {
			return nil, nil
		}

		return nil, err
	}

	return parsePortStatus(ctnr)
}

// Update takes a recent task update and applies it to the container.
func (r *controller) Update(ctx context.Context, t *api.Task) error {
	// TODO(stevvooe): While assignment of tasks is idempotent, we do allow
	// updates of metadata, such as labelling, as well as any other properties
	// that make sense.
	return nil
}

// Prepare creates a container and ensures the image is pulled.
//
// If the container has already been created, exec.ErrTaskPrepared is returned.
func (r *controller) Prepare(ctx context.Context) error {
	if err := r.checkClosed(); err != nil {
		return err
	}

	// Make sure all the networks that the task needs are created.
	if err := r.adapter.createNetworks(ctx); err != nil {
		return err
	}

	// Make sure all the volumes that the task needs are created.
	if err := r.adapter.createVolumes(ctx); err != nil {
		return err
	}

	if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" {
		if r.pulled == nil {
			// Fork the pull to a different context to allow pull to continue
			// on re-entrant calls to Prepare. This ensures that Prepare can be
			// idempotent and not incur the extra cost of pulling when
			// cancelled on updates.
			var pctx context.Context

			r.pulled = make(chan struct{})
			pctx, r.cancelPull = context.WithCancel(context.Background()) // TODO(stevvooe): Bind a context to the entire controller.

			go func() {
				defer close(r.pulled)
				r.pullErr = r.adapter.pullImage(pctx) // protected by closing r.pulled
			}()
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-r.pulled:
			if r.pullErr != nil {
				// NOTE(stevvooe): We always try to pull the image to make sure we have
				// the most up-to-date version. This will return an error, but we only
				// log it. If the image truly doesn't exist, the create below will
				// error out.
				//
				// This gives us some nice behavior where we use up-to-date versions of
				// mutable tags, but will still run if the old image is available but a
				// registry is down.
				//
				// If you don't want this behavior, lock down your image to an
				// immutable tag or digest.
				log.G(ctx).WithError(r.pullErr).Error("pulling image failed")
			}
		}
	}

	if err := r.adapter.create(ctx); err != nil {
		if isContainerCreateNameConflict(err) {
			if _, err := r.adapter.inspect(ctx); err != nil {
				return err
			}

			// container is already created. success!
			return exec.ErrTaskPrepared
		}

		return err
	}

	return nil
}

// Start the container. An error will be returned if the container is already started.
func (r *controller) Start(ctx context.Context) error {
	if err := r.checkClosed(); err != nil {
		return err
	}

	ctnr, err := r.adapter.inspect(ctx)
	if err != nil {
		return err
	}

	// Detect whether the container has *ever* been started. If so, we don't
	// issue the start.
	//
	// TODO(stevvooe): This is very racy. While reading inspect, another could
	// start the process and we could end up starting it twice.
	if ctnr.State.Status != "created" {
		return exec.ErrTaskStarted
	}

	for {
		if err := r.adapter.start(ctx); err != nil {
			if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok {
				// Retry network creation if the start failed because
				// some of the networks were not found.
				if err := r.adapter.createNetworks(ctx); err != nil {
					return err
				}

				continue
			}

			return errors.Wrap(err, "starting container failed")
		}

		break
	}

	// no health check
	if ctnr.Config == nil || ctnr.Config.Healthcheck == nil || len(ctnr.Config.Healthcheck.Test) == 0 || ctnr.Config.Healthcheck.Test[0] == "NONE" {
		if err := r.adapter.activateServiceBinding(); err != nil {
			log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s which has no healthcheck config", r.adapter.container.name())
			return err
		}
		return nil
	}

	// wait for the container to be healthy
	eventq := r.adapter.events(ctx)

	var healthErr error
	for {
		select {
		case event := <-eventq:
			if !r.matchevent(event) {
				continue
			}

			switch event.Action {
			case "die": // exit on terminal events
				ctnr, err := r.adapter.inspect(ctx)
				if err != nil {
					return errors.Wrap(err, "die event received")
				} else if ctnr.State.ExitCode != 0 {
					return &exitError{code: ctnr.State.ExitCode, cause: healthErr}
				}

				return nil
			case "destroy":
				// If we get here, something has gone wrong but we want to exit
				// and report anyway.
				return ErrContainerDestroyed
			case "health_status: unhealthy":
				// in this case, we stop the container and report unhealthy status
				if err := r.Shutdown(ctx); err != nil {
					return errors.Wrap(err, "unhealthy container shutdown failed")
				}
				// set the health check error, and wait for the container to fully exit ("die" event)
				healthErr = ErrContainerUnhealthy
			case "health_status: healthy":
				if err := r.adapter.activateServiceBinding(); err != nil {
					log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s after healthy event", r.adapter.container.name())
					return err
				}
				return nil
			}
		case <-ctx.Done():
			return ctx.Err()
		case <-r.closed:
			return r.err
		}
	}
}

// Wait on the container to exit.
func (r *controller) Wait(pctx context.Context) error {
	if err := r.checkClosed(); err != nil {
		return err
	}

	ctx, cancel := context.WithCancel(pctx)
	defer cancel()

	healthErr := make(chan error, 1)
	go func() {
		ectx, cancel := context.WithCancel(ctx) // cancel event context on first event
		defer cancel()
		if err := r.checkHealth(ectx); err == ErrContainerUnhealthy {
			healthErr <- ErrContainerUnhealthy
			if err := r.Shutdown(ectx); err != nil {
				log.G(ectx).WithError(err).Debug("shutdown failed on unhealthy")
			}
		}
	}()

	waitC, err := r.adapter.wait(ctx)
	if err != nil {
		return err
	}

	if status := <-waitC; status.ExitCode() != 0 {
		exitErr := &exitError{
			code: status.ExitCode(),
		}

		// Set the cause if it is knowable.
		select {
		case e := <-healthErr:
			exitErr.cause = e
		default:
			if status.Err() != nil {
				exitErr.cause = status.Err()
			}
		}

		return exitErr
	}

	return nil
}
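
// hasServiceBinding reports whether the task is attached to any network other
// than the default bridge network.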
func (r *controller) hasServiceBinding() bool {
	if r.task == nil {
		return false
	}

	// the service is attached to a network other than the default bridge
	for _, na := range r.task.Networks {
		if na.Network == nil ||
			na.Network.DriverState == nil ||
			na.Network.DriverState.Name == "bridge" && na.Network.Spec.Annotations.Name == "bridge" {
			continue
		}
		return true
	}

	return false
}

// Shutdown the container cleanly.
func (r *controller) Shutdown(ctx context.Context) error {
	if err := r.checkClosed(); err != nil {
		return err
	}

	if r.cancelPull != nil {
		r.cancelPull()
	}

	if r.hasServiceBinding() {
		// remove the container from the service binding
		if err := r.adapter.deactivateServiceBinding(); err != nil {
			log.G(ctx).WithError(err).Warningf("failed to deactivate service binding for container %s", r.adapter.container.name())
			// Don't return an error here, because failure to deactivate
			// the service binding is expected if the container was never
			// started.
		}

		// add a delay for gossip convergence
		// TODO(dongluochen): this delay should be configurable to fit different cluster sizes and network delays.
		time.Sleep(defaultGossipConvergeDelay)
	}

	if err := r.adapter.shutdown(ctx); err != nil {
		if isUnknownContainer(err) || isStoppedContainer(err) {
			return nil
		}

		return err
	}

	return nil
}

// Terminate the container, with force.
func (r *controller) Terminate(ctx context.Context) error {
	if err := r.checkClosed(); err != nil {
		return err
	}

	if r.cancelPull != nil {
		r.cancelPull()
	}

	if err := r.adapter.terminate(ctx); err != nil {
		if isUnknownContainer(err) {
			return nil
		}

		return err
	}

	return nil
}

// Remove the container and its resources.
func (r *controller) Remove(ctx context.Context) error {
	if err := r.checkClosed(); err != nil {
		return err
	}

	if r.cancelPull != nil {
		r.cancelPull()
	}

	// It may be necessary to shut down the task before removing it.
	if err := r.Shutdown(ctx); err != nil {
		if isUnknownContainer(err) {
			return nil
		}
		// This may fail if the task was already shut down.
		log.G(ctx).WithError(err).Debug("shutdown failed on removal")
	}

	// Try removing the networks referenced in this task in case this
	// task is the last one referencing them.
	if err := r.adapter.removeNetworks(ctx); err != nil {
		if isUnknownContainer(err) {
			return nil
		}
		return err
	}

	if err := r.adapter.remove(ctx); err != nil {
		if isUnknownContainer(err) {
			return nil
		}

		return err
	}
	return nil
}

// waitReady waits for a container to be "ready".
// Ready means it's past the started state.
func (r *controller) waitReady(pctx context.Context) error {
	if err := r.checkClosed(); err != nil {
		return err
	}

	ctx, cancel := context.WithCancel(pctx)
	defer cancel()

	eventq := r.adapter.events(ctx)

	ctnr, err := r.adapter.inspect(ctx)
	if err != nil {
		if !isUnknownContainer(err) {
			return errors.Wrap(err, "inspect container failed")
		}
	} else {
		switch ctnr.State.Status {
		case "running", "exited", "dead":
			return nil
		}
	}

	for {
		select {
		case event := <-eventq:
			if !r.matchevent(event) {
				continue
			}

			switch event.Action {
			case "start":
				return nil
			}
		case <-ctx.Done():
			return ctx.Err()
		case <-r.closed:
			return r.err
		}
	}
}
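
// Logs publishes the container's log messages to the provided publisher,
// following the stream when requested by the subscription options.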
func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, options api.LogSubscriptionOptions) error {
	if err := r.checkClosed(); err != nil {
		return err
	}

	// if we're following, wait for this container to be ready. there is a
	// problem here: if the container will never be ready (for example, it has
	// been totally deleted) then this will wait forever. however, this doesn't
	// actually cause any UI issues, and shouldn't be a problem. the stuck wait
	// will go away when the follow (context) is canceled.
	if options.Follow {
		if err := r.waitReady(ctx); err != nil {
			return errors.Wrap(err, "container not ready for logs")
		}
	}
	// if we're not following, we don't wait for the container to be ready.
	// just call logs. if the container isn't ready, the call will fail and
	// return an error. no big deal, we don't care, we only want the logs we
	// can get RIGHT NOW with no follow.

	logsContext, cancel := context.WithCancel(ctx)
	msgs, err := r.adapter.logs(logsContext, options)
	defer cancel()
	if err != nil {
		return errors.Wrap(err, "failed getting container logs")
	}

	var (
		// use a rate limiter to keep things under control, but also to provide
		// some ability to coalesce messages.
		limiter = rate.NewLimiter(rate.Every(time.Second), 10<<20) // 10 MB/s
		msgctx  = api.LogContext{
			NodeID:    r.task.NodeID,
			ServiceID: r.task.ServiceID,
			TaskID:    r.task.ID,
		}
	)

	for {
		msg, ok := <-msgs
		if !ok {
			// we're done here, no more messages
			return nil
		}

		if msg.Err != nil {
			// the deferred cancel closes the adapter's log stream
			return msg.Err
		}

		// wait here for the limiter to catch up
		if err := limiter.WaitN(ctx, len(msg.Line)); err != nil {
			return errors.Wrap(err, "failed rate limiter")
		}
		tsp, err := gogotypes.TimestampProto(msg.Timestamp)
		if err != nil {
			return errors.Wrap(err, "failed to convert timestamp")
		}
		var stream api.LogStream
		if msg.Source == "stdout" {
			stream = api.LogStreamStdout
		} else if msg.Source == "stderr" {
			stream = api.LogStreamStderr
		}

		// parse the details out of the Attrs map
		var attrs []api.LogAttr
		if len(msg.Attrs) != 0 {
			attrs = make([]api.LogAttr, 0, len(msg.Attrs))
			for _, attr := range msg.Attrs {
				attrs = append(attrs, api.LogAttr{Key: attr.Key, Value: attr.Value})
			}
		}

		if err := publisher.Publish(ctx, api.LogMessage{
			Context:   msgctx,
			Timestamp: tsp,
			Stream:    stream,
			Attrs:     attrs,
			Data:      msg.Line,
		}); err != nil {
			return errors.Wrap(err, "failed to publish log message")
		}
	}
}

// Close the runner and clean up any ephemeral resources.
func (r *controller) Close() error {
	select {
	case <-r.closed:
		return r.err
	default:
		if r.cancelPull != nil {
			r.cancelPull()
		}

		r.err = exec.ErrControllerClosed
		close(r.closed)
	}
	return nil
}
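
// matchevent reports whether an engine event was emitted by this controller's
// container.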
func (r *controller) matchevent(event events.Message) bool {
	if event.Type != events.ContainerEventType {
		return false
	}
	// we can't filter by container ID because doing so is very likely to
	// introduce a deadlock; see #33377.
	return event.Actor.Attributes["name"] == r.adapter.container.name()
}
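
// checkClosed returns the controller's error if it has been closed, and nil
// otherwise.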
func (r *controller) checkClosed() error {
	select {
	case <-r.closed:
		return r.err
	default:
		return nil
	}
}
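
// parseContainerStatus builds an api.ContainerStatus from an inspect result.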
func parseContainerStatus(ctnr types.ContainerJSON) (*api.ContainerStatus, error) {
	status := &api.ContainerStatus{
		ContainerID: ctnr.ID,
		PID:         int32(ctnr.State.Pid),
		ExitCode:    int32(ctnr.State.ExitCode),
	}

	return status, nil
}
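
// parsePortStatus builds an api.PortStatus from the container's network
// settings.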
func parsePortStatus(ctnr types.ContainerJSON) (*api.PortStatus, error) {
	status := &api.PortStatus{}

	if ctnr.NetworkSettings != nil && len(ctnr.NetworkSettings.Ports) > 0 {
		exposedPorts, err := parsePortMap(ctnr.NetworkSettings.Ports)
		if err != nil {
			return nil, err
		}
		status.Ports = exposedPorts
	}

	return status, nil
}
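
// parsePortMap converts an engine port map into swarmkit port configs.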
func parsePortMap(portMap nat.PortMap) ([]*api.PortConfig, error) {
	exposedPorts := make([]*api.PortConfig, 0, len(portMap))

	for portProtocol, mapping := range portMap {
		parts := strings.SplitN(string(portProtocol), "/", 2)
		if len(parts) != 2 {
			return nil, fmt.Errorf("invalid port mapping: %s", portProtocol)
		}

		port, err := strconv.ParseUint(parts[0], 10, 16)
		if err != nil {
			return nil, err
		}

		protocol := api.ProtocolTCP
		switch strings.ToLower(parts[1]) {
		case "tcp":
			protocol = api.ProtocolTCP
		case "udp":
			protocol = api.ProtocolUDP
		default:
			return nil, fmt.Errorf("invalid protocol: %s", parts[1])
		}

		for _, binding := range mapping {
			hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16)
			if err != nil {
				return nil, err
			}

			// TODO(aluzzardi): We're losing the port `name` here since
			// there's no way to retrieve it back from the Engine.
			exposedPorts = append(exposedPorts, &api.PortConfig{
				PublishMode:   api.PublishModeHost,
				Protocol:      protocol,
				TargetPort:    uint32(port),
				PublishedPort: uint32(hostPort),
			})
		}
	}

	return exposedPorts, nil
}
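
// exitError reports a non-zero container exit and, when known, the error that
// caused it.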
type exitError struct {
	code  int
	cause error
}

func (e *exitError) Error() string {
	if e.cause != nil {
		return fmt.Sprintf("task: non-zero exit (%v): %v", e.code, e.cause)
	}

	return fmt.Sprintf("task: non-zero exit (%v)", e.code)
}

func (e *exitError) ExitCode() int {
	return int(e.code)
}

func (e *exitError) Cause() error {
	return e.cause
}

// checkHealth blocks until an unhealthy container is detected or the context is canceled.
func (r *controller) checkHealth(ctx context.Context) error {
	eventq := r.adapter.events(ctx)

	for {
		select {
		case <-ctx.Done():
			return nil
		case <-r.closed:
			return nil
		case event := <-eventq:
			if !r.matchevent(event) {
				continue
			}

			switch event.Action {
			case "health_status: unhealthy":
				return ErrContainerUnhealthy
			}
		}
	}
}