package container

import (
	"errors"
	"fmt"
	"net"
	"strings"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/api/types"
	enginecontainer "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/network"
	clustertypes "github.com/docker/docker/daemon/cluster/provider"
	"github.com/docker/docker/reference"
	"github.com/docker/swarmkit/agent/exec"
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/protobuf/ptypes"
)

const (
	// Explicitly use the kernel's default setting for CPU quota of 100ms.
	// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
	cpuQuotaPeriod = 100 * time.Millisecond

	// systemLabelPrefix represents the reserved namespace for system labels.
	systemLabelPrefix = "com.docker.swarm"
)

// containerConfig converts task properties into docker container compatible
// components.
type containerConfig struct {
	task                *api.Task
	networksAttachments map[string]*api.NetworkAttachment
}

// newContainerConfig returns a validated container config. No methods should
// return an error if this function returns without error.
func newContainerConfig(t *api.Task) (*containerConfig, error) {
	var c containerConfig
	return &c, c.setTask(t)
}
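
// setTask validates the task and caches it on the config. Tasks that carry
// neither a container nor a network attachment spec are rejected, as are
// container specs without an image or with invalid mounts.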
func (c *containerConfig) setTask(t *api.Task) error {
	if t.Spec.GetContainer() == nil && t.Spec.GetAttachment() == nil {
		return exec.ErrRuntimeUnsupported
	}

	container := t.Spec.GetContainer()
	if container != nil {
		if container.Image == "" {
			return ErrImageRequired
		}

		if err := validateMounts(container.Mounts); err != nil {
			return err
		}
	}

	// index the networks by name
	c.networksAttachments = make(map[string]*api.NetworkAttachment, len(t.Networks))
	for _, attachment := range t.Networks {
		c.networksAttachments[attachment.Network.Spec.Annotations.Name] = attachment
	}

	c.task = t
	return nil
}
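
// id returns the container ID for attachment tasks, or an empty string if
// the task has no attachment spec.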
func (c *containerConfig) id() string {
	attachment := c.task.Spec.GetAttachment()
	if attachment == nil {
		return ""
	}

	return attachment.ContainerID
}

func (c *containerConfig) taskID() string {
	return c.task.ID
}

func (c *containerConfig) endpoint() *api.Endpoint {
	return c.task.Endpoint
}

func (c *containerConfig) spec() *api.ContainerSpec {
	return c.task.Spec.GetContainer()
}
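
// nameOrID returns the generated container name for container tasks, or the
// existing container ID for attachment tasks.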
func (c *containerConfig) nameOrID() string {
	if c.task.Spec.GetContainer() != nil {
		return c.name()
	}

	return c.id()
}
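
// name returns the name to create the container with, preferring the name
// set by the orchestrator and falling back to service.slot.id (for example,
// a task for service "web" in slot 1 might be named "web.1.<task id>").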
func (c *containerConfig) name() string {
	if c.task.Annotations.Name != "" {
		// if set, use the container Annotations.Name field, set in the orchestrator.
		return c.task.Annotations.Name
	}

	slot := fmt.Sprint(c.task.Slot)
	if slot == "" || c.task.Slot == 0 {
		slot = c.task.NodeID
	}

	// fall back to service.slot.id.
	return fmt.Sprintf("%s.%s.%s", c.task.ServiceAnnotations.Name, slot, c.task.ID)
}
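
// image returns the image reference for the task, normalized to include the
// default tag (for example, "redis" becomes "redis:latest"). Unparseable
// references are returned unchanged.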
func (c *containerConfig) image() string {
	raw := c.spec().Image
	ref, err := reference.ParseNamed(raw)
	if err != nil {
		return raw
	}
	return reference.WithDefaultTag(ref).String()
}
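
// config builds the engine container.Config from the task's container spec.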
func (c *containerConfig) config() *enginecontainer.Config {
	config := &enginecontainer.Config{
		Labels:      c.labels(),
		User:        c.spec().User,
		Env:         c.spec().Env,
		WorkingDir:  c.spec().Dir,
		Image:       c.image(),
		Volumes:     c.volumes(),
		Healthcheck: c.healthcheck(),
	}

	if len(c.spec().Command) > 0 {
		// If Command is provided, we replace the whole invocation with Command
		// by replacing Entrypoint and setting Cmd to Args.
		config.Entrypoint = append(config.Entrypoint, c.spec().Command...)
		config.Cmd = append(config.Cmd, c.spec().Args...)
	} else if len(c.spec().Args) > 0 {
		// In this case, we assume the image has an Entrypoint and Args
		// specifies the arguments for that entrypoint.
		config.Cmd = c.spec().Args
	}

	return config
}
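
// labels merges spec labels, task annotation labels, and system labels, in
// that order of precedence. System labels are namespaced under
// systemLabelPrefix, e.g. "com.docker.swarm.task.id".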
func (c *containerConfig) labels() map[string]string {
	var (
		system = map[string]string{
			"task":         "", // mark as cluster task
			"task.id":      c.task.ID,
			"task.name":    c.name(),
			"node.id":      c.task.NodeID,
			"service.id":   c.task.ServiceID,
			"service.name": c.task.ServiceAnnotations.Name,
		}
		labels = make(map[string]string)
	)

	// base labels are those defined in the spec.
	for k, v := range c.spec().Labels {
		labels[k] = v
	}

	// we then apply the overrides from the task, which may be set via the
	// orchestrator.
	for k, v := range c.task.Annotations.Labels {
		labels[k] = v
	}

	// finally, we apply the system labels, which override all labels.
	for k, v := range system {
		labels[strings.Join([]string{systemLabelPrefix, k}, ".")] = v
	}

	return labels
}

// volumes gets placed into the Volumes field on the engine container config.
func (c *containerConfig) volumes() map[string]struct{} {
	r := make(map[string]struct{})
	// Volumes *only* creates anonymous volumes. The rest is mixed in with
	// binds, which aren't actually binds. Basically, any volume that
	// results in a single component must be added here.
	//
	// This is reverse engineered from the behavior of the engine API.
	for _, mount := range c.spec().Mounts {
		if mount.Type == api.MountTypeVolume && mount.Source == "" {
			r[mount.Target] = struct{}{}
		}
	}
	return r
}
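
// tmpfs returns a map of tmpfs mount targets to their mount options, placed
// into the Tmpfs field on the host config.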
func (c *containerConfig) tmpfs() map[string]string {
	r := make(map[string]string)

	for _, spec := range c.spec().Mounts {
		if spec.Type != api.MountTypeTmpfs {
			continue
		}

		r[spec.Target] = getMountMask(&spec)
	}

	return r
}
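
// binds returns the bind specifications for bind mounts and named volumes,
// in the engine's "source:target[:options]" format.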
func (c *containerConfig) binds() []string {
	var r []string
	for _, mount := range c.spec().Mounts {
		if mount.Type == api.MountTypeBind || (mount.Type == api.MountTypeVolume && mount.Source != "") {
			spec := fmt.Sprintf("%s:%s", mount.Source, mount.Target)
			mask := getMountMask(&mount)
			if mask != "" {
				spec = fmt.Sprintf("%s:%s", spec, mask)
			}
			r = append(r, spec)
		}
	}
	return r
}
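
// healthcheck translates the spec's health check, if any, into an engine
// HealthConfig. Errors converting the interval and timeout durations are
// ignored, leaving zero values.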
func (c *containerConfig) healthcheck() *enginecontainer.HealthConfig {
	hcSpec := c.spec().Healthcheck
	if hcSpec == nil {
		return nil
	}
	interval, _ := ptypes.Duration(hcSpec.Interval)
	timeout, _ := ptypes.Duration(hcSpec.Timeout)
	return &enginecontainer.HealthConfig{
		Test:     hcSpec.Test,
		Interval: interval,
		Timeout:  timeout,
		Retries:  int(hcSpec.Retries),
	}
}
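
// getMountMask renders the option string for a mount, e.g. "ro,rshared" for
// a read-only bind with rshared propagation, or "mode=700,size=1g" for a
// tmpfs mount.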
func getMountMask(m *api.Mount) string {
	var maskOpts []string
	if m.ReadOnly {
		maskOpts = append(maskOpts, "ro")
	}

	switch m.Type {
	case api.MountTypeVolume:
		if m.VolumeOptions != nil && m.VolumeOptions.NoCopy {
			maskOpts = append(maskOpts, "nocopy")
		}
	case api.MountTypeBind:
		if m.BindOptions == nil {
			break
		}

		switch m.BindOptions.Propagation {
		case api.MountPropagationPrivate:
			maskOpts = append(maskOpts, "private")
		case api.MountPropagationRPrivate:
			maskOpts = append(maskOpts, "rprivate")
		case api.MountPropagationShared:
			maskOpts = append(maskOpts, "shared")
		case api.MountPropagationRShared:
			maskOpts = append(maskOpts, "rshared")
		case api.MountPropagationSlave:
			maskOpts = append(maskOpts, "slave")
		case api.MountPropagationRSlave:
			maskOpts = append(maskOpts, "rslave")
		}
	case api.MountTypeTmpfs:
		if m.TmpfsOptions == nil {
			break
		}

		if m.TmpfsOptions.Mode != 0 {
			maskOpts = append(maskOpts, fmt.Sprintf("mode=%o", m.TmpfsOptions.Mode))
		}

		if m.TmpfsOptions.SizeBytes != 0 {
			// Calculate the size suffix here, making this Linux specific,
			// but that is okay, since the API is that way anyways.
			//
			// We do this by finding the suffix that divides evenly into the
			// value, returning the value itself, with no suffix, if it fails.
			//
			// For the most part, we don't enforce any semantics on these
			// values. The operating system will usually align this and
			// enforce minimums and maximums.
			var (
				size   = m.TmpfsOptions.SizeBytes
				suffix string
			)
			for _, r := range []struct {
				suffix  string
				divisor int64
			}{
				{"g", 1 << 30},
				{"m", 1 << 20},
				{"k", 1 << 10},
			} {
				if size%r.divisor == 0 {
					size = size / r.divisor
					suffix = r.suffix
					break
				}
			}

			maskOpts = append(maskOpts, fmt.Sprintf("size=%d%s", size, suffix))
		}
	}

	return strings.Join(maskOpts, ",")
}
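
// hostConfig builds the engine HostConfig, carrying resource limits, binds,
// tmpfs mounts, supplementary groups, and the task's log driver, if set.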
func (c *containerConfig) hostConfig() *enginecontainer.HostConfig {
	hc := &enginecontainer.HostConfig{
		Resources: c.resources(),
		Binds:     c.binds(),
		Tmpfs:     c.tmpfs(),
		GroupAdd:  c.spec().Groups,
	}

	if c.task.LogDriver != nil {
		hc.LogConfig = enginecontainer.LogConfig{
			Type:   c.task.LogDriver.Name,
			Config: c.task.LogDriver.Options,
		}
	}

	return hc
}

// volumeCreateRequest handles the case of volumes that are defined inside a
// service Mount, returning nil when the mount carries no volume options.
func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *types.VolumeCreateRequest {
	var (
		driverName string
		driverOpts map[string]string
		labels     map[string]string
	)

	if mount.VolumeOptions != nil && mount.VolumeOptions.DriverConfig != nil {
		driverName = mount.VolumeOptions.DriverConfig.Name
		driverOpts = mount.VolumeOptions.DriverConfig.Options
		labels = mount.VolumeOptions.Labels
	}

	if mount.VolumeOptions != nil {
		return &types.VolumeCreateRequest{
			Name:       mount.Source,
			Driver:     driverName,
			DriverOpts: driverOpts,
			Labels:     labels,
		}
	}
	return nil
}
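
// resources translates the task's resource limits into engine resources.
// NanoCPUs are converted to a quota over the 100ms default period: for
// example, 500000000 NanoCPUs (half a CPU) yields CPUPeriod=100000µs and
// CPUQuota=50000µs.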
func (c *containerConfig) resources() enginecontainer.Resources {
	resources := enginecontainer.Resources{}

	// If no limits are specified let the engine use its defaults.
	//
	// TODO(aluzzardi): We might want to set some limits anyway otherwise
	// "unlimited" tasks will step over the reservation of other tasks.
	r := c.task.Spec.Resources
	if r == nil || r.Limits == nil {
		return resources
	}

	if r.Limits.MemoryBytes > 0 {
		resources.Memory = r.Limits.MemoryBytes
	}

	if r.Limits.NanoCPUs > 0 {
		// CPU Period must be set in microseconds.
		resources.CPUPeriod = int64(cpuQuotaPeriod / time.Microsecond)
		resources.CPUQuota = r.Limits.NanoCPUs * resources.CPUPeriod / 1e9
	}

	return resources
}

// The Docker daemon supports just one network during container create.
func (c *containerConfig) createNetworkingConfig() *network.NetworkingConfig {
	var networks []*api.NetworkAttachment
	if c.task.Spec.GetContainer() != nil || c.task.Spec.GetAttachment() != nil {
		networks = c.task.Networks
	}

	epConfig := make(map[string]*network.EndpointSettings)
	if len(networks) > 0 {
		epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0])
	}

	return &network.NetworkingConfig{EndpointsConfig: epConfig}
}

// TODO: Merge this function with createNetworkingConfig after the daemon
// supports multiple networks in container create.
func (c *containerConfig) connectNetworkingConfig() *network.NetworkingConfig {
	var networks []*api.NetworkAttachment
	if c.task.Spec.GetContainer() != nil {
		networks = c.task.Networks
	}

	// The first network is used during container create. Other networks are
	// attached afterwards with "docker network connect".
	if len(networks) < 2 {
		return nil
	}

	epConfig := make(map[string]*network.EndpointSettings)
	for _, na := range networks[1:] {
		epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na)
	}
	return &network.NetworkingConfig{EndpointsConfig: epConfig}
}
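
// getEndpointConfig maps a network attachment's addresses onto engine
// endpoint settings, splitting them into IPv4 and IPv6 IPAM entries.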
func getEndpointConfig(na *api.NetworkAttachment) *network.EndpointSettings {
	var ipv4, ipv6 string
	for _, addr := range na.Addresses {
		ip, _, err := net.ParseCIDR(addr)
		if err != nil {
			continue
		}

		if ip.To4() != nil {
			ipv4 = ip.String()
			continue
		}

		if ip.To16() != nil {
			ipv6 = ip.String()
		}
	}

	return &network.EndpointSettings{
		NetworkID: na.Network.ID,
		IPAMConfig: &network.EndpointIPAMConfig{
			IPv4Address: ipv4,
			IPv6Address: ipv6,
		},
	}
}
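
// virtualIP returns the virtual IP assigned to the endpoint on the given
// network, or an empty string if there is none.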
func (c *containerConfig) virtualIP(networkID string) string {
	if c.task.Endpoint == nil {
		return ""
	}

	for _, eVip := range c.task.Endpoint.VirtualIPs {
		// We only support IPv4 VIPs for now.
		if eVip.NetworkID == networkID {
			vip, _, err := net.ParseCIDR(eVip.Addr)
			if err != nil {
				return ""
			}

			return vip.String()
		}
	}

	return ""
}
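
// serviceConfig assembles the clustertypes.ServiceConfig (virtual addresses,
// aliases, and exposed ports) for the task, or nil if the task has no
// networks.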
func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig {
	if len(c.task.Networks) == 0 {
		return nil
	}

	logrus.Debugf("Creating service config in agent for t = %+v", c.task)
	svcCfg := &clustertypes.ServiceConfig{
		Name:             c.task.ServiceAnnotations.Name,
		Aliases:          make(map[string][]string),
		ID:               c.task.ServiceID,
		VirtualAddresses: make(map[string]*clustertypes.VirtualAddress),
	}

	for _, na := range c.task.Networks {
		svcCfg.VirtualAddresses[na.Network.ID] = &clustertypes.VirtualAddress{
			// We support only IPv4 virtual IP for now.
			IPv4: c.virtualIP(na.Network.ID),
		}
		if len(na.Aliases) > 0 {
			svcCfg.Aliases[na.Network.ID] = na.Aliases
		}
	}

	if c.task.Endpoint != nil {
		for _, ePort := range c.task.Endpoint.Ports {
			svcCfg.ExposedPorts = append(svcCfg.ExposedPorts, &clustertypes.PortConfig{
				Name:          ePort.Name,
				Protocol:      int32(ePort.Protocol),
				TargetPort:    ePort.TargetPort,
				PublishedPort: ePort.PublishedPort,
			})
		}
	}

	return svcCfg
}

// networks returns a list of network names attached to the container. The
// returned names can be used to look up the corresponding network create
// options.
func (c *containerConfig) networks() []string {
	var networks []string

	for name := range c.networksAttachments {
		networks = append(networks, name)
	}

	return networks
}
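
// networkCreateRequest builds the request used to create the named network
// on this node, derived from the task's attachment for that network.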
func (c *containerConfig) networkCreateRequest(name string) (clustertypes.NetworkCreateRequest, error) {
	na, ok := c.networksAttachments[name]
	if !ok {
		return clustertypes.NetworkCreateRequest{}, errors.New("container: unknown network referenced")
	}

	options := types.NetworkCreate{
		// ID:     na.Network.ID,
		Driver: na.Network.DriverState.Name,
		IPAM: &network.IPAM{
			Driver: na.Network.IPAM.Driver.Name,
		},
		Options:        na.Network.DriverState.Options,
		Labels:         na.Network.Spec.Annotations.Labels,
		Internal:       na.Network.Spec.Internal,
		EnableIPv6:     na.Network.Spec.Ipv6Enabled,
		CheckDuplicate: true,
	}

	for _, ic := range na.Network.IPAM.Configs {
		c := network.IPAMConfig{
			Subnet:  ic.Subnet,
			IPRange: ic.Range,
			Gateway: ic.Gateway,
		}
		options.IPAM.Config = append(options.IPAM.Config, c)
	}

	return clustertypes.NetworkCreateRequest{na.Network.ID, types.NetworkCreateRequest{Name: name, NetworkCreate: options}}, nil
}
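
// eventFilter returns a filter that matches engine events for this task's
// container by type, name, and the system task.id label.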
func (c containerConfig) eventFilter() filters.Args {
	filter := filters.NewArgs()
	filter.Add("type", events.ContainerEventType)
	filter.Add("name", c.name())
	filter.Add("label", fmt.Sprintf("%v.task.id=%v", systemLabelPrefix, c.task.ID))
	return filter
}