Merge pull request #31143 from aaronlehmann/vendor-swarmkit-caa9c95

[1.13] Vendor swarmkit 30a4278
commit c9ae02ec59
Victor Vieux authored on 2017-02-19 00:46:21 -08:00; committed by GitHub
4 changed files with 75 additions and 48 deletions

@@ -101,7 +101,7 @@ github.com/docker/containerd aa8187dbd3b7ad67d8e5e3a15115d3eef43a7ed1
 github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
 
 # cluster
-github.com/docker/swarmkit c7df892262aa0bec0a3e52ea76219b7b364ded38
+github.com/docker/swarmkit 30a4278953316a0abd88d35c8d6600ff5add2733
 github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9
 github.com/gogo/protobuf v0.3
 github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a

@@ -156,40 +156,53 @@ func reconcilePortConfigs(s *api.Service) []*api.PortConfig {
         return s.Spec.Endpoint.Ports
     }
 
-    allocatedPorts := make(map[api.PortConfig]*api.PortConfig)
+    portStates := allocatedPorts{}
     for _, portState := range s.Endpoint.Ports {
-        if portState.PublishMode != api.PublishModeIngress {
-            continue
+        if portState.PublishMode == api.PublishModeIngress {
+            portStates.addState(portState)
         }
-        allocatedPorts[getPortConfigKey(portState)] = portState
     }
 
     var portConfigs []*api.PortConfig
 
+    // Process the portConfig with portConfig.PublishMode != api.PublishModeIngress
+    // and PublishedPort != 0 (high priority)
     for _, portConfig := range s.Spec.Endpoint.Ports {
-        // If the PublishMode is not Ingress simply pick up
-        // the port config.
         if portConfig.PublishMode != api.PublishModeIngress {
+            // If the PublishMode is not Ingress simply pick up the port config.
             portConfigs = append(portConfigs, portConfig)
-            continue
-        }
+        } else if portConfig.PublishedPort != 0 {
+            // Otherwise we only process PublishedPort != 0 in this round
 
-        portState, ok := allocatedPorts[getPortConfigKey(portConfig)]
+            // Remove record from portState
+            portStates.delState(portConfig)
 
-        // If the portConfig is exactly the same as portState
-        // except if SwarmPort is not user-define then prefer
-        // portState to ensure sticky allocation of the same
-        // port that was allocated before.
-        if ok && portConfig.Name == portState.Name &&
-            portConfig.TargetPort == portState.TargetPort &&
-            portConfig.Protocol == portState.Protocol &&
-            portConfig.PublishedPort == 0 {
-            portConfigs = append(portConfigs, portState)
-            continue
+            // For PublishedPort != 0 prefer the portConfig
+            portConfigs = append(portConfigs, portConfig)
         }
+    }
 
-        // For all other cases prefer the portConfig
-        portConfigs = append(portConfigs, portConfig)
+    // Iterate portConfigs with PublishedPort == 0 (low priority)
+    for _, portConfig := range s.Spec.Endpoint.Ports {
+        // Ignore ports which are not PublishModeIngress (already processed)
+        // And we only process PublishedPort == 0 in this round
+        // So the following:
+        // `portConfig.PublishMode == api.PublishModeIngress && portConfig.PublishedPort == 0`
+        if portConfig.PublishMode == api.PublishModeIngress && portConfig.PublishedPort == 0 {
+            // If the portConfig is exactly the same as portState
+            // except if SwarmPort is not user-define then prefer
+            // portState to ensure sticky allocation of the same
+            // port that was allocated before.
+
+            // Remove record from portState
+            if portState := portStates.delState(portConfig); portState != nil {
+                portConfigs = append(portConfigs, portState)
+                continue
+            }
+
+            // For all other cases prefer the portConfig
+            portConfigs = append(portConfigs, portConfig)
+        }
     }
 
     return portConfigs
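
Reviewer note: the reworked reconcilePortConfigs relies on an allocatedPorts helper type (addState/delState) that lives elsewhere in portallocator.go and is not shown in this diff. The standalone Go sketch below is only a guess at its shape: PortConfig and portKey are trimmed-down stand-ins for api.PortConfig and getPortConfigKey, and the names are invented for illustration. The point it demonstrates is that the helper can hold several published ports for the same target port and hand them back one at a time.

// Hypothetical, simplified sketch of an addState/delState helper; not the
// actual swarmkit implementation.
package main

import "fmt"

// PortConfig is a stand-in for api.PortConfig with only the fields used here.
type PortConfig struct {
    Name          string
    Protocol      string
    TargetPort    uint32
    PublishedPort uint32
}

// portKey identifies a port independently of its published (swarm) port,
// much like getPortConfigKey does for api.PortConfig.
type portKey struct {
    Name       string
    Protocol   string
    TargetPort uint32
}

// allocatedPorts tracks allocated ingress port states, allowing several
// published ports for the same target port.
type allocatedPorts map[portKey]map[uint32]*PortConfig

// addState records an already-allocated port state.
func (ps allocatedPorts) addState(p *PortConfig) {
    k := portKey{p.Name, p.Protocol, p.TargetPort}
    if ps[k] == nil {
        ps[k] = make(map[uint32]*PortConfig)
    }
    ps[k][p.PublishedPort] = p
}

// delState removes and returns a matching state: an exact PublishedPort match
// when the config pins one, otherwise any remaining allocation for that key.
func (ps allocatedPorts) delState(p *PortConfig) *PortConfig {
    k := portKey{p.Name, p.Protocol, p.TargetPort}
    states, ok := ps[k]
    if !ok {
        return nil
    }
    if p.PublishedPort != 0 {
        if state, found := states[p.PublishedPort]; found {
            delete(states, p.PublishedPort)
            return state
        }
        return nil
    }
    for published, state := range states {
        delete(states, published)
        return state
    }
    return nil
}

func main() {
    ps := allocatedPorts{}
    ps.addState(&PortConfig{Protocol: "tcp", TargetPort: 80, PublishedPort: 30000})
    ps.addState(&PortConfig{Protocol: "tcp", TargetPort: 80, PublishedPort: 8080})

    // High-priority pass: a spec entry that pins 8080 claims exactly that state.
    pinned := ps.delState(&PortConfig{Protocol: "tcp", TargetPort: 80, PublishedPort: 8080})
    // Low-priority pass: a dynamic entry (PublishedPort == 0) reuses what is left.
    dynamic := ps.delState(&PortConfig{Protocol: "tcp", TargetPort: 80})
    fmt.Println(pinned.PublishedPort, dynamic.PublishedPort) // 8080 30000
}

This mirrors the two-pass ordering the new reconcilePortConfigs follows: pinned ingress ports are settled first, then dynamic ones pick up whatever allocations remain, which keeps previously allocated ports sticky.
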
@@ -306,40 +319,31 @@ func (pa *portAllocator) isPortsAllocated(s *api.Service) bool {
         return false
     }
 
-    allocatedPorts := make(map[api.PortConfig]*api.PortConfig)
+    portStates := allocatedPorts{}
     for _, portState := range s.Endpoint.Ports {
-        if portState.PublishMode != api.PublishModeIngress {
-            continue
+        if portState.PublishMode == api.PublishModeIngress {
+            portStates.addState(portState)
         }
-        allocatedPorts[getPortConfigKey(portState)] = portState
     }
 
+    // Iterate portConfigs with PublishedPort != 0 (high priority)
     for _, portConfig := range s.Spec.Endpoint.Ports {
         // Ignore ports which are not PublishModeIngress
         if portConfig.PublishMode != api.PublishModeIngress {
             continue
         }
-
-        portState, ok := allocatedPorts[getPortConfigKey(portConfig)]
-
-        // If name, port, protocol values don't match then we
-        // are not allocated.
-        if !ok {
+        if portConfig.PublishedPort != 0 && portStates.delState(portConfig) == nil {
             return false
         }
+    }
 
-        // If SwarmPort was user defined but the port state
-        // SwarmPort doesn't match we are not allocated.
-        if portConfig.PublishedPort != portState.PublishedPort &&
-            portConfig.PublishedPort != 0 {
-            return false
+    // Iterate portConfigs with PublishedPort == 0 (low priority)
+    for _, portConfig := range s.Spec.Endpoint.Ports {
+        // Ignore ports which are not PublishModeIngress
+        if portConfig.PublishMode != api.PublishModeIngress {
+            continue
         }
-
-        // If SwarmPort was not defined by user and port state
-        // is not initialized with a valid SwarmPort value then
-        // we are not allocated.
-        if portConfig.PublishedPort == 0 && portState.PublishedPort == 0 {
+        if portConfig.PublishedPort == 0 && portStates.delState(portConfig) == nil {
             return false
         }
     }
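
Reviewer note: the reason both reconcilePortConfigs and isPortsAllocated move off the plain map appears to be that, as I read getPortConfigKey, the key drops PublishedPort, so two ingress entries publishing the same target port collapse into a single map slot and one allocation is silently lost. The toy snippet below uses an invented, simplified key type rather than api.PortConfig, purely to show that collapse.

// Toy demonstration of the old map-based bookkeeping losing one of two
// allocations for the same target port; the key shape is a simplification.
package main

import "fmt"

type key struct {
    Protocol   string
    TargetPort uint32
}

func main() {
    allocated := map[key]uint32{}
    // The service publishes the same target port twice: 8080->80 and 8081->80.
    allocated[key{"tcp", 80}] = 8080
    allocated[key{"tcp", 80}] = 8081 // overwrites the first allocation

    fmt.Println(len(allocated)) // 1: only one of the two allocations survives
}

With the consuming delState used above, each recorded state can be claimed at most once, so the allocation check has to find one state per spec entry rather than reusing the same map slot twice.
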

@@ -1,11 +1,14 @@
 package constraintenforcer
 
 import (
+    "time"
+
     "github.com/docker/swarmkit/api"
     "github.com/docker/swarmkit/log"
     "github.com/docker/swarmkit/manager/constraint"
     "github.com/docker/swarmkit/manager/state"
     "github.com/docker/swarmkit/manager/state/store"
+    "github.com/docker/swarmkit/protobuf/ptypes"
 )
 
 // ConstraintEnforcer watches for updates to nodes and shuts down tasks that no
@@ -43,7 +46,7 @@ func (ce *ConstraintEnforcer) Run() {
         log.L.WithError(err).Error("failed to check nodes for noncompliant tasks")
     } else {
         for _, node := range nodes {
-            ce.shutdownNoncompliantTasks(node)
+            ce.rejectNoncompliantTasks(node)
         }
     }
@@ -51,14 +54,14 @@ func (ce *ConstraintEnforcer) Run() {
         select {
         case event := <-watcher:
             node := event.(state.EventUpdateNode).Node
-            ce.shutdownNoncompliantTasks(node)
+            ce.rejectNoncompliantTasks(node)
         case <-ce.stopChan:
             return
         }
     }
 }
 
-func (ce *ConstraintEnforcer) shutdownNoncompliantTasks(node *api.Node) {
+func (ce *ConstraintEnforcer) rejectNoncompliantTasks(node *api.Node) {
     // If the availability is "drain", the orchestrator will
     // shut down all tasks.
     // If the availability is "pause", we shouldn't touch
@@ -134,7 +137,16 @@ func (ce *ConstraintEnforcer) shutdownNoncompliantTasks(node *api.Node) {
                     return nil
                 }
 
-                t.DesiredState = api.TaskStateShutdown
+                // We set the observed state to
+                // REJECTED, rather than the desired
+                // state. Desired state is owned by the
+                // orchestrator, and setting it directly
+                // will bypass actions such as
+                // restarting the task on another node
+                // (if applicable).
+                t.Status.State = api.TaskStateRejected
+                t.Status.Message = "assigned node no longer meets constraints"
+                t.Status.Timestamp = ptypes.MustTimestampProto(time.Now())
                 return store.UpdateTask(tx, t)
             })
             if err != nil {
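
Reviewer note: the added comment is the heart of this hunk: the enforcer now reports an observed REJECTED status and leaves DesiredState to the orchestrator, so behaviour such as restarting the task elsewhere still happens. The sketch below is not swarmkit code; it uses invented Task/TaskStatus types and a loose stand-in for the orchestrator's reconciliation, only to illustrate that split of ownership.

// Minimal sketch of the desired-state vs observed-state split, with invented
// types; not the swarmkit API.
package main

import (
    "fmt"
    "time"
)

type TaskState int

const (
    StateRunning TaskState = iota
    StateRejected
    StateShutdown
)

type TaskStatus struct {
    State     TaskState
    Message   string
    Timestamp time.Time
}

type Task struct {
    DesiredState TaskState  // owned by the orchestrator only
    Status       TaskStatus // observed state, reported by other components
}

// rejectForConstraints mirrors the enforcer's new behaviour: record the
// observation, leave DesiredState alone.
func rejectForConstraints(t *Task) {
    t.Status = TaskStatus{
        State:     StateRejected,
        Message:   "assigned node no longer meets constraints",
        Timestamp: time.Now(),
    }
}

// reconcile loosely stands in for what the orchestrator can do once it sees
// the rejection: it decides the next desired state itself, e.g. shutting the
// task down so a replacement can be scheduled on another node.
func reconcile(t *Task) {
    if t.Status.State == StateRejected && t.DesiredState != StateShutdown {
        t.DesiredState = StateShutdown
    }
}

func main() {
    t := &Task{DesiredState: StateRunning}
    rejectForConstraints(t)
    reconcile(t)
    fmt.Printf("observed=%v desired=%v (%s)\n", t.Status.State, t.DesiredState, t.Status.Message)
}
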

@@ -460,6 +460,17 @@ func (g *Orchestrator) restartTask(ctx context.Context, taskID string, serviceID
         if service == nil {
             return nil
         }
+
+        node, nodeExists := g.nodes[t.NodeID]
+        serviceEntry, serviceExists := g.globalServices[t.ServiceID]
+        if !nodeExists || !serviceExists {
+            return nil
+        }
+
+        if !constraint.NodeMatches(serviceEntry.constraints, node) {
+            t.DesiredState = api.TaskStateShutdown
+            return store.UpdateTask(tx, t)
+        }
+
         return g.restarts.Restart(ctx, tx, g.cluster, service, *t)
     })
     if err != nil {
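
Reviewer note: the added block makes the global orchestrator re-check placement constraints before restarting a task in place, and asks for shutdown when the node no longer qualifies. Below is a toy stand-in for constraint.NodeMatches with an invented label-equality Constraint type, just to make the guard concrete; the real constraint package understands a richer expression syntax (node IDs, hostnames, roles, and != operators).

// Toy stand-in for the constraint check guarding the restart; not the real
// swarmkit constraint package.
package main

import "fmt"

type Constraint struct {
    Key   string
    Value string
}

type Node struct {
    Labels map[string]string
}

// nodeMatches reports whether every label constraint is satisfied.
func nodeMatches(cs []Constraint, n Node) bool {
    for _, c := range cs {
        if n.Labels[c.Key] != c.Value {
            return false
        }
    }
    return true
}

func main() {
    constraints := []Constraint{{Key: "region", Value: "us-east"}}
    node := Node{Labels: map[string]string{"region": "us-west"}}

    if !nodeMatches(constraints, node) {
        // Mirrors the new guard: ask for shutdown instead of restarting here.
        fmt.Println("node no longer matches constraints; desire shutdown instead of restart")
    }
}
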