Mirror of https://github.com/moby/moby.git (synced 2022-11-09 12:21:53 -05:00)
Update swarmkit to 68266392a176434d282760d2d6d0ab4c68edcae6
changes included:
- swarmkit #2706 address unassigned task leak when service is removed
- swarmkit #2676 Fix racy batching on the dispatcher
- swarmkit #2693 Fix linting issues revealed by Go 1.11

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
parent 8f683c6ed5
commit c9377f4552

4 changed files with 26 additions and 3 deletions
vendor.conf

@@ -125,7 +125,7 @@ github.com/containerd/ttrpc 94dde388801693c54f88a6596f713b51a8b30b2d
 github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
 
 # cluster
-github.com/docker/swarmkit 199cf49cd99690135d99e52a1907ec82e8113c4f
+github.com/docker/swarmkit 68266392a176434d282760d2d6d0ab4c68edcae6
 github.com/gogo/protobuf v1.0.0
 github.com/cloudflare/cfssl 1.3.2
 github.com/fernet/fernet-go 1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2
vendor/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go (generated, vendored), 11 changed lines
@@ -293,9 +293,16 @@ func (d *Dispatcher) Run(ctx context.Context) error {
 			publishManagers(ev.([]*api.Peer))
 		case <-d.processUpdatesTrigger:
 			d.processUpdates(ctx)
+			batchTimer.Stop()
+			// drain the timer, if it has already expired
+			select {
+			case <-batchTimer.C:
+			default:
+			}
 			batchTimer.Reset(maxBatchInterval)
 		case <-batchTimer.C:
 			d.processUpdates(ctx)
+			// batch timer has already expired, so no need to drain
 			batchTimer.Reset(maxBatchInterval)
 		case v := <-configWatcher:
 			cluster := v.(api.EventUpdateCluster)
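The hunk above is the racy-batching fix from swarmkit #2676: before reusing the batch timer on an explicit trigger, it is stopped and its channel drained in case it already fired, so a stale tick cannot cause an extra, immediate batch after Reset. Below is a minimal, self-contained Go sketch of that same stop/drain/reset idiom; the channel names, intervals, and printed messages are made up for illustration and are not swarmkit code.

package main

import (
	"fmt"
	"time"
)

func main() {
	const maxBatchInterval = 100 * time.Millisecond

	batchTimer := time.NewTimer(maxBatchInterval)
	defer batchTimer.Stop()

	trigger := make(chan struct{}, 1)
	trigger <- struct{}{} // pretend an external "process updates now" signal arrived

	for i := 0; i < 3; i++ {
		select {
		case <-trigger:
			fmt.Println("processing updates on explicit trigger")
			// Stop the timer, then drain its channel without blocking in case
			// it had already fired; otherwise the stale tick would trigger an
			// extra batch immediately after Reset.
			batchTimer.Stop()
			select {
			case <-batchTimer.C:
			default:
			}
			batchTimer.Reset(maxBatchInterval)
		case <-batchTimer.C:
			fmt.Println("processing updates on batch interval")
			// The timer just fired, so its channel is already empty and
			// Reset alone is safe.
			batchTimer.Reset(maxBatchInterval)
		}
	}
}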
@@ -416,7 +423,7 @@ func (d *Dispatcher) markNodesUnknown(ctx context.Context) error {
 
 		expireFunc := func() {
 			log := log.WithField("node", nodeID)
-			log.Info(`heartbeat expiration for node %s in state "unknown"`, nodeID)
+			log.Infof(`heartbeat expiration for node %s in state "unknown"`, nodeID)
 			if err := d.markNodeNotReady(nodeID, api.NodeStatus_DOWN, `heartbeat failure for node in "unknown" state`); err != nil {
 				log.WithError(err).Error(`failed deregistering node after heartbeat expiration for node in "unknown" state`)
 			}
@@ -537,7 +544,7 @@ func (d *Dispatcher) register(ctx context.Context, nodeID string, description *a
 		}
 
 		expireFunc := func() {
-			log.G(ctx).Debug("heartbeat expiration for worker %s, setting worker status to NodeStatus_DOWN ", nodeID)
+			log.G(ctx).Debugf("heartbeat expiration for worker %s, setting worker status to NodeStatus_DOWN ", nodeID)
 			if err := d.markNodeNotReady(nodeID, api.NodeStatus_DOWN, "heartbeat failure"); err != nil {
 				log.G(ctx).WithError(err).Errorf("failed deregistering node after heartbeat expiration")
 			}
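The two logging changes above (Info to Infof, Debug to Debugf) are the Go 1.11 linting fixes from swarmkit #2693: the non-f variants do not interpret format verbs, so the %s was logged literally, and calls like these are exactly the kind of "possible formatting directive" that the stricter vet checks in the Go 1.11 toolchain presumably flagged. A minimal logrus-based sketch of the difference; swarmkit's log.G helpers return logrus entries, but the message and node ID below are only illustrative.

package main

import "github.com/sirupsen/logrus"

func main() {
	nodeID := "node-1"

	// Info has fmt.Sprint semantics: the %s verb is not interpreted, so this
	// logs the literal "... for worker %s, ..." with the node ID appended.
	logrus.Info("heartbeat expiration for worker %s, setting worker status to NodeStatus_DOWN ", nodeID)

	// Infof has fmt.Sprintf semantics and substitutes the node ID for %s,
	// which is what the diff above switches to.
	logrus.Infof("heartbeat expiration for worker %s, setting worker status to NodeStatus_DOWN", nodeID)
}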
vendor/github.com/docker/swarmkit/manager/scheduler/scheduler.go (generated, vendored), 11 changed lines
@@ -702,9 +702,20 @@ func (s *Scheduler) scheduleNTasksOnNodes(ctx context.Context, n int, taskGroup
 	return tasksScheduled
 }
 
+// noSuitableNode checks unassigned tasks and make sure they have an existing service in the store before
+// updating the task status and adding it back to: schedulingDecisions, unassignedTasks and allTasks
 func (s *Scheduler) noSuitableNode(ctx context.Context, taskGroup map[string]*api.Task, schedulingDecisions map[string]schedulingDecision) {
 	explanation := s.pipeline.Explain()
 	for _, t := range taskGroup {
+		var service *api.Service
+		s.store.View(func(tx store.ReadTx) {
+			service = store.GetService(tx, t.ServiceID)
+		})
+		if service == nil {
+			log.G(ctx).WithField("task.id", t.ID).Debug("removing task from the scheduler")
+			continue
+		}
+
 		log.G(ctx).WithField("task.id", t.ID).Debug("no suitable node available for task")
 
 		newT := *t
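The hunk above is the core of the unassigned-task-leak fix from swarmkit #2706: before a task that found no suitable node is put back on the unassigned list, the store is consulted, and tasks whose service has since been removed are dropped instead of being re-queued forever. A minimal, self-contained Go sketch of that re-queue check follows; the Task and Store types and the function name are simplified stand-ins, not swarmkit's actual types.

package main

import "fmt"

// Task and Store are simplified stand-ins for swarmkit's api.Task and
// manager/state/store types, used only to illustrate the re-queue check.
type Task struct {
	ID        string
	ServiceID string
}

type Store struct {
	services map[string]bool // service ID -> still exists
}

func (s *Store) ServiceExists(id string) bool { return s.services[id] }

// requeueUnassigned mimics the new noSuitableNode behaviour: tasks whose
// service no longer exists are dropped instead of being re-queued forever.
func requeueUnassigned(store *Store, tasks []*Task, unassigned map[string]*Task) {
	for _, t := range tasks {
		if !store.ServiceExists(t.ServiceID) {
			fmt.Printf("task %s: service %s removed, dropping from scheduler\n", t.ID, t.ServiceID)
			continue
		}
		fmt.Printf("task %s: no suitable node, keeping it unassigned\n", t.ID)
		unassigned[t.ID] = t
	}
}

func main() {
	store := &Store{services: map[string]bool{"svc-live": true}}
	tasks := []*Task{
		{ID: "task-1", ServiceID: "svc-live"},
		{ID: "task-2", ServiceID: "svc-removed"}, // its service was deleted
	}
	unassigned := map[string]*Task{}

	requeueUnassigned(store, tasks, unassigned)
	fmt.Println("unassigned tasks still tracked:", len(unassigned))
}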
vendor/github.com/docker/swarmkit/node/node.go (generated, vendored), 5 changed lines
@@ -1136,6 +1136,11 @@ func (n *Node) superviseManager(ctx context.Context, securityConfig *ca.Security
 				// re-promoted. In this case, we must assume we were
 				// re-promoted, and restart the manager.
 				log.G(ctx).Warn("failed to get worker role after manager stop, forcing certificate renewal")
+
+				// We can safely reset this timer without stopping/draining the timer
+				// first because the only way the code has reached this point is if the timer
+				// has already expired - if the role changed or the context were canceled,
+				// then we would have returned already.
 				timer.Reset(roleChangeTimeout)
 
 				renewer.Renew()