Vendor swarmkit d316a73
Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
parent d2532c636f
commit 9bede621f1
3 changed files with 19 additions and 13 deletions
2 vendor.conf
@@ -104,7 +104,7 @@ github.com/docker/containerd 422e31ce907fd9c3833a38d7b8fdd023e5a76e73
 github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
 
 # cluster
-github.com/docker/swarmkit 0e2d9ebcea9d5bbd4a06b3b964fb96356801f880
+github.com/docker/swarmkit d316a73f803e9eb75e3daa7e0f846017b0c9a145
 github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9
 github.com/gogo/protobuf 8d70fb3182befc465c4a1eac8ad4d38ff49778e2
 github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
4 vendor/github.com/docker/swarmkit/manager/scheduler/nodeset.go generated vendored
@@ -111,6 +111,10 @@ func (ns *nodeSet) tree(serviceID string, preferences []*api.PlacementPreference
 			tree = next
 		}
 
+		if node.ActiveTasksCountByService != nil {
+			tree.tasks += node.ActiveTasksCountByService[serviceID]
+		}
+
 		if tree.nodeHeap.lessFunc == nil {
 			tree.nodeHeap.lessFunc = nodeLess
 		}
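
The nodeset.go hunk makes the placement decision tree count tasks a node already runs for the service, not just tasks placed during the current pass, so spread preferences appear to balance new replicas against existing ones. A minimal sketch of the counting pattern follows; decisionTree and nodeInfo here are simplified stand-ins, not the vendored definitions:

package main

import "fmt"

// Simplified stand-ins for the vendored types: swarmkit's real decisionTree
// and NodeInfo also carry preference subtrees and a node heap.
type decisionTree struct {
	tasks int
}

type nodeInfo struct {
	ID                        string
	ActiveTasksCountByService map[string]int
}

func main() {
	serviceID := "svc-a"
	nodes := []nodeInfo{
		{ID: "node1", ActiveTasksCountByService: map[string]int{"svc-a": 3}},
		{ID: "node2"}, // nil map: no active tasks recorded for this node
	}

	var tree decisionTree
	for _, node := range nodes {
		// Same pattern as the vendored change: seed the branch's task
		// count with replicas the node already runs for this service.
		if node.ActiveTasksCountByService != nil {
			tree.tasks += node.ActiveTasksCountByService[serviceID]
		}
	}
	fmt.Println(tree.tasks) // 3
}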
26 vendor/github.com/docker/swarmkit/manager/scheduler/scheduler.go generated vendored
@@ -1,7 +1,6 @@
 package scheduler
 
 import (
-	"container/list"
 	"time"
 
 	"github.com/docker/swarmkit/api"
@@ -30,7 +29,7 @@ type schedulingDecision struct {
 // Scheduler assigns tasks to nodes.
 type Scheduler struct {
 	store           *store.MemoryStore
-	unassignedTasks *list.List
+	unassignedTasks map[string]*api.Task
 	// preassignedTasks already have NodeID, need resource validation
 	preassignedTasks map[string]*api.Task
 	nodeSet          nodeSet
@@ -47,7 +46,7 @@ type Scheduler struct {
 func New(store *store.MemoryStore) *Scheduler {
 	return &Scheduler{
 		store:            store,
-		unassignedTasks:  list.New(),
+		unassignedTasks:  make(map[string]*api.Task),
 		preassignedTasks: make(map[string]*api.Task),
 		allTasks:         make(map[string]*api.Task),
 		stopChan:         make(chan struct{}),
@@ -191,7 +190,7 @@ func (s *Scheduler) Stop() {
 
 // enqueue queues a task for scheduling.
 func (s *Scheduler) enqueue(t *api.Task) {
-	s.unassignedTasks.PushBack(t)
+	s.unassignedTasks[t.ID] = t
 }
 
 func (s *Scheduler) createTask(ctx context.Context, t *api.Task) int {
@ -333,15 +332,12 @@ func (s *Scheduler) processPreassignedTasks(ctx context.Context) {
|
|||
// tick attempts to schedule the queue.
|
||||
func (s *Scheduler) tick(ctx context.Context) {
|
||||
tasksByCommonSpec := make(map[string]map[string]*api.Task)
|
||||
schedulingDecisions := make(map[string]schedulingDecision, s.unassignedTasks.Len())
|
||||
schedulingDecisions := make(map[string]schedulingDecision, len(s.unassignedTasks))
|
||||
|
||||
var next *list.Element
|
||||
for e := s.unassignedTasks.Front(); e != nil; e = next {
|
||||
next = e.Next()
|
||||
t := s.allTasks[e.Value.(*api.Task).ID]
|
||||
for taskID, t := range s.unassignedTasks {
|
||||
if t == nil || t.NodeID != "" {
|
||||
// task deleted or already assigned
|
||||
s.unassignedTasks.Remove(e)
|
||||
delete(s.unassignedTasks, taskID)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -362,8 +358,8 @@ func (s *Scheduler) tick(ctx context.Context) {
 		if tasksByCommonSpec[taskGroupKey] == nil {
 			tasksByCommonSpec[taskGroupKey] = make(map[string]*api.Task)
 		}
-		tasksByCommonSpec[taskGroupKey][t.ID] = t
-		s.unassignedTasks.Remove(e)
+		tasksByCommonSpec[taskGroupKey][taskID] = t
+		delete(s.unassignedTasks, taskID)
 	}
 
 	for _, taskGroup := range tasksByCommonSpec {
@@ -602,6 +598,12 @@ func (s *Scheduler) scheduleNTasksOnNodes(ctx context.Context, n int, taskGroup
 	nodeIter := 0
 	nodeCount := len(nodes)
 	for taskID, t := range taskGroup {
+		// Skip tasks which were already scheduled because they ended
+		// up in two groups at once.
+		if _, exists := schedulingDecisions[taskID]; exists {
+			continue
+		}
+
 		node := &nodes[nodeIter%nodeCount]
 
 		log.G(ctx).WithField("task.id", t.ID).Debugf("assigning to node %s", node.ID)
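
Taken together, the scheduler.go hunks replace the container/list queue of unassigned tasks with a map keyed by task ID: enqueue becomes an idempotent overwrite instead of an append, and removal is a delete by key rather than list-element bookkeeping. A minimal sketch of the pattern, assuming a simplified task type in place of swarmkit's api.Task:

package main

import "fmt"

// task is a simplified stand-in for swarmkit's api.Task.
type task struct {
	ID     string
	NodeID string
}

// scheduler mirrors the refactored queue: a map keyed by task ID
// instead of a *list.List whose elements must be tracked and scanned.
type scheduler struct {
	unassignedTasks map[string]*task
}

// enqueue overwrites any previous entry for the same ID, so re-queuing
// a task cannot produce duplicates the way list.PushBack could.
func (s *scheduler) enqueue(t *task) {
	s.unassignedTasks[t.ID] = t
}

func (s *scheduler) tick() {
	for taskID, t := range s.unassignedTasks {
		if t == nil || t.NodeID != "" {
			// task deleted or already assigned: O(1) removal by key.
			// Deleting during range is safe in Go.
			delete(s.unassignedTasks, taskID)
			continue
		}
		fmt.Println("would schedule", taskID)
	}
}

func main() {
	s := &scheduler{unassignedTasks: make(map[string]*task)}
	s.enqueue(&task{ID: "t1"})
	s.enqueue(&task{ID: "t1"})                  // overwrite, not duplicate
	s.enqueue(&task{ID: "t2", NodeID: "node1"}) // already assigned
	s.tick()                                    // schedules t1 once, drops t2
}

One trade-off of the map form is that iteration order is randomized across ticks; since the vendored code groups tasks by common spec before scheduling, per-tick ordering appears not to matter.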