Mirror of https://github.com/moby/moby.git
Merge pull request #31030 from aboch/c1.13.x-2
[1.13.x] Vendoring swarmkit @c7df892
commit b3b30b0dcd
6 changed files with 19 additions and 12 deletions
@@ -101,7 +101,7 @@ github.com/docker/containerd aa8187dbd3b7ad67d8e5e3a15115d3eef43a7ed1
 github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
 
 # cluster
-github.com/docker/swarmkit 1c7f003d75f091d5f7051ed982594420e4515f77
+github.com/docker/swarmkit c7df892262aa0bec0a3e52ea76219b7b364ded38
 github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9
 github.com/gogo/protobuf v0.3
 github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
vendor/github.com/docker/swarmkit/manager/allocator/network.go (generated, vendored): 5 changed lines
@@ -289,8 +289,9 @@ func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {
         if a.taskAllocateVote(networkVoter, t.ID) {
             // If the task is not attached to any network, network
             // allocators job is done. Immediately cast a vote so
-            // that the task can be moved to ALLOCATED state as
+            // that the task can be moved to the PENDING state as
             // soon as possible.
+            updateTaskStatus(t, api.TaskStatePending, allocatedStatusMessage)
             allocatedTasks = append(allocatedTasks, t)
         }
         continue
@@ -467,7 +468,7 @@ func taskDead(t *api.Task) bool {
 }
 
 // taskReadyForNetworkVote checks if the task is ready for a network
-// vote to move it to ALLOCATED state.
+// vote to move it to PENDING state.
 func taskReadyForNetworkVote(t *api.Task, s *api.Service, nc *networkContext) bool {
     // Task is ready for vote if the following is true:
     //
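Taken together, the two network.go hunks above make the allocator promote a task that has no network attachments straight to the PENDING state once the network voter has cast its vote, instead of only appending it to the allocated list and updating the comments to match. The sketch below shows that vote-then-promote pattern in miniature; the names (voteTracker, stateNew, statePending, the "pending task scheduling" message) are illustrative stand-ins, not swarmkit's actual types or helpers.

```go
package main

import "fmt"

// Illustrative state names; not swarmkit's api.TaskState values.
const (
	stateNew     = "NEW"
	statePending = "PENDING"
)

type task struct {
	ID       string
	Networks []string
	State    string
	Message  string
}

// voteTracker mimics the allocator's vote bookkeeping: a task advances only
// after every registered voter has voted for it.
type voteTracker struct {
	voters int
	votes  map[string]map[string]struct{} // task ID -> voter name -> voted
}

func (v *voteTracker) vote(voter, taskID string) bool {
	if v.votes == nil {
		v.votes = make(map[string]map[string]struct{})
	}
	if v.votes[taskID] == nil {
		v.votes[taskID] = make(map[string]struct{})
	}
	v.votes[taskID][voter] = struct{}{}
	return len(v.votes[taskID]) == v.voters
}

func main() {
	vt := &voteTracker{voters: 1}
	t := &task{ID: "task-1", State: stateNew} // no networks attached

	// The network allocator has nothing to do for this task: cast its vote
	// and, if it was the last vote needed, mark the task PENDING right away.
	if len(t.Networks) == 0 && vt.vote("network", t.ID) {
		t.State = statePending
		t.Message = "pending task scheduling"
	}

	fmt.Println(t.ID, t.State, t.Message)
}
```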
@@ -272,7 +272,8 @@ func (pa *portAllocator) portsAllocatedInHostPublishMode(s *api.Service) bool {
 
     if s.Spec.Endpoint != nil {
         for _, portConfig := range s.Spec.Endpoint.Ports {
-            if portConfig.PublishMode == api.PublishModeHost {
+            if portConfig.PublishMode == api.PublishModeHost &&
+                portConfig.PublishedPort != 0 {
                 if portStates.delState(portConfig) == nil {
                     return false
                 }
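The port-allocator hunk above narrows the host-publish-mode check so that ports with no explicit published port (PublishedPort of 0, i.e. dynamically assigned) are skipped when deciding whether host-mode ports are already allocated. A rough sketch of that filtering rule, using simplified local types and a hypothetical hostPortsNeedingState helper rather than swarmkit's api package:

```go
package main

import "fmt"

type publishMode int

const (
	publishModeIngress publishMode = iota
	publishModeHost
)

type portConfig struct {
	Name          string
	PublishMode   publishMode
	PublishedPort uint32
}

// hostPortsNeedingState returns only the ports that need explicit allocation
// tracking: host-mode ports with a caller-chosen (non-zero) published port.
func hostPortsNeedingState(ports []portConfig) []portConfig {
	var out []portConfig
	for _, p := range ports {
		if p.PublishMode == publishModeHost && p.PublishedPort != 0 {
			out = append(out, p)
		}
	}
	return out
}

func main() {
	ports := []portConfig{
		{Name: "web", PublishMode: publishModeHost, PublishedPort: 8080},
		{Name: "dyn", PublishMode: publishModeHost, PublishedPort: 0}, // dynamically assigned, skipped
		{Name: "lb", PublishMode: publishModeIngress, PublishedPort: 80},
	}
	fmt.Println(hostPortsNeedingState(ports)) // only "web" remains
}
```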
vendor/github.com/docker/swarmkit/manager/keymanager/keymanager.go (generated, vendored): 2 changed lines
@@ -200,8 +200,6 @@ func (k *KeyManager) Run(ctx context.Context) error {
     } else {
         k.keyRing.lClock = cluster.EncryptionKeyLamportClock
         k.keyRing.keys = cluster.NetworkBootstrapKeys
-
-        k.rotateKey(ctx)
     }
 
     ticker := time.NewTicker(k.config.RotationInterval)
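With the initial k.rotateKey(ctx) call removed, Run rotates keys only when the rotation ticker fires, not once immediately after loading the existing key ring. A minimal sketch of that ticker-driven loop shape, with generic names rather than the KeyManager API:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// run invokes rotate on every tick; note there is no rotation before the
// first tick, mirroring the removal of the startup rotateKey call.
func run(ctx context.Context, interval time.Duration, rotate func()) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			rotate()
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()

	n := 0
	_ = run(ctx, 100*time.Millisecond, func() { n++; fmt.Println("rotate", n) })
	// Roughly three rotations before the context expires, none at t=0.
}
```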
vendor/github.com/docker/swarmkit/manager/orchestrator/global/global.go (generated, vendored): 2 changed lines
@@ -504,7 +504,7 @@ func (g *Orchestrator) removeTasks(ctx context.Context, batch *store.Batch, task
 }
 
 func isTaskRunning(t *api.Task) bool {
-    return t != nil && t.DesiredState <= api.TaskStateRunning && t.Status.State <= api.TaskStateRunning
+    return t != nil && t.DesiredState <= api.TaskStateRunning
 }
 
 func isTaskCompleted(t *api.Task, restartPolicy api.RestartPolicy_RestartCondition) bool {
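After this change, the global orchestrator treats a task as running based on its desired state alone: a task that is still meant to run counts as running even if its observed state has lagged behind or already moved past Running, which presumably keeps the orchestrator from creating a duplicate task for a slot whose restart is handled elsewhere. A tiny sketch of that check with stand-in state constants (the ordering mimics swarmkit's enum comparison, but the names are illustrative):

```go
package main

import "fmt"

// Stand-in ordering of task states; in swarmkit these are enum values where
// "running" and every earlier state compare <= TaskStateRunning.
type taskState int

const (
	stateNew taskState = iota
	statePending
	stateRunning
	stateFailed
)

type task struct {
	DesiredState  taskState
	ObservedState taskState
}

// isTaskRunning mirrors the new check: only the desired state matters.
func isTaskRunning(t *task) bool {
	return t != nil && t.DesiredState <= stateRunning
}

func main() {
	crashed := &task{DesiredState: stateRunning, ObservedState: stateFailed}
	// Still "running" from the orchestrator's point of view, so no duplicate
	// task is created here; restarting the failed task is handled elsewhere.
	fmt.Println(isTaskRunning(crashed)) // true
}
```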
vendor/github.com/docker/swarmkit/manager/orchestrator/taskreaper/task_reaper.go (generated, vendored): 17 changed lines
@@ -131,11 +131,13 @@ func (tr *TaskReaper) tick() {
     }
 
     defer func() {
-        tr.dirty = make(map[instanceTuple]struct{})
         tr.orphaned = nil
     }()
 
-    deleteTasks := tr.orphaned
+    deleteTasks := make(map[string]struct{})
+    for _, tID := range tr.orphaned {
+        deleteTasks[tID] = struct{}{}
+    }
     tr.store.View(func(tx store.ReadTx) {
         for dirty := range tr.dirty {
             service := store.GetService(tx, dirty.serviceID)
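In the hunk above, deleteTasks changes from a plain slice (aliasing tr.orphaned) to a set keyed by task ID, so the same task cannot be queued for deletion twice when it shows up both as orphaned and during history trimming; the empty-struct map is the idiomatic Go set. A small standalone sketch of that pattern (the addAll helper is just for illustration):

```go
package main

import "fmt"

// addAll inserts every ID into the set; duplicates collapse to one entry.
func addAll(set map[string]struct{}, ids []string) {
	for _, id := range ids {
		set[id] = struct{}{}
	}
}

func main() {
	deleteTasks := make(map[string]struct{})
	addAll(deleteTasks, []string{"t1", "t2", "t1"}) // "t1" listed twice

	// Later additions from another source (e.g. history trimming) also dedupe.
	deleteTasks["t2"] = struct{}{}

	fmt.Println(len(deleteTasks)) // 2
	for id := range deleteTasks {
		fmt.Println("delete", id)
	}
}
```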
@@ -180,13 +182,15 @@ func (tr *TaskReaper) tick() {
             // instead of sorting the whole slice.
             sort.Sort(tasksByTimestamp(historicTasks))
 
+            runningTasks := 0
             for _, t := range historicTasks {
-                if t.DesiredState <= api.TaskStateRunning {
+                if t.DesiredState <= api.TaskStateRunning || t.Status.State <= api.TaskStateRunning {
                     // Don't delete running tasks
+                    runningTasks++
                     continue
                 }
 
-                deleteTasks = append(deleteTasks, t.ID)
+                deleteTasks[t.ID] = struct{}{}
 
                 taskHistory++
                 if int64(len(historicTasks)) <= taskHistory {
@@ -194,12 +198,15 @@ func (tr *TaskReaper) tick() {
                 }
             }
 
+            if runningTasks <= 1 {
+                delete(tr.dirty, dirty)
+            }
         }
     })
 
     if len(deleteTasks) > 0 {
         tr.store.Batch(func(batch *store.Batch) error {
             for taskID := range deleteTasks {
                 batch.Update(func(tx store.Tx) error {
                     return store.DeleteTask(tx, taskID)
                 })
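The remaining task-reaper hunks count tasks that are still running (by desired or observed state) while walking a slot's history, never queue those for deletion, and remove the slot from tr.dirty only when at most one of its tasks is still running; otherwise the slot stays dirty and is re-checked on a later tick. A condensed, self-contained sketch of that pruning rule is below; pruneSlot and its retention logic are a simplification for illustration, not the exact taskHistory accounting in the diff.

```go
package main

import "fmt"

type taskState int

const (
	statePending taskState = iota
	stateRunning
	stateShutdown
	stateFailed
)

type task struct {
	ID            string
	DesiredState  taskState
	ObservedState taskState
}

// pruneSlot walks a slot's task history (oldest first): tasks still running
// by desired or observed state are never deleted; of the finished ones,
// everything except the newest `keep` entries is queued for deletion. The
// slot stays dirty while more than one of its tasks is still running.
func pruneSlot(history []task, keep int, deleteTasks map[string]struct{}) (stillDirty bool) {
	running := 0
	var finished []task
	for _, t := range history {
		if t.DesiredState <= stateRunning || t.ObservedState <= stateRunning {
			running++ // don't delete running tasks
			continue
		}
		finished = append(finished, t)
	}
	for i, t := range finished {
		if len(finished)-i > keep { // older than the newest `keep` finished tasks
			deleteTasks[t.ID] = struct{}{}
		}
	}
	return running > 1
}

func main() {
	deleteTasks := make(map[string]struct{})
	history := []task{
		{ID: "t1", DesiredState: stateShutdown, ObservedState: stateFailed},
		{ID: "t2", DesiredState: stateShutdown, ObservedState: stateShutdown},
		{ID: "t3", DesiredState: stateRunning, ObservedState: stateRunning},
	}
	dirty := pruneSlot(history, 1, deleteTasks) // keep one finished task per slot
	fmt.Println(deleteTasks, dirty)             // map[t1:{}] false
}
```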