moby/moby: daemon/delete.go
Djordje Lukic 70dc392bfa
Use hashicorp/go-memdb instead of truncindex
memdb already knows how to search by prefix so there is no need to keep
a separate list of container ids in the truncindex

Benchmarks:

$ go test -benchmem -run=^$ -count 5 -tags linux -bench ^BenchmarkDBGetByPrefix100$ github.com/docker/docker/container
goos: linux
goarch: amd64
pkg: github.com/docker/docker/container
cpu: Intel(R) Core(TM) i9-8950HK CPU @ 2.90GHz
BenchmarkDBGetByPrefix100-6        16018             73935 ns/op           33888 B/op       1100 allocs/op
BenchmarkDBGetByPrefix100-6        16502             73150 ns/op           33888 B/op       1100 allocs/op
BenchmarkDBGetByPrefix100-6        16218             74014 ns/op           33856 B/op       1100 allocs/op
BenchmarkDBGetByPrefix100-6        15733             73370 ns/op           33792 B/op       1100 allocs/op
BenchmarkDBGetByPrefix100-6        16432             72546 ns/op           33744 B/op       1100 allocs/op
PASS
ok      github.com/docker/docker/container      9.752s

$ go test -benchmem -run=^$ -count 5 -tags linux -bench ^BenchmarkTruncIndexGet100$ github.com/docker/docker/pkg/truncindex
goos: linux
goarch: amd64
pkg: github.com/docker/docker/pkg/truncindex
cpu: Intel(R) Core(TM) i9-8950HK CPU @ 2.90GHz
BenchmarkTruncIndexGet100-6        16862             73732 ns/op           44776 B/op       1173 allocs/op
BenchmarkTruncIndexGet100-6        16832             73629 ns/op           45184 B/op       1179 allocs/op
BenchmarkTruncIndexGet100-6        17214             73571 ns/op           45160 B/op       1178 allocs/op
BenchmarkTruncIndexGet100-6        16113             71680 ns/op           45360 B/op       1182 allocs/op
BenchmarkTruncIndexGet100-6        16676             71246 ns/op           45056 B/op       1184 allocs/op
PASS
ok      github.com/docker/docker/pkg/truncindex 9.759s

$ go test -benchmem -run=^$ -count 5 -tags linux -bench ^BenchmarkDBGetByPrefix500$ github.com/docker/docker/container
goos: linux
goarch: amd64
pkg: github.com/docker/docker/container
cpu: Intel(R) Core(TM) i9-8950HK CPU @ 2.90GHz
BenchmarkDBGetByPrefix500-6         1539            753541 ns/op          169381 B/op       5500 allocs/op
BenchmarkDBGetByPrefix500-6         1624            749975 ns/op          169458 B/op       5500 allocs/op
BenchmarkDBGetByPrefix500-6         1635            761222 ns/op          169298 B/op       5500 allocs/op
BenchmarkDBGetByPrefix500-6         1693            727856 ns/op          169297 B/op       5500 allocs/op
BenchmarkDBGetByPrefix500-6         1874            710813 ns/op          169570 B/op       5500 allocs/op
PASS
ok      github.com/docker/docker/container      6.711s

$ go test -benchmem -run=^$ -count 5 -tags linux -bench ^BenchmarkTruncIndexGet500$ github.com/docker/docker/pkg/truncindex
goos: linux
goarch: amd64
pkg: github.com/docker/docker/pkg/truncindex
cpu: Intel(R) Core(TM) i9-8950HK CPU @ 2.90GHz
BenchmarkTruncIndexGet500-6         1934            780328 ns/op          224073 B/op       5929 allocs/op
BenchmarkTruncIndexGet500-6         1713            713935 ns/op          225011 B/op       5937 allocs/op
BenchmarkTruncIndexGet500-6         1780            702847 ns/op          224090 B/op       5943 allocs/op
BenchmarkTruncIndexGet500-6         1736            711086 ns/op          224027 B/op       5929 allocs/op
BenchmarkTruncIndexGet500-6         2448            508694 ns/op          222322 B/op       5914 allocs/op
PASS
ok      github.com/docker/docker/pkg/truncindex 6.877s

Signed-off-by: Djordje Lukic <djordje.lukic@docker.com>
2022-05-20 18:22:21 +02:00
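
For context, the prefix search that go-memdb provides out of the box looks roughly like the sketch below. The table name, schema, and Container struct are illustrative assumptions for a standalone example, not the daemon's actual ViewDB schema; the point is that querying the "id" index with the "_prefix" suffix covers the truncated-ID lookup that truncindex used to handle.

package main

import (
	"fmt"

	memdb "github.com/hashicorp/go-memdb"
)

// Container is a stand-in for the record type stored in the database.
type Container struct {
	ID   string
	Name string
}

func main() {
	// Hypothetical single-table schema with a unique index on the ID field.
	schema := &memdb.DBSchema{
		Tables: map[string]*memdb.TableSchema{
			"containers": {
				Name: "containers",
				Indexes: map[string]*memdb.IndexSchema{
					"id": {
						Name:    "id",
						Unique:  true,
						Indexer: &memdb.StringFieldIndex{Field: "ID"},
					},
				},
			},
		},
	}

	db, err := memdb.NewMemDB(schema)
	if err != nil {
		panic(err)
	}

	// Insert a couple of records in a write transaction.
	txn := db.Txn(true)
	for _, c := range []*Container{
		{ID: "4f2a8b0c", Name: "web"},
		{ID: "9d1e7a33", Name: "db"},
	} {
		if err := txn.Insert("containers", c); err != nil {
			panic(err)
		}
	}
	txn.Commit()

	// Prefix lookup: the "_prefix" suffix on the index name returns every
	// record whose ID starts with the given string, which is exactly what
	// resolving a truncated container ID needs.
	read := db.Txn(false)
	defer read.Abort()
	it, err := read.Get("containers", "id_prefix", "4f")
	if err != nil {
		panic(err)
	}
	for obj := it.Next(); obj != nil; obj = it.Next() {
		c := obj.(*Container)
		fmt.Println(c.ID, c.Name)
	}
}

This mirrors what the commit relies on: a single radix-tree index serves both exact and truncated-ID lookups, so a second data structure for container IDs is unnecessary.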


package daemon // import "github.com/docker/docker/daemon"

import (
	"context"
	"fmt"
	"os"
	"path"
	"strings"
	"time"

	"github.com/docker/docker/api/types"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/container"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/pkg/containerfs"
	"github.com/opencontainers/selinux/go-selinux"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// ContainerRm removes the container id from the filesystem. An error
// is returned if the container is not found, or if the remove
// fails. If the remove succeeds, the container name is released, and
// network links are removed.
func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error {
	start := time.Now()
	ctr, err := daemon.GetContainer(name)
	if err != nil {
		return err
	}

	// Container state RemovalInProgress should be used to avoid races.
	if inProgress := ctr.SetRemovalInProgress(); inProgress {
		err := fmt.Errorf("removal of container %s is already in progress", name)
		return errdefs.Conflict(err)
	}
	defer ctr.ResetRemovalInProgress()

	// check if container wasn't deregistered by previous rm since Get
	if c := daemon.containers.Get(ctr.ID); c == nil {
		return nil
	}

	if config.RemoveLink {
		return daemon.rmLink(ctr, name)
	}

	err = daemon.cleanupContainer(ctr, *config)
	containerActions.WithValues("delete").UpdateSince(start)
	return err
}

func (daemon *Daemon) rmLink(container *container.Container, name string) error {
	if name[0] != '/' {
		name = "/" + name
	}
	parent, n := path.Split(name)
	if parent == "/" {
		return fmt.Errorf("Conflict, cannot remove the default link name of the container")
	}

	parent = strings.TrimSuffix(parent, "/")
	pe, err := daemon.containersReplica.Snapshot().GetID(parent)
	if err != nil {
		return fmt.Errorf("Cannot get parent %s for link name %s", parent, name)
	}

	daemon.releaseName(name)
	parentContainer, _ := daemon.GetContainer(pe)
	if parentContainer != nil {
		daemon.linkIndex.unlink(name, container, parentContainer)
		if err := daemon.updateNetwork(parentContainer); err != nil {
			logrus.Debugf("Could not update network to remove link %s: %v", n, err)
		}
	}
	return nil
}

// cleanupContainer unregisters a container from the daemon, stops stats
// collection and cleanly removes contents and metadata from the filesystem.
func (daemon *Daemon) cleanupContainer(container *container.Container, config types.ContainerRmConfig) error {
	if container.IsRunning() {
		if !config.ForceRemove {
			state := container.StateString()
			procedure := "Stop the container before attempting removal or force remove"
			if state == "paused" {
				procedure = "Unpause and then " + strings.ToLower(procedure)
			}
			err := fmt.Errorf("You cannot remove a %s container %s. %s", state, container.ID, procedure)
			return errdefs.Conflict(err)
		}
		if err := daemon.Kill(container); err != nil {
			return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err)
		}
	}

	// stop collection of stats for the container regardless
	// if stats are currently getting collected.
	daemon.statsCollector.StopCollection(container)

	// stopTimeout is the number of seconds to wait for the container to stop
	// gracefully before forcibly killing it.
	//
	// Why 3 seconds? The timeout specified here was originally added in commit
	// 1615bb08c7c3fc6c4b22db0a633edda516f97cf0, which added a custom timeout to
	// some commands, but lacking an option for a timeout on "docker rm", was
	// hardcoded to 10 seconds. Commit 28fd289b448164b77affd8103c0d96fd8110daf9
	// later on updated this to 3 seconds (but no background on that change).
	//
	// If you arrived here and know the answer, you earned yourself a picture
	// of a cute animal of your own choosing.
	var stopTimeout = 3
	if err := daemon.containerStop(context.TODO(), container, containertypes.StopOptions{Timeout: &stopTimeout}); err != nil {
		return err
	}

	// Mark container dead. We don't want anybody to be restarting it.
	container.Lock()
	container.Dead = true

	// Save container state to disk. So that if error happens before
	// container meta file got removed from disk, then a restart of
	// docker should not make a dead container alive.
	if err := container.CheckpointTo(daemon.containersReplica); err != nil && !os.IsNotExist(err) {
		logrus.Errorf("Error saving dying container to disk: %v", err)
	}
	container.Unlock()

	// When container creation fails and `RWLayer` has not been created yet, we
	// do not call `ReleaseRWLayer`
	if container.RWLayer != nil {
		if err := daemon.imageService.ReleaseLayer(container.RWLayer); err != nil {
			err = errors.Wrapf(err, "container %s", container.ID)
			container.SetRemovalError(err)
			return err
		}
		container.RWLayer = nil
	}

	if err := containerfs.EnsureRemoveAll(container.Root); err != nil {
		err = errors.Wrapf(err, "unable to remove filesystem for %s", container.ID)
		container.SetRemovalError(err)
		return err
	}

	linkNames := daemon.linkIndex.delete(container)
	selinux.ReleaseLabel(container.ProcessLabel)
	daemon.containers.Delete(container.ID)
	daemon.containersReplica.Delete(container)
	if err := daemon.removeMountPoints(container, config.RemoveVolume); err != nil {
		logrus.Error(err)
	}

	for _, name := range linkNames {
		daemon.releaseName(name)
	}
	container.SetRemoved()
	stateCtr.del(container.ID)

	daemon.LogContainerEvent(container, "destroy")
	return nil
}