moby/moby: daemon/delete.go


package daemon // import "github.com/docker/docker/daemon"

import (
	"context"
	"fmt"
	"os"
	"path"
	"strings"
	"time"

	"github.com/docker/docker/api/types"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/container"
	"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/containerfs"
"github.com/opencontainers/selinux/go-selinux"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// ContainerRm removes the container id from the filesystem. An error
// is returned if the container is not found, or if the remove
// fails. If the remove succeeds, the container name is released, and
// network links are removed.
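//
// A minimal illustrative call (hypothetical caller code, assuming daemon is a
// *Daemon instance; not part of this file) could look like:
//
//	err := daemon.ContainerRm("mycontainer", &types.ContainerRmConfig{
//		ForceRemove:  true,
//		RemoveVolume: true,
//	})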
func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error {
	start := time.Now()
	ctr, err := daemon.GetContainer(name)
	if err != nil {
		return err
	}

	// Container state RemovalInProgress should be used to avoid races.
	if inProgress := ctr.SetRemovalInProgress(); inProgress {
		err := fmt.Errorf("removal of container %s is already in progress", name)
		return errdefs.Conflict(err)
	}
	defer ctr.ResetRemovalInProgress()

	// check if container wasn't deregistered by previous rm since Get
	if c := daemon.containers.Get(ctr.ID); c == nil {
		return nil
	}

	if config.RemoveLink {
		return daemon.rmLink(ctr, name)
	}

	err = daemon.cleanupContainer(ctr, *config)
	containerActions.WithValues("delete").UpdateSince(start)
	return err
}
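
// rmLink releases the given link name for the container and updates the
// parent container's network configuration. Only the link is removed; the
// linked container itself is left in place.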
func (daemon *Daemon) rmLink(container *container.Container, name string) error {
	if name[0] != '/' {
		name = "/" + name
	}
	parent, n := path.Split(name)
	if parent == "/" {
		return fmt.Errorf("Conflict, cannot remove the default link name of the container")
	}
	parent = strings.TrimSuffix(parent, "/")
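	// Resolve the parent container's ID from a snapshot of the container view.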
	pe, err := daemon.containersReplica.Snapshot().GetID(parent)
	if err != nil {
		return fmt.Errorf("Cannot get parent %s for link name %s", parent, name)
	}
	daemon.releaseName(name)
	parentContainer, _ := daemon.GetContainer(pe)
	if parentContainer != nil {
		daemon.linkIndex.unlink(name, container, parentContainer)
		if err := daemon.updateNetwork(parentContainer); err != nil {
			logrus.Debugf("Could not update network to remove link %s: %v", n, err)
		}
	}
	return nil
}

// cleanupContainer unregisters a container from the daemon, stops stats
// collection and cleanly removes contents and metadata from the filesystem.
func (daemon *Daemon) cleanupContainer(container *container.Container, config types.ContainerRmConfig) error {
	if container.IsRunning() {
		if !config.ForceRemove {
			state := container.StateString()
			procedure := "Stop the container before attempting removal or force remove"
			if state == "paused" {
				procedure = "Unpause and then " + strings.ToLower(procedure)
			}
			err := fmt.Errorf("You cannot remove a %s container %s. %s", state, container.ID, procedure)
			return errdefs.Conflict(err)
		}
		if err := daemon.Kill(container); err != nil {
return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err)
}
}
	// Stop collection of stats for the container, regardless of whether stats
	// are currently being collected.
	daemon.statsCollector.StopCollection(container)

	// stopTimeout is the number of seconds to wait for the container to stop
	// gracefully before forcibly killing it.
	//
	// Why 3 seconds? The timeout specified here was originally added in commit
	// 1615bb08c7c3fc6c4b22db0a633edda516f97cf0, which added a custom timeout to
	// some commands, but, lacking an option for a timeout on "docker rm", was
	// hardcoded to 10 seconds. Commit 28fd289b448164b77affd8103c0d96fd8110daf9
	// later on updated this to 3 seconds (but no background on that change).
	//
	// If you arrived here and know the answer, you earned yourself a picture
	// of a cute animal of your own choosing.
	var stopTimeout = 3
	if err := daemon.containerStop(context.TODO(), container, containertypes.StopOptions{Timeout: &stopTimeout}); err != nil {
		return err
	}
	// Mark container dead. We don't want anybody to be restarting it.
	container.Lock()
	container.Dead = true

	// Save the container state to disk, so that if an error happens before the
	// container's metadata file is removed from disk, a restart of docker does
	// not bring the dead container back to life.
	if err := container.CheckpointTo(daemon.containersReplica); err != nil && !os.IsNotExist(err) {
		logrus.Errorf("Error saving dying container to disk: %v", err)
	}
	container.Unlock()
	// When container creation fails and `RWLayer` has not been created yet, we
	// do not call `ReleaseLayer`.
	if container.RWLayer != nil {
		if err := daemon.imageService.ReleaseLayer(container.RWLayer); err != nil {
			err = errors.Wrapf(err, "container %s", container.ID)
			container.SetRemovalError(err)
			return err
		}
		container.RWLayer = nil
	}
	// Hold the container lock while deleting the container root directory
	// so that other goroutines don't attempt to concurrently open files
	// within it. Having any file open on Windows (without the
	// FILE_SHARE_DELETE flag) will block it from being deleted.
	container.Lock()
	err := containerfs.EnsureRemoveAll(container.Root)
	container.Unlock()
	if err != nil {
		err = errors.Wrapf(err, "unable to remove filesystem for %s", container.ID)
		container.SetRemovalError(err)
		return err
	}
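
	// Detach the container from the link index, release its SELinux label, and
	// unregister it from the daemon's in-memory store and replicated view.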
	linkNames := daemon.linkIndex.delete(container)
	selinux.ReleaseLabel(container.ProcessLabel)
	daemon.containers.Delete(container.ID)
	daemon.containersReplica.Delete(container)
	if err := daemon.removeMountPoints(container, config.RemoveVolume); err != nil {
		logrus.Error(err)
	}
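
	// Release the names that were held by the removed links, then mark the
	// container as removed and emit a "destroy" event.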
	for _, name := range linkNames {
		daemon.releaseName(name)
	}
	container.SetRemoved()
	stateCtr.del(container.ID)

	daemon.LogContainerEvent(container, "destroy")
	return nil
}