1
0
Fork 0
mirror of https://github.com/moby/moby.git synced 2022-11-09 12:21:53 -05:00
moby--moby/daemon/delete.go
Doug Davis 26b1064967 Add context.RequestID to event stream
This PR adds a "request ID" to each event generated, the 'docker events'
stream now looks like this:

```
2015-09-10T15:02:50.000000000-07:00 [reqid: c01e3534ddca] de7c5d4ca927253cf4e978ee9c4545161e406e9b5a14617efb52c658b249174a: (from ubuntu) create
```
Note the `[reqid: c01e3534ddca]` part; that's new.

Each HTTP request will generate its own unique ID. So, if you do a
`docker build` you'll see a series of events all with the same reqID.
This allows log-processing tools to determine which events are all related
to the same HTTP request.

I didn't propagate the context to all possible funcs in the daemon,
I decided to just do the ones that needed it in order to get the reqID
into the events. I'd like to have people review this direction first, and
if we're ok with it then I'll make sure we're consistent about when
we pass around the context - IOW, make sure that all funcs at the same level
have a context passed in even if they don't call the log funcs - this will
ensure we're consistent w/o passing it around for all calls unnecessarily.

ping @icecrime @calavera @crosbymichael

Signed-off-by: Doug Davis <dug@us.ibm.com>
2015-09-24 11:56:37 -07:00

166 lines
4.6 KiB
Go

package daemon
import (
"fmt"
"os"
"path"
"github.com/docker/docker/context"
"github.com/Sirupsen/logrus"
derr "github.com/docker/docker/errors"
"github.com/docker/docker/volume/store"
)
// ContainerRmConfig holds the runtime options used when removing a container.
type ContainerRmConfig struct {
	ForceRemove  bool // kill a running container instead of refusing to remove it
	RemoveVolume bool // also delete the container's mount points/volumes
	RemoveLink   bool // remove the named link to the container, not the container itself
}
// ContainerRm removes the container id from the filesystem. An error
// is returned if the container is not found, or if the remove
// fails. If the remove succeeds, the container name is released, and
// network links are removed.
func (daemon *Daemon) ContainerRm(ctx context.Context, name string, config *ContainerRmConfig) error {
	container, err := daemon.Get(ctx, name)
	if err != nil {
		return err
	}

	// Removing a link only detaches the alias from its parent
	// container; the target container itself is left untouched.
	if config.RemoveLink {
		fullName, err := GetFullContainerName(name)
		if err != nil {
			return err
		}
		parent, linkName := path.Split(fullName)
		if parent == "/" {
			return derr.ErrorCodeDefaultName
		}
		pe := daemon.containerGraph().Get(parent)
		if pe == nil {
			return derr.ErrorCodeNoParent.WithArgs(parent, fullName)
		}
		if err := daemon.containerGraph().Delete(fullName); err != nil {
			return err
		}
		// Best effort: refresh the parent's network config now that the
		// link is gone. A failure here is only logged, not returned.
		if parentContainer, _ := daemon.Get(ctx, pe.ID()); parentContainer != nil {
			if err := parentContainer.updateNetwork(ctx); err != nil {
				logrus.Debugf("Could not update network to remove link %s: %v", linkName, err)
			}
		}
		return nil
	}

	if err := daemon.rm(ctx, container, config.ForceRemove); err != nil {
		return err
	}

	// Mount-point removal errors are non-fatal; log and carry on.
	if err := container.removeMountPoints(config.RemoveVolume); err != nil {
		logrus.Error(err)
	}
	return nil
}
// rm unregisters a container from the daemon and cleanly removes its
// contents from the filesystem. When forceRemove is set, a running
// container is killed first, and a failure partway through removal
// still deletes the container from the daemon's indexes.
func (daemon *Daemon) rm(ctx context.Context, container *Container, forceRemove bool) (err error) {
	if container.IsRunning() {
		if !forceRemove {
			return derr.ErrorCodeRmRunning
		}
		if err := container.Kill(ctx); err != nil {
			return derr.ErrorCodeRmFailed.WithArgs(err)
		}
	}

	// stop collection of stats for the container regardless
	// if stats are currently getting collected.
	daemon.statsCollector.stopCollection(container)

	element := daemon.containers.Get(container.ID)
	if element == nil {
		return derr.ErrorCodeRmNotFound.WithArgs(container.ID)
	}

	// Container state RemovalInProgress should be used to avoid races.
	if err = container.setRemovalInProgress(); err != nil {
		return derr.ErrorCodeRmState.WithArgs(err)
	}
	defer container.resetRemovalInProgress()

	if err = container.Stop(ctx, 3); err != nil {
		return err
	}

	// Mark container dead. We don't want anybody to be restarting it.
	container.setDead()

	// Save container state to disk. So that if error happens before
	// container meta file got removed from disk, then a restart of
	// docker should not make a dead container alive.
	if err := container.toDiskLocking(); err != nil {
		logrus.Errorf("Error saving dying container to disk: %v", err)
	}

	// If force removal is required, delete container from various
	// indexes even if removal failed.
	// NOTE: this closure inspects the named return value `err`, so the
	// removal steps below must assign to it with `=`; shadowing it with
	// `:=` would keep this cleanup from ever firing.
	defer func() {
		if err != nil && forceRemove {
			daemon.idIndex.Delete(container.ID)
			daemon.containers.Delete(container.ID)
			os.RemoveAll(container.root)
			container.logEvent(ctx, "destroy")
		}
	}()

	// Purge failures are non-fatal; the link graph entry is only logged.
	if _, err := daemon.containerGraphDB.Purge(container.ID); err != nil {
		logrus.Debugf("Unable to remove container from link graph: %s", err)
	}

	if err = daemon.driver.Remove(container.ID); err != nil {
		return derr.ErrorCodeRmDriverFS.WithArgs(daemon.driver, container.ID, err)
	}

	initID := fmt.Sprintf("%s-init", container.ID)
	// BUGFIX: this previously used `:=`, shadowing the named return and
	// silently disabling the forced-removal defer above on this path.
	if err = daemon.driver.Remove(initID); err != nil {
		return derr.ErrorCodeRmInit.WithArgs(daemon.driver, initID, err)
	}

	if err = os.RemoveAll(container.root); err != nil {
		return derr.ErrorCodeRmFS.WithArgs(container.ID, err)
	}

	if err = daemon.execDriver.Clean(container.ID); err != nil {
		return derr.ErrorCodeRmExecDriver.WithArgs(container.ID, err)
	}

	selinuxFreeLxcContexts(container.ProcessLabel)
	daemon.idIndex.Delete(container.ID)
	daemon.containers.Delete(container.ID)
	container.logEvent(ctx, "destroy")
	return nil
}
// VolumeRm removes the volume with the given name. A volume that is
// still referenced by a container is not removed. This is called
// directly from the remote API.
func (daemon *Daemon) VolumeRm(ctx context.Context, name string) error {
	volume, err := daemon.volumes.Get(name)
	if err != nil {
		return err
	}
	err = daemon.volumes.Remove(volume)
	if err == nil {
		return nil
	}
	// An in-use volume gets its own dedicated error code.
	if err == store.ErrVolumeInUse {
		return derr.ErrorCodeRmVolumeInUse.WithArgs(err)
	}
	return derr.ErrorCodeRmVolume.WithArgs(name, err)
}