mirror of https://github.com/moby/moby.git (synced 2022-11-09 12:21:53 -05:00)

commit 26b1064967
This PR adds a "request ID" to each event generated. The `docker events` stream now looks like this:
```
2015-09-10T15:02:50.000000000-07:00 [reqid: c01e3534ddca] de7c5d4ca927253cf4e978ee9c4545161e406e9b5a14617efb52c658b249174a: (from ubuntu) create
```
Note the `[reqid: c01e3534ddca]` part; that's new. Each HTTP request generates its own unique ID, so if you do a `docker build` you'll see a series of events that all share the same reqID. This allows log-processing tools to determine which events belong to the same HTTP request.

I didn't propagate the context to all possible funcs in the daemon; I decided to just do the ones that needed it in order to get the reqID into the events. I'd like people to review this direction first, and if we're OK with it then I'll make sure we're consistent about when we pass the context around - IOW, make sure that all funcs at the same level have a context passed in even if they don't call the log funcs - this will ensure we're consistent without passing it around for all calls unnecessarily.

ping @icecrime @calavera @crosbymichael

Signed-off-by: Doug Davis <dug@us.ibm.com>
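To make the pattern concrete, here is a minimal, self-contained sketch of the idea, using Go's standard-library `context` package rather than the `github.com/docker/docker/context` package this PR actually uses; the names `withRequestID` and `logEvent` are hypothetical, not part of the Docker codebase. One ID is minted per incoming HTTP request, stored in the context, and stamped onto every event emitted while serving that request:

```go
package main

import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// ctxKey is unexported so the request ID cannot collide with
// context values set by other packages.
type ctxKey struct{}

// newRequestID mints a short random hex ID, one per HTTP request.
func newRequestID() string {
	b := make([]byte, 6)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	return hex.EncodeToString(b)
}

// withRequestID stores a fresh request ID in the context so any
// daemon-side function can reach it without extra parameters.
func withRequestID(ctx context.Context) context.Context {
	return context.WithValue(ctx, ctxKey{}, newRequestID())
}

// logEvent stamps the request ID onto an event line; every event
// emitted while handling one request shares the same [reqid: ...] tag.
func logEvent(ctx context.Context, containerID, action string) {
	reqID, _ := ctx.Value(ctxKey{}).(string)
	fmt.Printf("[reqid: %s] %s: %s\n", reqID, containerID, action)
}

func main() {
	// One context per incoming API request.
	ctx := withRequestID(context.Background())

	// Both events from this request carry the same reqid.
	logEvent(ctx, "de7c5d4ca927", "create")
	logEvent(ctx, "de7c5d4ca927", "start")
}
```

This is also why every exported daemon method in the file below takes `ctx context.Context` as its first argument even when the method body never logs: the context is the carrier for the request ID.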
140 lines · 4.2 KiB · Go
package daemon

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/context"
)

// ContainerInspect returns low-level information about a
// container. Returns an error if the container cannot be found, or if
// there is an error getting the data.
func (daemon *Daemon) ContainerInspect(ctx context.Context, name string) (*types.ContainerJSON, error) {
	container, err := daemon.Get(ctx, name)
	if err != nil {
		return nil, err
	}

	container.Lock()
	defer container.Unlock()

	base, err := daemon.getInspectData(ctx, container)
	if err != nil {
		return nil, err
	}

	mountPoints := addMountPoints(container)

	return &types.ContainerJSON{base, mountPoints, container.Config}, nil
}

// ContainerInspect120 serializes the master version of a container into a json type.
func (daemon *Daemon) ContainerInspect120(ctx context.Context, name string) (*types.ContainerJSON120, error) {
	container, err := daemon.Get(ctx, name)
	if err != nil {
		return nil, err
	}

	container.Lock()
	defer container.Unlock()

	base, err := daemon.getInspectData(ctx, container)
	if err != nil {
		return nil, err
	}

	mountPoints := addMountPoints(container)
	config := &types.ContainerConfig120{
		container.Config,
		container.hostConfig.VolumeDriver,
	}

	return &types.ContainerJSON120{base, mountPoints, config}, nil
}

func (daemon *Daemon) getInspectData(ctx context.Context, container *Container) (*types.ContainerJSONBase, error) {
	// make a copy to play with
	hostConfig := *container.hostConfig

	if children, err := daemon.children(ctx, container.Name); err == nil {
		for linkAlias, child := range children {
			hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
		}
	}
	// we need this trick to preserve an empty log driver, so the
	// container will use the daemon defaults even if the daemon changes them
	if hostConfig.LogConfig.Type == "" {
		hostConfig.LogConfig.Type = daemon.defaultLogConfig.Type
	}

	if len(hostConfig.LogConfig.Config) == 0 {
		hostConfig.LogConfig.Config = daemon.defaultLogConfig.Config
	}

	containerState := &types.ContainerState{
		Status:     container.State.StateString(),
		Running:    container.State.Running,
		Paused:     container.State.Paused,
		Restarting: container.State.Restarting,
		OOMKilled:  container.State.OOMKilled,
		Dead:       container.State.Dead,
		Pid:        container.State.Pid,
		ExitCode:   container.State.ExitCode,
		Error:      container.State.Error,
		StartedAt:  container.State.StartedAt.Format(time.RFC3339Nano),
		FinishedAt: container.State.FinishedAt.Format(time.RFC3339Nano),
	}

	contJSONBase := &types.ContainerJSONBase{
		ID:              container.ID,
		Created:         container.Created.Format(time.RFC3339Nano),
		Path:            container.Path,
		Args:            container.Args,
		State:           containerState,
		Image:           container.ImageID,
		NetworkSettings: container.NetworkSettings,
		LogPath:         container.LogPath,
		Name:            container.Name,
		RestartCount:    container.RestartCount,
		Driver:          container.Driver,
		ExecDriver:      container.ExecDriver,
		MountLabel:      container.MountLabel,
		ProcessLabel:    container.ProcessLabel,
		ExecIDs:         container.getExecIDs(),
		HostConfig:      &hostConfig,
	}

	// Now set any platform-specific fields
	contJSONBase = setPlatformSpecificContainerFields(container, contJSONBase)

	contJSONBase.GraphDriver.Name = container.Driver
	graphDriverData, err := daemon.driver.GetMetadata(container.ID)
	if err != nil {
		return nil, err
	}
	contJSONBase.GraphDriver.Data = graphDriverData

	return contJSONBase, nil
}

// ContainerExecInspect returns low-level information about the exec
// command. An error is returned if the exec cannot be found.
func (daemon *Daemon) ContainerExecInspect(ctx context.Context, id string) (*ExecConfig, error) {
	eConfig, err := daemon.getExecConfig(id)
	if err != nil {
		return nil, err
	}
	return eConfig, nil
}

// VolumeInspect looks up a volume by name. An error is returned if
// the volume cannot be found.
func (daemon *Daemon) VolumeInspect(ctx context.Context, name string) (*types.Volume, error) {
	v, err := daemon.volumes.Get(name)
	if err != nil {
		return nil, err
	}
	return volumeToAPIType(v), nil
}
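A note on the "make a copy to play with" trick in `getInspectData` above: `hostConfig := *container.hostConfig` takes a shallow value copy, so the inspect path can append computed link entries and substitute the daemon's default log settings into the returned view without mutating the live container, whose deliberately empty log driver must stay empty so later changes to the daemon defaults still apply. Below is a standalone sketch of that copy-then-decorate pattern, with simplified stand-in types (not Docker's real ones):

```go
package main

import "fmt"

// Simplified stand-ins for the daemon's types (illustration only).
type LogConfig struct{ Type string }

type HostConfig struct {
	Links     []string
	LogConfig LogConfig
}

// inspect decorates a value copy of the live config; the caller's
// struct is never touched.
func inspect(live *HostConfig, defaults LogConfig) HostConfig {
	hc := *live // shallow value copy: edits below stay local to the result

	// Add computed link entries to the copy only.
	hc.Links = append(hc.Links, "/db:db")

	// Fill in the daemon default only in the returned view; the live
	// config keeps its empty Type so future default changes still apply.
	if hc.LogConfig.Type == "" {
		hc.LogConfig.Type = defaults.Type
	}
	return hc
}

func main() {
	live := &HostConfig{}
	view := inspect(live, LogConfig{Type: "json-file"})

	fmt.Printf("live: links=%d logType=%q\n", len(live.Links), live.LogConfig.Type) // live: links=0 logType=""
	fmt.Printf("view: links=%d logType=%q\n", len(view.Links), view.LogConfig.Type) // view: links=1 logType="json-file"
}
```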