moby/moby: daemon/logs.go

package daemon // import "github.com/docker/docker/daemon"

import (
	"context"
	"strconv"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/backend"
	containertypes "github.com/docker/docker/api/types/container"
	timetypes "github.com/docker/docker/api/types/time"
	"github.com/docker/docker/container"
	"github.com/docker/docker/daemon/logger"
	logcache "github.com/docker/docker/daemon/logger/loggerutils/cache"
	"github.com/docker/docker/errdefs"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// ContainerLogs copies the container's log channel to the channel provided in
// the config. If ContainerLogs returns an error, no messages have been copied
// and the channel will be closed without data.
//
// If it returns nil, the config channel will be active and return log
// messages until it runs out or the context is canceled.
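//
// A minimal consumer sketch (hypothetical caller code; the container name and
// options are illustrative, not part of this package):
//
//	msgs, tty, err := daemon.ContainerLogs(ctx, "mycontainer", opts)
//	if err != nil {
//		return err // nothing was, or will be, sent on the channel
//	}
//	_ = tty
//	for m := range msgs {
//		// the range ends when logs run out or ctx is canceled
//		fmt.Printf("%s %s", m.Timestamp, m.Line)
//	}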
func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, config *types.ContainerLogsOptions) (messages <-chan *backend.LogMessage, isTTY bool, retErr error) {
	lg := logrus.WithFields(logrus.Fields{
		"module":    "daemon",
		"method":    "(*Daemon).ContainerLogs",
		"container": containerName,
	})

	if !(config.ShowStdout || config.ShowStderr) {
		return nil, false, errdefs.InvalidParameter(errors.New("You must choose at least one stream"))
	}
	ctr, err := daemon.GetContainer(containerName)
	if err != nil {
		return nil, false, err
	}

	if ctr.RemovalInProgress || ctr.Dead {
		return nil, false, errdefs.Conflict(errors.New("can not get logs from container which is dead or marked for removal"))
	}

	if ctr.HostConfig.LogConfig.Type == "none" {
		return nil, false, logger.ErrReadLogsNotSupported{}
	}

	cLog, cLogCreated, err := daemon.getLogger(ctr)
	if err != nil {
		return nil, false, err
	}
	if cLogCreated {
		defer func() {
			if retErr != nil {
				if err = cLog.Close(); err != nil {
					logrus.Errorf("Error closing logger: %v", err)
				}
			}
		}()
	}

	logReader, ok := cLog.(logger.LogReader)
	if !ok {
		return nil, false, logger.ErrReadLogsNotSupported{}
	}

	follow := config.Follow && !cLogCreated
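	// config.Tail is a line count as a string; values that do not parse as an
	// integer (such as the API's "all") fall through to -1 here. A negative
	// Tail is what tells the log reader to return every line (no tail limit).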
	tailLines, err := strconv.Atoi(config.Tail)
	if err != nil {
		tailLines = -1
	}

	var since time.Time
	if config.Since != "" {
		s, n, err := timetypes.ParseTimestamps(config.Since, 0)
		if err != nil {
			return nil, false, err
		}
		since = time.Unix(s, n)
	}

	var until time.Time
	if config.Until != "" && config.Until != "0" {
		s, n, err := timetypes.ParseTimestamps(config.Until, 0)
		if err != nil {
			return nil, false, err
		}
		until = time.Unix(s, n)
	}

	readConfig := logger.ReadConfig{
		Since:  since,
		Until:  until,
		Tail:   tailLines,
		Follow: follow,
	}

	logs := logReader.ReadLogs(readConfig)

	// past this point, we can't possibly return any errors, so we can just
	// start a goroutine and return to tell the caller not to expect errors
	// (if the caller wants to give up on logs, they have to cancel the context)
	// this goroutine functions as a shim between the logger and the caller.
	messageChan := make(chan *backend.LogMessage, 1)
	go func() {
		if cLogCreated {
			defer func() {
				if err = cLog.Close(); err != nil {
					logrus.Errorf("Error closing logger: %v", err)
				}
			}()
		}

		// signal that the log reader is gone
		defer logs.ConsumerGone()

		// close the messages channel. closing is the only way to signal above
		// that we're done with logs (other than context cancel i guess).
		defer close(messageChan)
		lg.Debug("begin logs")
		defer lg.Debugf("end logs (%v)", ctx.Err())

		for {
			select {
			// i do not believe as the system is currently designed any error
			// is possible, but we should be prepared to handle it anyway. if
			// we do get an error, copy only the error field to a new object so
			// we don't end up with partial data in the other fields
			case err := <-logs.Err:
				lg.Errorf("Error streaming logs: %v", err)
				select {
				case <-ctx.Done():
				case messageChan <- &backend.LogMessage{Err: err}:
				}
				return
			case <-ctx.Done():
				return
			case msg, ok := <-logs.Msg:
				// there is some kind of pool or ring buffer in the logger that
				// produces these messages, and a possible future optimization
				// might be to use that pool and reuse message objects
				if !ok {
					return
				}
				m := msg.AsLogMessage() // just a pointer conversion, does not copy data

				// there could be a case where the reader stops accepting
				// messages and the context is canceled. we need to check that
				// here, or otherwise we risk blocking forever on the message
				// send.
				select {
				case <-ctx.Done():
					return
				case messageChan <- m:
				}
			}
		}
	}()

	return messageChan, ctr.Config.Tty, nil
}
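
// getLogger returns the container's current log driver when the container is
// running, and otherwise starts a fresh one. The returned created flag
// reports whether this call started the logger, in which case the caller is
// responsible for closing it.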
func (daemon *Daemon) getLogger(container *container.Container) (l logger.Logger, created bool, err error) {
	container.Lock()
	if container.State.Running {
		l = container.LogDriver
	}
	container.Unlock()
	if l == nil {
		created = true
		l, err = container.StartLogger()
	}
	return
}

// mergeAndVerifyLogConfig merges the daemon's default log config into the
// container's log config if the container's log driver is not specified, then
// validates the resulting options.
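//
// As a hypothetical example: with a daemon default of Type "json-file" and
// Config {"max-size": "10m"}, a container config of Type "" and Config
// {"max-file": "3"} comes out as Type "json-file" with both "max-size" and
// "max-file" set, and is then validated for the "json-file" driver.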
func (daemon *Daemon) mergeAndVerifyLogConfig(cfg *containertypes.LogConfig) error {
	if cfg.Type == "" {
		cfg.Type = daemon.defaultLogConfig.Type
	}

	if cfg.Config == nil {
		cfg.Config = make(map[string]string)
	}

	if cfg.Type == daemon.defaultLogConfig.Type {
		for k, v := range daemon.defaultLogConfig.Config {
			if _, ok := cfg.Config[k]; !ok {
				cfg.Config[k] = v
			}
		}
	}

	logcache.MergeDefaultLogConfig(cfg.Config, daemon.defaultLogConfig.Config)

	return logger.ValidateLogOpts(cfg.Type, cfg.Config)
}
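
// setupDefaultLogConfig validates the log options from the daemon's config
// store, if any, and records them as the default log config that
// mergeAndVerifyLogConfig applies to containers without an explicit driver.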
func (daemon *Daemon) setupDefaultLogConfig() error {
	config := daemon.configStore
	if len(config.LogConfig.Config) > 0 {
		if err := logger.ValidateLogOpts(config.LogConfig.Type, config.LogConfig.Config); err != nil {
			return errors.Wrap(err, "failed to set log opts")
		}
	}
	daemon.defaultLogConfig = containertypes.LogConfig{
		Type:   config.LogConfig.Type,
		Config: config.LogConfig.Config,
	}
	logrus.Debugf("Using default logging driver %s", daemon.defaultLogConfig.Type)
	return nil
}