// Package logger defines interfaces that logger drivers implement to
// log messages.
//
// The other half of a logger driver is the implementation of the
// factory, which holds the contextual instance information that
// allows multiple loggers of the same type to perform different
// actions, such as logging to different locations.
package logger // import "github.com/docker/docker/daemon/logger"
import (
	"sync"
	"time"

	"github.com/docker/docker/api/types/backend"
)
// ErrReadLogsNotSupported is returned when the underlying log driver does not support reading
type ErrReadLogsNotSupported struct{}

func (ErrReadLogsNotSupported) Error() string {
	return "configured logging driver does not support reading"
}

// NotImplemented makes this error implement the `NotImplemented` interface from api/errdefs
func (ErrReadLogsNotSupported) NotImplemented() {}
const (
	logWatcherBufferSize = 4096
)
var messagePool = &sync.Pool{New: func() interface{} { return &Message{Line: make([]byte, 0, 256)} }}
// NewMessage returns a new message from the message sync.Pool
func NewMessage() *Message {
	return messagePool.Get().(*Message)
}
// PutMessage puts the specified message back in the message pool.
// The message fields are reset before putting into the pool.
func PutMessage(msg *Message) {
	msg.reset()
	messagePool.Put(msg)
}
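
// The function below is an illustrative sketch (not part of the package's
// exported API) of the intended message pool lifecycle: NewMessage hands out
// a pooled *Message, the caller populates it and hands it to a driver, and
// PutMessage resets the fields and recycles the message once whoever owns it
// is finished with it. The field values here are examples only.
func exampleMessageLifecycle() {
	msg := NewMessage()
	msg.Line = append(msg.Line, "hello from a container"...) // copy the bytes into the pooled buffer
	msg.Source = "stdout"
	msg.Timestamp = time.Now()
	// ... hand msg to a Logger here ...
	PutMessage(msg) // resets the fields and returns msg to the pool
}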
// Message is a data structure that represents a piece of output produced by
// some container. The Line member is a slice of an array whose contents can
// be changed after a log driver's Log() method returns.
//
// Message is subtyped from backend.LogMessage because there is a lot of
// internal complexity around the Message type that should not be exposed
// to any package not explicitly importing the logger type.
//
// Any changes made to this struct must also be reflected in the `reset`
// function.
type Message backend.LogMessage
// reset sets the message back to default values
// This is used when putting a message back into the message pool.
// Any changes to the `Message` struct should be reflected here.
func (m *Message) reset() {
	m.Line = m.Line[:0]
	m.Source = ""
	m.Attrs = nil
	m.PLogMetaData = nil
	m.Err = nil
}
// AsLogMessage returns a pointer to the message as a pointer to
// backend.LogMessage, which is an identical type with a different purpose
func (m *Message) AsLogMessage() *backend.LogMessage {
	return (*backend.LogMessage)(m)
}
// Logger is the interface for docker logging drivers.
type Logger interface {
	Log(*Message) error
	Name() string
	Close() error
}
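
// The type below is an illustrative sketch (not part of the package's
// exported API) of a minimal Logger implementation: it satisfies the
// interface by discarding every message. The driver name "discard" is a
// hypothetical example, not a registered Docker logging driver.
type discardLogger struct{}

func (discardLogger) Log(*Message) error { return nil } // drop the message
func (discardLogger) Name() string       { return "discard" }
func (discardLogger) Close() error       { return nil }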
// SizedLogger is the interface for logging drivers that can control
// the size of buffer used for their messages.
type SizedLogger interface {
	Logger
	BufSize() int
}
// ReadConfig is the configuration passed into ReadLogs.
type ReadConfig struct {
	Since  time.Time
	Until  time.Time
	Tail   int
	Follow bool
}
// LogReader is the interface for reading log messages for loggers that support reading.
type LogReader interface {
	// Read logs from the underlying logging backend
	ReadLogs(ReadConfig) *LogWatcher
}
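
// tailAndFollow is an illustrative sketch (not part of the exported API)
// showing how ReadConfig and LogReader are meant to be combined: here the
// caller asks for the last 100 lines written within the past hour and keeps
// following new output. The concrete values are examples only.
func tailAndFollow(r LogReader) *LogWatcher {
	return r.ReadLogs(ReadConfig{
		Since:  time.Now().Add(-time.Hour),
		Tail:   100,
		Follow: true,
	})
}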
// LogWatcher is used when consuming logs read from the LogReader interface.
type LogWatcher struct {
	// For sending log messages to a reader.
	Msg chan *Message
	// For sending error messages that occur while reading logs.
	Err          chan error
	producerOnce sync.Once
	producerGone chan struct{}
	consumerOnce sync.Once
	consumerGone chan struct{}
}
// NewLogWatcher returns a new LogWatcher.
func NewLogWatcher() *LogWatcher {
	return &LogWatcher{
		Msg:          make(chan *Message, logWatcherBufferSize),
		Err:          make(chan error, 1),
		producerGone: make(chan struct{}),
		consumerGone: make(chan struct{}),
	}
}
// ProducerGone notifies the underlying log reader that
// the logs producer (a container) is gone.
func (w *LogWatcher) ProducerGone() {
	// only close if not already closed
	w.producerOnce.Do(func() {
		close(w.producerGone)
	})
}
// WatchProducerGone returns a channel receiver that receives notification
// once the logs producer (a container) is gone.
func (w *LogWatcher) WatchProducerGone() <-chan struct{} {
	return w.producerGone
}
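
// followUntilGone is an illustrative sketch (not part of the exported API) of
// the loop a follow-mode log reader might run: it keeps forwarding messages
// to the watcher until either the producer (the container) or the consumer
// (e.g. a "docker logs --follow" client) goes away. The next callback is a
// hypothetical stand-in for the driver's actual "read the next message" logic.
func followUntilGone(w *LogWatcher, next func() (*Message, error)) {
	for {
		msg, err := next()
		if err != nil {
			w.Err <- err // report the error to the consumer (Err is buffered with capacity 1)
			return
		}
		select {
		case w.Msg <- msg:
		case <-w.WatchProducerGone():
			return
		case <-w.WatchConsumerGone():
			return
		}
	}
}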
// ConsumerGone notifies that the logs consumer is gone.
func (w *LogWatcher) ConsumerGone() {
	// only close if not already closed
	w.consumerOnce.Do(func() {
		close(w.consumerGone)
	})
}
// WatchConsumerGone returns a channel receiver that receives notification
// when the log watcher consumer is gone.
func (w *LogWatcher) WatchConsumerGone() <-chan struct{} {
	return w.consumerGone
}
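
// drainWatcher is an illustrative sketch (not part of the exported API) of a
// log consumer built on the signals above: it reads from the LogWatcher until
// the reader closes the Msg channel or reports an error, and signals
// ConsumerGone on exit so the reading side can release its resources. The
// handle callback is a placeholder for whatever the caller does with each
// message.
func drainWatcher(w *LogWatcher, handle func(*Message)) error {
	defer w.ConsumerGone() // tell the producer side that this consumer is done
	for {
		select {
		case msg, ok := <-w.Msg:
			if !ok {
				// The reader closed the message channel; no more logs.
				return nil
			}
			handle(msg)
		case err := <-w.Err:
			return err
		}
	}
}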
// Capability defines the list of capabilities that a driver can implement.
// These capabilities are not required of a logging driver; however, they do
// determine how a logging driver can be used.
type Capability struct {
	// Determines if a log driver can read back logs
	ReadLogs bool
}
// MarshalFunc is a func that marshals a message into an arbitrary format
type MarshalFunc func(*Message) ([]byte, error)
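
// plainTextMarshal is an illustrative MarshalFunc sketch (not part of the
// exported API): it renders a message as "SOURCE: LINE\n" without any of the
// escaping or metadata handling a real driver format would need.
func plainTextMarshal(msg *Message) ([]byte, error) {
	out := make([]byte, 0, len(msg.Source)+len(msg.Line)+3)
	out = append(out, msg.Source...)
	out = append(out, ": "...)
	out = append(out, msg.Line...)
	out = append(out, '\n')
	return out, nil
}

// The sketch satisfies the MarshalFunc signature.
var _ MarshalFunc = plainTextMarshal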