2015-07-21 18:26:52 -04:00
|
|
|
// Package jsonfilelog provides the default Logger implementation for
|
|
|
|
// Docker logging. This logger logs to files on the host server in the
|
|
|
|
// JSON format.
|
2015-02-04 14:04:58 -05:00
|
|
|
package jsonfilelog
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
2015-07-03 09:50:06 -04:00
|
|
|
"encoding/json"
|
2015-06-30 20:40:13 -04:00
|
|
|
"fmt"
|
2017-06-30 13:13:32 -04:00
|
|
|
"io"
|
2015-06-30 20:40:13 -04:00
|
|
|
"strconv"
|
2015-03-18 15:52:42 -04:00
|
|
|
"sync"
|
2015-02-04 14:04:58 -05:00
|
|
|
|
|
|
|
"github.com/docker/docker/daemon/logger"
|
2017-09-25 15:57:45 -04:00
|
|
|
"github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog"
|
2015-11-14 18:02:06 -05:00
|
|
|
"github.com/docker/docker/daemon/logger/loggerutils"
|
2017-09-25 15:52:42 -04:00
|
|
|
units "github.com/docker/go-units"
|
2017-06-30 13:13:32 -04:00
|
|
|
"github.com/pkg/errors"
|
2017-07-26 17:42:13 -04:00
|
|
|
"github.com/sirupsen/logrus"
|
2015-02-04 14:04:58 -05:00
|
|
|
)
|
|
|
|
|
2015-10-08 16:55:28 -04:00
|
|
|
// Name is the name of the logging driver, as registered with the logger registry.
|
|
|
|
const Name = "json-file"
|
2015-04-09 00:23:30 -04:00
|
|
|
|
// JSONFileLogger is Logger implementation for default Docker logging.
type JSONFileLogger struct {
	extra []byte // json-encoded extra attributes, pre-marshaled once in New()

	mu      sync.RWMutex                    // guards buf (Log) and closed/readers (Close)
	buf     *bytes.Buffer                   // avoids allocating a new buffer on each call to `Log()`
	closed  bool                            // set under mu by Close(); presumably read by the follower machinery — TODO confirm
	writer  *loggerutils.RotateFileWriter   // size/count-limited rotating destination file
	readers map[*logger.LogWatcher]struct{} // stores the active log followers
}
|
|
|
|
|
|
|
|
func init() {
|
|
|
|
if err := logger.RegisterLogDriver(Name, New); err != nil {
|
|
|
|
logrus.Fatal(err)
|
|
|
|
}
|
2015-06-30 20:40:13 -04:00
|
|
|
if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil {
|
|
|
|
logrus.Fatal(err)
|
|
|
|
}
|
2015-02-04 14:04:58 -05:00
|
|
|
}
|
|
|
|
|
2015-07-21 18:26:52 -04:00
|
|
|
// New creates new JSONFileLogger which writes to filename passed in
|
|
|
|
// on given context.
|
2016-11-26 00:08:34 -05:00
|
|
|
func New(info logger.Info) (logger.Logger, error) {
|
2015-06-30 20:40:13 -04:00
|
|
|
var capval int64 = -1
|
2016-11-26 00:08:34 -05:00
|
|
|
if capacity, ok := info.Config["max-size"]; ok {
|
2015-06-30 20:40:13 -04:00
|
|
|
var err error
|
|
|
|
capval, err = units.FromHumanSize(capacity)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
2015-07-21 18:26:52 -04:00
|
|
|
var maxFiles = 1
|
2016-11-26 00:08:34 -05:00
|
|
|
if maxFileString, ok := info.Config["max-file"]; ok {
|
2015-11-14 18:02:06 -05:00
|
|
|
var err error
|
2015-06-30 20:40:13 -04:00
|
|
|
maxFiles, err = strconv.Atoi(maxFileString)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if maxFiles < 1 {
|
2015-08-17 18:34:39 -04:00
|
|
|
return nil, fmt.Errorf("max-file cannot be less than 1")
|
2015-06-30 20:40:13 -04:00
|
|
|
}
|
|
|
|
}
|
2015-10-04 17:07:09 -04:00
|
|
|
|
2016-11-26 00:08:34 -05:00
|
|
|
writer, err := loggerutils.NewRotateFileWriter(info.LogPath, capval, maxFiles)
|
2015-11-14 18:02:06 -05:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2015-10-04 17:07:09 -04:00
|
|
|
var extra []byte
|
2016-11-08 19:34:47 -05:00
|
|
|
attrs, err := info.ExtraAttributes(nil)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if len(attrs) > 0 {
|
2015-10-04 17:07:09 -04:00
|
|
|
var err error
|
|
|
|
extra, err = json.Marshal(attrs)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-02-04 14:04:58 -05:00
|
|
|
return &JSONFileLogger{
|
2016-02-23 21:07:38 -05:00
|
|
|
buf: bytes.NewBuffer(nil),
|
|
|
|
writer: writer,
|
|
|
|
readers: make(map[*logger.LogWatcher]struct{}),
|
|
|
|
extra: extra,
|
2015-02-04 14:04:58 -05:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2015-07-21 18:26:52 -04:00
|
|
|
// Log converts logger.Message to jsonlog.JSONLog and serializes it to file.
|
2015-02-04 14:04:58 -05:00
|
|
|
func (l *JSONFileLogger) Log(msg *logger.Message) error {
|
2017-06-30 13:13:32 -04:00
|
|
|
l.mu.Lock()
|
|
|
|
err := writeMessageBuf(l.writer, msg, l.extra, l.buf)
|
|
|
|
l.buf.Reset()
|
|
|
|
l.mu.Unlock()
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
func writeMessageBuf(w io.Writer, m *logger.Message, extra json.RawMessage, buf *bytes.Buffer) error {
|
|
|
|
if err := marshalMessage(m, extra, buf); err != nil {
|
|
|
|
logger.PutMessage(m)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
logger.PutMessage(m)
|
2017-09-25 15:52:42 -04:00
|
|
|
_, err := w.Write(buf.Bytes())
|
|
|
|
return errors.Wrap(err, "error writing log entry")
|
2017-06-30 13:13:32 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
func marshalMessage(msg *logger.Message, extra json.RawMessage, buf *bytes.Buffer) error {
|
|
|
|
logLine := msg.Line
|
Improve logging of long log lines
This change updates how we handle long lines of output from the
container. The previous logic used a bufio reader to read entire lines
of output from the container through an intermediate BytesPipe, and that
allowed the container to cause dockerd to consume an unconstrained
amount of memory as it attempted to collect a whole line of output, by
outputting data without newlines.
To avoid that, we replace the bufio reader with our own buffering scheme
that handles log lines up to 16k in length, breaking up anything longer
than that into multiple chunks. If we can dispense with noting this
detail properly at the end of output, we can switch from using
ReadBytes() to using ReadLine() instead. We add a field ("Partial") to
the log message structure to flag when we pass data to the log driver
that did not end with a newline.
The Line member of Message structures that we pass to log drivers is now
a slice into data which can be overwritten between calls to the log
driver's Log() method, so drivers which batch up Messages before
processing them need to take additional care: we add a function
(logger.CopyMessage()) that can be used to create a deep copy of a
Message structure, and modify the awslogs driver to use it.
We update the jsonfile log driver to append a "\n" to the data that it
logs to disk only when the Partial flag is false (it previously did so
unconditionally), to make its "logs" output correctly reproduce the data
as we received it.
Likewise, we modify the journald log driver to add a data field with
value CONTAINER_PARTIAL_MESSAGE=true to entries when the Partial flag is
true, and update its "logs" reader to refrain from appending a "\n" to
the data that it retrieves if it does not see this field/value pair (it
also previously did this unconditionally).
Signed-off-by: Nalin Dahyabhai <nalin@redhat.com> (github: nalind)
2016-05-24 14:12:47 -04:00
|
|
|
if !msg.Partial {
|
2017-06-30 13:13:32 -04:00
|
|
|
logLine = append(msg.Line, '\n')
|
Improve logging of long log lines
This change updates how we handle long lines of output from the
container. The previous logic used a bufio reader to read entire lines
of output from the container through an intermediate BytesPipe, and that
allowed the container to cause dockerd to consume an unconstrained
amount of memory as it attempted to collect a whole line of output, by
outputting data without newlines.
To avoid that, we replace the bufio reader with our own buffering scheme
that handles log lines up to 16k in length, breaking up anything longer
than that into multiple chunks. If we can dispense with noting this
detail properly at the end of output, we can switch from using
ReadBytes() to using ReadLine() instead. We add a field ("Partial") to
the log message structure to flag when we pass data to the log driver
that did not end with a newline.
The Line member of Message structures that we pass to log drivers is now
a slice into data which can be overwritten between calls to the log
driver's Log() method, so drivers which batch up Messages before
processing them need to take additional care: we add a function
(logger.CopyMessage()) that can be used to create a deep copy of a
Message structure, and modify the awslogs driver to use it.
We update the jsonfile log driver to append a "\n" to the data that it
logs to disk only when the Partial flag is false (it previously did so
unconditionally), to make its "logs" output correctly reproduce the data
as we received it.
Likewise, we modify the journald log driver to add a data field with
value CONTAINER_PARTIAL_MESSAGE=true to entries when the Partial flag is
true, and update its "logs" reader to refrain from appending a "\n" to
the data that it retrieves if it does not see this field/value pair (it
also previously did this unconditionally).
Signed-off-by: Nalin Dahyabhai <nalin@redhat.com> (github: nalind)
2016-05-24 14:12:47 -04:00
|
|
|
}
|
2017-09-22 15:37:16 -04:00
|
|
|
err := (&jsonlog.JSONLogs{
|
2017-06-30 13:13:32 -04:00
|
|
|
Log: logLine,
|
2015-10-04 17:07:09 -04:00
|
|
|
Stream: msg.Source,
|
2017-09-22 15:37:16 -04:00
|
|
|
Created: msg.Timestamp,
|
2017-06-30 13:13:32 -04:00
|
|
|
RawAttrs: extra,
|
|
|
|
}).MarshalJSONBuf(buf)
|
2015-02-04 14:04:58 -05:00
|
|
|
if err != nil {
|
2017-06-30 13:13:32 -04:00
|
|
|
return errors.Wrap(err, "error writing log message to buffer")
|
2015-02-04 14:04:58 -05:00
|
|
|
}
|
2017-06-30 13:13:32 -04:00
|
|
|
err = buf.WriteByte('\n')
|
|
|
|
return errors.Wrap(err, "error finalizing log buffer")
|
2015-06-30 20:40:13 -04:00
|
|
|
}
|
|
|
|
|
// ValidateLogOpt looks for json specific log options max-file & max-size.
func ValidateLogOpt(cfg map[string]string) error {
	for key := range cfg {
		switch key {
		case "max-file", "max-size", "labels", "env", "env-regex":
			// recognized option; its value is validated when the logger is created
		default:
			return fmt.Errorf("unknown log opt '%s' for json-file log driver", key)
		}
	}
	return nil
}
|
|
|
|
|
// LogPath returns the location the given json logger logs to.
func (l *JSONFileLogger) LogPath() string {
	// The rotating writer owns the target file path; delegate to it.
	return l.writer.LogPath()
}
|
|
|
|
|
2015-07-21 18:26:52 -04:00
|
|
|
// Close closes underlying file and signals all readers to stop.
|
2015-02-04 14:04:58 -05:00
|
|
|
func (l *JSONFileLogger) Close() error {
|
2015-07-03 09:50:06 -04:00
|
|
|
l.mu.Lock()
|
2017-03-15 11:41:12 -04:00
|
|
|
l.closed = true
|
2015-11-14 18:02:06 -05:00
|
|
|
err := l.writer.Close()
|
2015-07-03 09:50:06 -04:00
|
|
|
for r := range l.readers {
|
|
|
|
r.Close()
|
|
|
|
delete(l.readers, r)
|
|
|
|
}
|
|
|
|
l.mu.Unlock()
|
|
|
|
return err
|
2015-02-04 14:04:58 -05:00
|
|
|
}
|
|
|
|
|
// Name returns name of this logger.
func (l *JSONFileLogger) Name() string {
	// The driver name under which init() registered this logger.
	return Name
}
|