Mirror of https://github.com/moby/moby.git
moby/moby: daemon/logger/jsonfilelog/read.go
Commit 01915a725e by Cory Snider: daemon/logger: follow LogFile without file watches
File watches have been a source of complexity and unreliability in the
LogFile follow implementation, especially when combined with file
rotation. File change events can be unreliably delivered, especially on
Windows, and the polling fallback adds latency. Following across
rotations has never worked reliably on Windows. Without synchronization
between the log writer and readers, race conditions abound: readers can
read from the file while a log entry is only partially written, leading
to decode errors and necessitating retries.

In addition to the complexities stemming from file watches, the LogFile
follow implementation had complexity from needing to handle file
truncations, and (due to a now-fixed bug in the polling file watcher
implementation) evictions to unlock the log file so it could be rotated.
Log files are now always rotated, never truncated, so these situations
no longer need to be handled by the follow code.

Rewrite the LogFile follow implementation in terms of waiting until
LogFile notifies it that a new message has been written to the log file.
The LogFile informs the follower of the file offset of the last complete
write so that the follower knows not to read past that, preventing it
from attempting to decode partial messages and making retries
unnecessary. Synchronization between LogFile and its followers is used
at critical points to prevent missed notifications of writes and races
between file rotations and the follower opening files for read.

Signed-off-by: Cory Snider <csnider@mirantis.com>
2022-05-19 15:22:22 -04:00
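
To make the mechanism in the commit message concrete, here is a minimal, hypothetical sketch (separate from read.go below) of the writer-to-follower notification pattern it describes: the writer records the byte offset of the last complete write and wakes followers, which never read past that offset. All names here (offsetNotifier, Notify, Wait, Close) are illustrative and are not the actual LogFile API.

package follow

import "sync"

// offsetNotifier tracks the offset of the last fully written log entry and
// lets followers block until it advances. Illustrative only; not the moby API.
type offsetNotifier struct {
	mu     sync.Mutex
	cond   *sync.Cond
	offset int64
	closed bool
}

func newOffsetNotifier() *offsetNotifier {
	n := &offsetNotifier{}
	n.cond = sync.NewCond(&n.mu)
	return n
}

// Notify is called by the writer after a complete entry has been flushed,
// with the file offset just past the end of that entry.
func (n *offsetNotifier) Notify(end int64) {
	n.mu.Lock()
	n.offset = end
	n.mu.Unlock()
	n.cond.Broadcast()
}

// Wait blocks until the last-complete-write offset exceeds prev (or the log
// is closed) and returns the new offset. A follower reads the file only up to
// the returned offset, so it never decodes a partially written entry.
func (n *offsetNotifier) Wait(prev int64) (offset int64, ok bool) {
	n.mu.Lock()
	defer n.mu.Unlock()
	for n.offset <= prev && !n.closed {
		n.cond.Wait()
	}
	return n.offset, !n.closed
}

// Close wakes all followers so they can observe that the log writer is done.
func (n *offsetNotifier) Close() {
	n.mu.Lock()
	n.closed = true
	n.mu.Unlock()
	n.cond.Broadcast()
}

// A follower loop would then look roughly like:
//
//	var read int64
//	for {
//		end, ok := notifier.Wait(read)
//		if !ok {
//			return
//		}
//		// decode entries from io.NewSectionReader(file, read, end-read)
//		read = end
//	}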


package jsonfilelog // import "github.com/docker/docker/daemon/logger/jsonfilelog"

import (
	"context"
	"encoding/json"
	"io"

	"github.com/docker/docker/api/types/backend"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog"
	"github.com/docker/docker/daemon/logger/loggerutils"
	"github.com/docker/docker/pkg/tailfile"
)

// ReadLogs implements the logger's LogReader interface for the logs
// created by this driver.
func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
	return l.writer.ReadLogs(config)
}

func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) {
	l.Reset()
	if err := dec.Decode(l); err != nil {
		return nil, err
	}

	var attrs []backend.LogAttr
	if len(l.Attrs) != 0 {
		attrs = make([]backend.LogAttr, 0, len(l.Attrs))
		for k, v := range l.Attrs {
			attrs = append(attrs, backend.LogAttr{Key: k, Value: v})
		}
	}
	msg := &logger.Message{
		Source:    l.Stream,
		Timestamp: l.Created,
		Line:      []byte(l.Log),
		Attrs:     attrs,
	}
	return msg, nil
}

type decoder struct {
	rdr io.Reader
	dec *json.Decoder
	jl  *jsonlog.JSONLog
}

func (d *decoder) Reset(rdr io.Reader) {
	d.rdr = rdr
	d.dec = nil
	if d.jl != nil {
		d.jl.Reset()
	}
}

func (d *decoder) Close() {
	d.dec = nil
	d.rdr = nil
	d.jl = nil
}

func (d *decoder) Decode() (msg *logger.Message, err error) {
	if d.dec == nil {
		d.dec = json.NewDecoder(d.rdr)
	}
	if d.jl == nil {
		d.jl = &jsonlog.JSONLog{}
	}
	return decodeLogLine(d.dec, d.jl)
}

// decodeFunc is used to create a decoder for the log file reader
func decodeFunc(rdr io.Reader) loggerutils.Decoder {
	return &decoder{
		rdr: rdr,
		dec: nil,
		jl:  nil,
	}
}

func getTailReader(ctx context.Context, r loggerutils.SizeReaderAt, req int) (io.Reader, int, error) {
	return tailfile.NewTailReader(ctx, r, req)
}