mirror of https://github.com/moby/moby.git synced 2022-11-09 12:21:53 -05:00

Revert "use pubsub instead of filenotify to follow json logs"

This reverts commit b1594c59f5.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
Brian Goff 2016-02-23 21:07:38 -05:00
parent f78091897a
commit 91fdfdd537
3 changed files with 95 additions and 84 deletions
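The revert replaces the pubsub write-notification follow loop with the earlier filenotify-based follower: instead of the logger publishing an event after every write, the reader watches the log file itself and re-decodes whenever it changes. Below is a minimal sketch of that tailing pattern, assuming the pkg/filenotify API that appears in the diff further down (filenotify.New, filenotify.NewPollingWatcher, and the watcher's Add/Events/Errors/Close). The file path and the follow helper are illustrative only, and the sketch tails plain text rather than the driver's JSON log stream.

package main

import (
	"bufio"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/docker/docker/pkg/filenotify"
)

// follow prints lines appended to the file at path, waiting for file-change
// notifications between reads instead of being told about every write.
func follow(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	// Prefer the inotify-backed watcher; fall back to polling if the watch
	// cannot be established, as the driver does in the diff below.
	w, err := filenotify.New()
	if err != nil {
		return err
	}
	if err := w.Add(path); err != nil {
		w.Close()
		w = filenotify.NewPollingWatcher()
		if err := w.Add(path); err != nil {
			return err
		}
	}
	defer w.Close()

	r := bufio.NewReader(f)
	var partial string
	for {
		line, err := r.ReadString('\n')
		partial += line
		if err == nil {
			fmt.Print(partial) // a complete line was available
			partial = ""
			continue
		}
		if err != io.EOF {
			return err
		}
		// At EOF: block until the file changes, then try reading again.
		select {
		case <-w.Events():
		case werr := <-w.Errors():
			return werr
		}
	}
}

func main() {
	// Illustrative path only.
	if err := follow("/var/log/example.log"); err != nil {
		log.Fatal(err)
	}
}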

View file

@@ -14,7 +14,6 @@ import (
 	"github.com/docker/docker/daemon/logger"
 	"github.com/docker/docker/daemon/logger/loggerutils"
 	"github.com/docker/docker/pkg/jsonlog"
-	"github.com/docker/docker/pkg/pubsub"
 	"github.com/docker/go-units"
 )
@@ -29,7 +28,6 @@ type JSONFileLogger struct {
 	ctx     logger.Context
 	readers map[*logger.LogWatcher]struct{} // stores the active log followers
 	extra   []byte                          // json-encoded extra attributes
-	writeNotifier *pubsub.Publisher
 }
 
 func init() {
@@ -83,7 +81,6 @@ func New(ctx logger.Context) (logger.Logger, error) {
 		writer:  writer,
 		readers: make(map[*logger.LogWatcher]struct{}),
 		extra:   extra,
-		writeNotifier: pubsub.NewPublisher(0, 10),
 	}, nil
 }
@@ -107,7 +104,6 @@ func (l *JSONFileLogger) Log(msg *logger.Message) error {
 	l.buf.WriteByte('\n')
 	_, err = l.writer.Write(l.buf.Bytes())
-	l.writeNotifier.Publish(struct{}{})
 	l.buf.Reset()
 	return err
@@ -141,7 +137,6 @@ func (l *JSONFileLogger) Close() error {
 		r.Close()
 		delete(l.readers, r)
 	}
-	l.writeNotifier.Close()
 	l.mu.Unlock()
 	return err
 }

View file

@@ -10,11 +10,14 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/pkg/filenotify"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/jsonlog"
 	"github.com/docker/docker/pkg/tailfile"
 )
 
+const maxJSONDecodeRetry = 20000
+
 func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) {
 	l.Reset()
 	if err := dec.Decode(l); err != nil {
@@ -32,6 +35,7 @@ func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, erro
 // created by this driver.
 func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
 	logWatcher := logger.NewLogWatcher()
+
 	go l.readLogs(logWatcher, config)
 	return logWatcher
 }
@@ -81,7 +85,7 @@ func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.R
 	l.mu.Unlock()
 
 	notifyRotate := l.writer.NotifyRotate()
-	l.followLogs(latestFile, logWatcher, notifyRotate, config.Since)
+	followLogs(latestFile, logWatcher, notifyRotate, config.Since)
 
 	l.mu.Lock()
 	delete(l.readers, logWatcher)
@@ -117,32 +121,90 @@ func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since ti
 	}
 }
 
-func (l *JSONFileLogger) followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) {
-	var (
-		rotated     bool
-		dec         = json.NewDecoder(f)
-		log         = &jsonlog.JSONLog{}
-		writeNotify = l.writeNotifier.Subscribe()
-		watchClose  = logWatcher.WatchClose()
-	)
-	reopenLogFile := func() error {
-		f.Close()
-		f, err := os.Open(f.Name())
-		if err != nil {
-			return err
-		}
-		dec = json.NewDecoder(f)
-		rotated = true
-		return nil
-	}
-	readToEnd := func() error {
-		for {
-			msg, err := decodeLogLine(dec, log)
-			if err != nil {
-				return err
+func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) {
+	dec := json.NewDecoder(f)
+	l := &jsonlog.JSONLog{}
+	fileWatcher, err := filenotify.New()
+	if err != nil {
+		logWatcher.Err <- err
+	}
+	defer fileWatcher.Close()
+	var retries int
+	for {
+		msg, err := decodeLogLine(dec, l)
+		if err != nil {
+			if err != io.EOF {
+				// try again because this shouldn't happen
+				if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry {
+					dec = json.NewDecoder(f)
+					retries++
+					continue
+				}
+				// io.ErrUnexpectedEOF is returned from json.Decoder when there is
+				// remaining data in the parser's buffer while an io.EOF occurs.
+				// If the json logger writes a partial json log entry to the disk
+				// while at the same time the decoder tries to decode it, the race condition happens.
+				if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry {
+					reader := io.MultiReader(dec.Buffered(), f)
+					dec = json.NewDecoder(reader)
+					retries++
+					continue
+				}
+				logWatcher.Err <- err
+				return
+			}
+			logrus.WithField("logger", "json-file").Debugf("waiting for events")
+			if err := fileWatcher.Add(f.Name()); err != nil {
+				logrus.WithField("logger", "json-file").Warn("falling back to file poller")
+				fileWatcher.Close()
+				fileWatcher = filenotify.NewPollingWatcher()
+				if err := fileWatcher.Add(f.Name()); err != nil {
+					logrus.Errorf("error watching log file for modifications: %v", err)
+					logWatcher.Err <- err
+				}
+			}
+			select {
+			case <-fileWatcher.Events():
+				dec = json.NewDecoder(f)
+				fileWatcher.Remove(f.Name())
+				continue
+			case <-fileWatcher.Errors():
+				fileWatcher.Remove(f.Name())
+				logWatcher.Err <- err
+				return
+			case <-logWatcher.WatchClose():
+				fileWatcher.Remove(f.Name())
+				return
+			case <-notifyRotate:
+				f, err = os.Open(f.Name())
+				if err != nil {
+					logWatcher.Err <- err
+					return
+				}
+				dec = json.NewDecoder(f)
+				fileWatcher.Remove(f.Name())
+				fileWatcher.Add(f.Name())
+				continue
+			}
+		}
+		retries = 0 // reset retries since we've succeeded
+		if !since.IsZero() && msg.Timestamp.Before(since) {
+			continue
+		}
+		select {
+		case logWatcher.Msg <- msg:
+		case <-logWatcher.WatchClose():
+			logWatcher.Msg <- msg
+			for {
+				msg, err := decodeLogLine(dec, l)
+				if err != nil {
+					return
 				}
 				if !since.IsZero() && msg.Timestamp.Before(since) {
 					continue
@@ -150,49 +212,5 @@ func (l *JSONFileLogger) followLogs(f *os.File, logWatcher *logger.LogWatcher, n
 				logWatcher.Msg <- msg
 			}
 		}
-	defer func() {
-		l.writeNotifier.Evict(writeNotify)
-		if rotated {
-			f.Close()
-		}
-	}()
-	for {
-		select {
-		case <-watchClose:
-			readToEnd()
-			return
-		case <-notifyRotate:
-			readToEnd()
-			if err := reopenLogFile(); err != nil {
-				logWatcher.Err <- err
-				return
-			}
-		case _, ok := <-writeNotify:
-			if err := readToEnd(); err == io.EOF {
-				if !ok {
-					// The writer is closed, no new logs will be generated.
-					return
-				}
-				select {
-				case <-notifyRotate:
-					if err := reopenLogFile(); err != nil {
-						logWatcher.Err <- err
-						return
-					}
-				default:
-					dec = json.NewDecoder(f)
-				}
-			} else if err == io.ErrUnexpectedEOF {
-				dec = json.NewDecoder(io.MultiReader(dec.Buffered(), f))
-			} else {
-				logrus.Errorf("Failed to decode json log %s: %v", f.Name(), err)
-				logWatcher.Err <- err
-				return
-			}
-		}
-	}
 	}
 }
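The partial-write recovery in followLogs above relies on json.Decoder.Buffered: when a decode fails with io.ErrUnexpectedEOF because the writer has only flushed part of an entry, the bytes the decoder already pulled in are recovered via dec.Buffered() and prepended to the remaining input with io.MultiReader before retrying. Below is a self-contained illustration of that standard-library technique with made-up sample data; the entry type and strings are not from the driver.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
)

type entry struct {
	Log string `json:"log"`
}

func main() {
	// Simulate a log file whose last JSON entry was only partially written.
	partial := bytes.NewBufferString(`{"log":"first"}` + "\n" + `{"log":"sec`)

	dec := json.NewDecoder(partial)
	for {
		var e entry
		err := dec.Decode(&e)
		if err == nil {
			fmt.Println("decoded:", e.Log)
			continue
		}
		if err == io.EOF {
			break
		}
		if err == io.ErrUnexpectedEOF {
			// The writer finishes the entry later; keep the bytes the decoder
			// already buffered and resume from them plus the newly written data.
			rest := bytes.NewBufferString(`ond"}` + "\n")
			dec = json.NewDecoder(io.MultiReader(dec.Buffered(), rest))
			continue
		}
		log.Fatal(err)
	}
}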

View file

@@ -56,10 +56,8 @@ func (p *Publisher) SubscribeTopic(topic topicFunc) chan interface{} {
 // Evict removes the specified subscriber from receiving any more messages.
 func (p *Publisher) Evict(sub chan interface{}) {
 	p.m.Lock()
-	if _, ok := p.subscribers[sub]; ok {
-		delete(p.subscribers, sub)
-		close(sub)
-	}
+	delete(p.subscribers, sub)
+	close(sub)
 	p.m.Unlock()
 }