Fixes some issues with jsonfile write/read

This cleans up some of the use of the filepoller, which makes reading
significantly more robust and gives fewer chances to fall back to the
polling-based watcher.
In a lot of cases, if the file was being rotated while we were adding it
to the watcher, the add would return an error that the file doesn't exist
and we would fall back.
In some cases this fallback could be triggered multiple times even when
we were already on the fallback/poll-based watcher.
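
The resulting pattern looks roughly like the sketch below; this is a minimal
illustration, assuming the `filenotify.New`/`filenotify.NewPollingWatcher`
constructors and a hypothetical `watchLogFile` helper, not the exact daemon code:
```go
package example

import (
	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/filenotify"
)

// watchLogFile is a hypothetical helper showing the fallback: prefer the
// event-based watcher and only switch to the polling watcher when adding the
// file fails, e.g. because it was rotated away at that exact moment.
func watchLogFile(name string) (filenotify.FileWatcher, error) {
	fileWatcher, err := filenotify.New()
	if err != nil {
		return nil, err
	}
	if err := fileWatcher.Add(name); err != nil {
		logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err)
		fileWatcher.Close()
		fileWatcher = filenotify.NewPollingWatcher()
		if err := fileWatcher.Add(name); err != nil {
			fileWatcher.Close()
			return nil, err
		}
	}
	return fileWatcher, nil
}
```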

It also fixes an open-file leak caused by not closing files properly on
rotate, and by not closing files read via the `tail` function until the
log reader itself completed.
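
For illustration only (the `tailRotated` helper and its parameters are
assumptions, not the real functions), the shape of the fix is to close the
rotated files as soon as the tail pass finishes rather than deferring the
closes until the reader returns:
```go
package example

import (
	"io"
	"os"
)

// tailRotated sketches the fix: open the rotated files, hand them to the
// tail pass once, then close them immediately instead of deferring the
// closes until the whole log reader (which may follow forever) returns.
func tailRotated(paths []string, tail func([]io.ReadSeeker)) error {
	var readers []io.ReadSeeker
	for _, p := range paths {
		f, err := os.Open(p)
		if err != nil {
			if os.IsNotExist(err) {
				continue // file was rotated away in the meantime; skip it
			}
			return err
		}
		// previously: defer f.Close() — held every rotated file open until
		// the log reader itself returned (potentially forever with `-f`)
		readers = append(readers, f)
	}
	tail(readers)
	for _, r := range readers {
		r.(io.Closer).Close() // release descriptors as soon as tailing is done
	}
	return nil
}
```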

Prior to the above changes, it was relatively simple to make the log
reader error out by forcing quick rotations, for example:
```
$ docker run --name test --log-opt max-size=10b --log-opt max-file=10 \
    -d busybox sh -c 'while true; do usleep 500000; echo hello; done'
$ docker logs -f test
```
After these changes I can run this forever without error.

Another fix removes two `os.Stat` calls when rotating files. The stat
calls are not needed since we call `os.Rename` anyway, which produces the
same error that `Stat` would. These `Stat` calls were also quite expensive.
Removing them also seemed to resolve an issue causing slow memory growth
on the daemon.
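
A sketch of the simplified rotation step, using a hypothetical `shiftBackup`
helper rather than the real code in the diff below:
```go
package example

import (
	"os"
	"strconv"
)

// shiftBackup renames name.(i-1) to name.i. os.Rename reports the same
// not-exist error a pre-flight os.Stat would, and it replaces an existing
// destination on the platforms the daemon targets, so the extra Stat
// syscalls per file can simply be dropped.
func shiftBackup(name string, i int) error {
	from := name + "." + strconv.Itoa(i-1)
	to := name + "." + strconv.Itoa(i)
	if err := os.Rename(from, to); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}
```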

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
Brian Goff 2016-03-17 12:13:23 -04:00
parent 04c7d94c71
commit 1ae9dcf97d
3 changed files with 76 additions and 41 deletions

@@ -54,7 +54,6 @@ func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.R
}
continue
}
defer f.Close()
files = append(files, f)
}
@@ -63,15 +62,19 @@ func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.R
logWatcher.Err <- err
return
}
defer latestFile.Close()
files = append(files, latestFile)
tailer := ioutils.MultiReadSeeker(files...)
if config.Tail != 0 {
tailer := ioutils.MultiReadSeeker(append(files, latestFile)...)
tailFile(tailer, logWatcher, config.Tail, config.Since)
}
// close all the rotated files
for _, f := range files {
if err := f.(io.Closer).Close(); err != nil {
logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err)
}
}
if !config.Follow {
return
}
@@ -129,7 +132,23 @@ func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan int
if err != nil {
logWatcher.Err <- err
}
defer fileWatcher.Close()
defer func() {
f.Close()
fileWatcher.Close()
}()
name := f.Name()
if err := fileWatcher.Add(name); err != nil {
logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err)
fileWatcher.Close()
fileWatcher = filenotify.NewPollingWatcher()
if err := fileWatcher.Add(name); err != nil {
logrus.Debugf("error watching log file for modifications: %v", err)
logWatcher.Err <- err
return
}
}
var retries int
for {
@@ -153,42 +172,42 @@ func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan int
retries++
continue
}
logWatcher.Err <- err
return
}
logrus.WithField("logger", "json-file").Debugf("waiting for events")
if err := fileWatcher.Add(f.Name()); err != nil {
logrus.WithField("logger", "json-file").Warn("falling back to file poller")
fileWatcher.Close()
fileWatcher = filenotify.NewPollingWatcher()
if err := fileWatcher.Add(f.Name()); err != nil {
logrus.Errorf("error watching log file for modifications: %v", err)
logWatcher.Err <- err
}
}
select {
case <-fileWatcher.Events():
dec = json.NewDecoder(f)
fileWatcher.Remove(f.Name())
continue
case <-fileWatcher.Errors():
fileWatcher.Remove(f.Name())
logWatcher.Err <- err
return
case <-logWatcher.WatchClose():
fileWatcher.Remove(f.Name())
fileWatcher.Remove(name)
return
case <-notifyRotate:
f, err = os.Open(f.Name())
f.Close()
fileWatcher.Remove(name)
// retry when the file doesn't exist
for retries := 0; retries <= 5; retries++ {
f, err = os.Open(name)
if err == nil || !os.IsNotExist(err) {
break
}
}
if err = fileWatcher.Add(name); err != nil {
logWatcher.Err <- err
return
}
if err != nil {
logWatcher.Err <- err
return
}
dec = json.NewDecoder(f)
fileWatcher.Remove(f.Name())
fileWatcher.Add(f.Name())
continue
}
}

@@ -87,33 +87,17 @@ func rotate(name string, maxFiles int) error {
for i := maxFiles - 1; i > 1; i-- {
toPath := name + "." + strconv.Itoa(i)
fromPath := name + "." + strconv.Itoa(i-1)
if err := backup(fromPath, toPath); err != nil && !os.IsNotExist(err) {
if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) {
return err
}
}
if err := backup(name, name+".1"); err != nil {
if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// backup renames a file from fromPath to toPath
func backup(fromPath, toPath string) error {
if _, err := os.Stat(fromPath); os.IsNotExist(err) {
return err
}
if _, err := os.Stat(toPath); !os.IsNotExist(err) {
err := os.Remove(toPath)
if err != nil {
return err
}
}
return os.Rename(fromPath, toPath)
}
// LogPath returns the location the given writer logs to.
func (w *RotateFileWriter) LogPath() string {
return w.f.Name()

@@ -0,0 +1,32 @@
package main
import (
"fmt"
"strings"
"time"
"github.com/go-check/check"
)
func (s *DockerSuite) BenchmarkLogsCLIRotateFollow(c *check.C) {
out, _ := dockerCmd(c, "run", "-d", "--log-opt", "max-size=1b", "--log-opt", "max-file=10", "busybox", "sh", "-c", "while true; do usleep 50000; echo hello; done")
id := strings.TrimSpace(out)
ch := make(chan error, 1)
go func() {
ch <- nil
out, _, _ := dockerCmdWithError("logs", "-f", id)
// if this returns at all, it's an error
ch <- fmt.Errorf(out)
}()
<-ch
select {
case <-time.After(30 * time.Second):
// ran for 30 seconds with no problem
return
case err := <-ch:
if err != nil {
c.Fatal(err)
}
}
}