Merge pull request #24514 from cpuguy83/fix_log_follow_rotate
Fix issues with tailing rotated jsonlog file
This commit is contained in:
commit
cbd4e4d48a
1 changed file with 130 additions and 66 deletions
|
@ -3,11 +3,14 @@ package jsonfilelog
|
|||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"gopkg.in/fsnotify.v1"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/daemon/logger"
|
||||
"github.com/docker/docker/pkg/filenotify"
|
||||
|
@ -44,6 +47,10 @@ func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
|
|||
func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
|
||||
defer close(logWatcher.Msg)
|
||||
|
||||
// lock so the read stream doesn't get corrupted due to rotations or other log data written while we read
|
||||
// This will block writes!!!
|
||||
l.mu.Lock()
|
||||
|
||||
pth := l.writer.LogPath()
|
||||
var files []io.ReadSeeker
|
||||
for i := l.writer.MaxFiles(); i > 1; i-- {
|
||||
|
@ -63,6 +70,7 @@ func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.R
|
|||
latestFile, err := os.Open(pth)
|
||||
if err != nil {
|
||||
logWatcher.Err <- err
|
||||
l.mu.Unlock()
|
||||
return
|
||||
}
|
||||
defer latestFile.Close()
|
||||
|
@ -83,6 +91,7 @@ func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.R
|
|||
if err := latestFile.Close(); err != nil {
|
||||
logrus.Errorf("Error closing file: %v", err)
|
||||
}
|
||||
l.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -90,7 +99,6 @@ func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.R
|
|||
latestFile.Seek(0, os.SEEK_END)
|
||||
}
|
||||
|
||||
l.mu.Lock()
|
||||
l.readers[logWatcher] = struct{}{}
|
||||
l.mu.Unlock()
|
||||
|
||||
|
@ -131,19 +139,11 @@ func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since ti
|
|||
}
|
||||
}
|
||||
|
||||
func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) {
|
||||
dec := json.NewDecoder(f)
|
||||
l := &jsonlog.JSONLog{}
|
||||
|
||||
func watchFile(name string) (filenotify.FileWatcher, error) {
|
||||
fileWatcher, err := filenotify.New()
|
||||
if err != nil {
|
||||
logWatcher.Err <- err
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
f.Close()
|
||||
fileWatcher.Close()
|
||||
}()
|
||||
name := f.Name()
|
||||
|
||||
if err := fileWatcher.Add(name); err != nil {
|
||||
logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err)
|
||||
|
@ -151,72 +151,136 @@ func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan int
|
|||
fileWatcher = filenotify.NewPollingWatcher()
|
||||
|
||||
if err := fileWatcher.Add(name); err != nil {
|
||||
fileWatcher.Close()
|
||||
logrus.Debugf("error watching log file for modifications: %v", err)
|
||||
logWatcher.Err <- err
|
||||
return
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return fileWatcher, nil
|
||||
}
|
||||
|
||||
func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) {
|
||||
dec := json.NewDecoder(f)
|
||||
l := &jsonlog.JSONLog{}
|
||||
|
||||
name := f.Name()
|
||||
fileWatcher, err := watchFile(name)
|
||||
if err != nil {
|
||||
logWatcher.Err <- err
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
f.Close()
|
||||
fileWatcher.Close()
|
||||
}()
|
||||
|
||||
var retries int
|
||||
handleRotate := func() error {
|
||||
f.Close()
|
||||
fileWatcher.Remove(name)
|
||||
|
||||
// retry when the file doesn't exist
|
||||
for retries := 0; retries <= 5; retries++ {
|
||||
f, err = os.Open(name)
|
||||
if err == nil || !os.IsNotExist(err) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := fileWatcher.Add(name); err != nil {
|
||||
return err
|
||||
}
|
||||
dec = json.NewDecoder(f)
|
||||
return nil
|
||||
}
|
||||
|
||||
errRetry := errors.New("retry")
|
||||
errDone := errors.New("done")
|
||||
waitRead := func() error {
|
||||
select {
|
||||
case e := <-fileWatcher.Events():
|
||||
switch e.Op {
|
||||
case fsnotify.Write:
|
||||
dec = json.NewDecoder(f)
|
||||
return nil
|
||||
case fsnotify.Rename, fsnotify.Remove:
|
||||
select {
|
||||
case <-notifyRotate:
|
||||
case <-logWatcher.WatchClose():
|
||||
fileWatcher.Remove(name)
|
||||
return errDone
|
||||
}
|
||||
if err := handleRotate(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return errRetry
|
||||
case err := <-fileWatcher.Errors():
|
||||
logrus.Debug("logger got error watching file: %v", err)
|
||||
// Something happened, let's try and stay alive and create a new watcher
|
||||
if retries <= 5 {
|
||||
fileWatcher.Close()
|
||||
fileWatcher, err = watchFile(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
retries++
|
||||
return errRetry
|
||||
}
|
||||
return err
|
||||
case <-logWatcher.WatchClose():
|
||||
fileWatcher.Remove(name)
|
||||
return errDone
|
||||
}
|
||||
}
|
||||
|
||||
var retries int
|
||||
handleDecodeErr := func(err error) error {
|
||||
if err == io.EOF {
|
||||
for err := waitRead(); err != nil; {
|
||||
if err == errRetry {
|
||||
// retry the waitRead
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// try again because this shouldn't happen
|
||||
if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry {
|
||||
dec = json.NewDecoder(f)
|
||||
retries++
|
||||
return nil
|
||||
}
|
||||
// io.ErrUnexpectedEOF is returned from json.Decoder when there is
|
||||
// remaining data in the parser's buffer while an io.EOF occurs.
|
||||
// If the json logger writes a partial json log entry to the disk
|
||||
// while at the same time the decoder tries to decode it, the race condition happens.
|
||||
if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry {
|
||||
reader := io.MultiReader(dec.Buffered(), f)
|
||||
dec = json.NewDecoder(reader)
|
||||
retries++
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// main loop
|
||||
for {
|
||||
msg, err := decodeLogLine(dec, l)
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
// try again because this shouldn't happen
|
||||
if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry {
|
||||
dec = json.NewDecoder(f)
|
||||
retries++
|
||||
continue
|
||||
if err := handleDecodeErr(err); err != nil {
|
||||
if err == errDone {
|
||||
return
|
||||
}
|
||||
|
||||
// io.ErrUnexpectedEOF is returned from json.Decoder when there is
|
||||
// remaining data in the parser's buffer while an io.EOF occurs.
|
||||
// If the json logger writes a partial json log entry to the disk
|
||||
// while at the same time the decoder tries to decode it, the race condition happens.
|
||||
if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry {
|
||||
reader := io.MultiReader(dec.Buffered(), f)
|
||||
dec = json.NewDecoder(reader)
|
||||
retries++
|
||||
continue
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case <-fileWatcher.Events():
|
||||
dec = json.NewDecoder(f)
|
||||
continue
|
||||
case <-fileWatcher.Errors():
|
||||
// we got an unrecoverable error, so return
|
||||
logWatcher.Err <- err
|
||||
return
|
||||
case <-logWatcher.WatchClose():
|
||||
fileWatcher.Remove(name)
|
||||
return
|
||||
case <-notifyRotate:
|
||||
f.Close()
|
||||
fileWatcher.Remove(name)
|
||||
|
||||
// retry when the file doesn't exist
|
||||
for retries := 0; retries <= 5; retries++ {
|
||||
f, err = os.Open(name)
|
||||
if err == nil || !os.IsNotExist(err) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err = fileWatcher.Add(name); err != nil {
|
||||
logWatcher.Err <- err
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
logWatcher.Err <- err
|
||||
return
|
||||
}
|
||||
|
||||
dec = json.NewDecoder(f)
|
||||
continue
|
||||
}
|
||||
// ready to try again
|
||||
continue
|
||||
}
|
||||
|
||||
retries = 0 // reset retries since we've succeeded
|
||||
|
|
Loading…
Add table
Reference in a new issue