logger/*: fix reading w/ non-monotonic timestamps

Fix the journald and logfile-powered (jsonfile, local) log readers,
which incorrectly filtered out messages with timestamps < Since
whenever they were preceded by a message with a timestamp >= Since.

Signed-off-by: Cory Snider <csnider@mirantis.com>
This commit is contained in:
Cory Snider 2022-02-10 14:47:24 -05:00
parent 342b44bf20
commit e278d3f185
5 changed files with 72 additions and 70 deletions

View file

@ -100,7 +100,8 @@ var errDrainDone = errors.New("journald drain done")
// journal is reached without encountering a terminal stopping condition, // journal is reached without encountering a terminal stopping condition,
// err == nil is returned. // err == nil is returned.
func (s *journald) drainJournal(logWatcher *logger.LogWatcher, j *sdjournal.Journal, config logger.ReadConfig, initial chan struct{}) (int, error) { func (s *journald) drainJournal(logWatcher *logger.LogWatcher, j *sdjournal.Journal, config logger.ReadConfig, initial chan struct{}) (int, error) {
if initial != nil { isInitial := initial != nil
if isInitial {
defer func() { defer func() {
if initial != nil { if initial != nil {
close(initial) close(initial)
@ -148,7 +149,7 @@ func (s *journald) drainJournal(logWatcher *logger.LogWatcher, j *sdjournal.Jour
var sent int var sent int
for i := 0; ; i++ { for i := 0; ; i++ {
if initial != nil && i == 0 && config.Tail > 0 { if isInitial && i == 0 && config.Tail > 0 {
if n, err := j.PreviousSkip(uint(config.Tail)); err != nil || n == 0 { if n, err := j.PreviousSkip(uint(config.Tail)); err != nil || n == 0 {
return sent, err return sent, err
} }
@ -156,8 +157,9 @@ func (s *journald) drainJournal(logWatcher *logger.LogWatcher, j *sdjournal.Jour
return sent, err return sent, err
} }
if initial != nil && i == 0 { if isInitial && i == 0 {
// The cursor is in position. Signal that the watcher is // The cursor is in a position which will be unaffected
// by subsequent logging. Signal that the watcher is
// initialized. // initialized.
close(initial) close(initial)
initial = nil // Prevent double-closing. initial = nil // Prevent double-closing.
@ -168,11 +170,13 @@ func (s *journald) drainJournal(logWatcher *logger.LogWatcher, j *sdjournal.Jour
if err != nil { if err != nil {
return sent, err return sent, err
} }
if timestamp.Before(config.Since) { // Check if the PreviousSkip went too far back. Check only the
if initial != nil && i == 0 && config.Tail > 0 { // initial position as we are comparing wall-clock timestamps,
// PreviousSkip went too far back. Seek forwards. // which may not be monotonic. We don't want to skip over
j.SeekRealtime(config.Since) // messages sent later in time just because the clock moved
} // backwards.
if isInitial && i == 0 && config.Tail > 0 && timestamp.Before(config.Since) {
j.SeekRealtime(config.Since)
continue continue
} }
if !config.Until.IsZero() && config.Until.Before(timestamp) { if !config.Until.IsZero() && config.Until.Before(timestamp) {

View file

@ -51,6 +51,7 @@ func makeTestMessages() []*logger.Message {
{Source: "stderr", Timestamp: time.Now().Add(-1 * 15 * time.Minute), Line: []byte("continued"), PLogMetaData: &backend.PartialLogMetaData{ID: "bbbbbbbb", Ordinal: 2, Last: true}}, {Source: "stderr", Timestamp: time.Now().Add(-1 * 15 * time.Minute), Line: []byte("continued"), PLogMetaData: &backend.PartialLogMetaData{ID: "bbbbbbbb", Ordinal: 2, Last: true}},
{Source: "stderr", Timestamp: time.Now().Add(-1 * 10 * time.Minute), Line: []byte("a really long message " + strings.Repeat("a", 4096))}, {Source: "stderr", Timestamp: time.Now().Add(-1 * 10 * time.Minute), Line: []byte("a really long message " + strings.Repeat("a", 4096))},
{Source: "stderr", Timestamp: time.Now().Add(-1 * 10 * time.Minute), Line: []byte("just one more message")}, {Source: "stderr", Timestamp: time.Now().Add(-1 * 10 * time.Minute), Line: []byte("just one more message")},
{Source: "stdout", Timestamp: time.Now().Add(-1 * 90 * time.Minute), Line: []byte("someone adjusted the clock")},
} }
} }
@ -523,13 +524,12 @@ func readMessage(t *testing.T, lw *logger.LogWatcher) *logger.Message {
select { select {
case err, open := <-lw.Err: case err, open := <-lw.Err:
t.Errorf("unexpected receive on lw.Err with closed lw.Msg: err=%v, open=%v", err, open) t.Errorf("unexpected receive on lw.Err with closed lw.Msg: err=%v, open=%v", err, open)
return nil
default: default:
} }
return nil
} }
if msg != nil { assert.Assert(t, msg != nil)
t.Logf("loggertest: ReadMessage [%v %v] %s", msg.Source, msg.Timestamp, msg.Line) t.Logf("[%v] %s: %s", msg.Timestamp, msg.Source, msg.Line)
}
return msg return msg
} }
} }

View file

@ -4,7 +4,6 @@ import (
"fmt" "fmt"
"io" "io"
"os" "os"
"time"
"github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -12,10 +11,10 @@ import (
) )
type follow struct { type follow struct {
LogFile *LogFile LogFile *LogFile
Watcher *logger.LogWatcher Watcher *logger.LogWatcher
Decoder Decoder Decoder Decoder
Since, Until time.Time Forwarder *forwarder
log *logrus.Entry log *logrus.Entry
c chan logPos c chan logPos
@ -49,7 +48,7 @@ func (fl *follow) Do(f *os.File, read logPos) {
fl.Watcher.Err <- err fl.Watcher.Err <- err
return return
} }
if fl.decode(f) { if !fl.forward(f) {
return return
} }
@ -91,7 +90,7 @@ func (fl *follow) Do(f *os.File, read logPos) {
read.size = 0 read.size = 0
} }
if fl.decode(io.NewSectionReader(f, read.size, wrote.size-read.size)) { if !fl.forward(io.NewSectionReader(f, read.size, wrote.size-read.size)) {
return return
} }
read = wrote read = wrote
@ -132,34 +131,10 @@ func (fl *follow) nextPos(current logPos) (next logPos, ok bool) {
return next, true return next, true
} }
// decode decodes log messages from r and sends messages with timestamps between // forward decodes log messages from r and forwards them to the log watcher.
// Since and Until to the log watcher.
// //
// The return value, done, signals whether following should end due to a // The return value, cont, signals whether following should continue.
// condition encountered during decode. func (fl *follow) forward(r io.Reader) (cont bool) {
func (fl *follow) decode(r io.Reader) (done bool) {
fl.Decoder.Reset(r) fl.Decoder.Reset(r)
for { return fl.Forwarder.Do(fl.Watcher, fl.Decoder)
msg, err := fl.Decoder.Decode()
if err != nil {
if errors.Is(err, io.EOF) {
return false
}
fl.Watcher.Err <- err
return true
}
if !fl.Since.IsZero() && msg.Timestamp.Before(fl.Since) {
continue
}
if !fl.Until.IsZero() && msg.Timestamp.After(fl.Until) {
return true
}
// send the message, unless the consumer is gone
select {
case fl.Watcher.Msg <- msg:
case <-fl.Watcher.WatchConsumerGone():
return true
}
}
} }

View file

@ -411,6 +411,7 @@ func (w *LogFile) readLogsLocked(currentPos logPos, config logger.ReadConfig, wa
defer dec.Close() defer dec.Close()
currentChunk := io.NewSectionReader(currentFile, 0, currentPos.size) currentChunk := io.NewSectionReader(currentFile, 0, currentPos.size)
fwd := newForwarder(config)
if config.Tail != 0 { if config.Tail != 0 {
// TODO(@cpuguy83): Instead of opening every file, only get the files which // TODO(@cpuguy83): Instead of opening every file, only get the files which
@ -449,7 +450,7 @@ func (w *LogFile) readLogsLocked(currentPos logPos, config logger.ReadConfig, wa
readers = append(readers, currentChunk) readers = append(readers, currentChunk)
} }
ok := tailFiles(readers, watcher, dec, w.getTailReader, config) ok := tailFiles(readers, watcher, dec, w.getTailReader, config.Tail, fwd)
closeFiles() closeFiles()
if !ok { if !ok {
return return
@ -463,11 +464,10 @@ func (w *LogFile) readLogsLocked(currentPos logPos, config logger.ReadConfig, wa
} }
(&follow{ (&follow{
LogFile: w, LogFile: w,
Watcher: watcher, Watcher: watcher,
Decoder: dec, Decoder: dec,
Since: config.Since, Forwarder: fwd,
Until: config.Until,
}).Do(currentFile, currentPos) }).Do(currentFile, currentPos)
} }
@ -573,7 +573,7 @@ func decompress(dst io.WriteSeeker, src io.ReadSeeker) error {
return rc.Close() return rc.Close()
} }
func tailFiles(files []SizeReaderAt, watcher *logger.LogWatcher, dec Decoder, getTailReader GetTailReaderFunc, config logger.ReadConfig) (cont bool) { func tailFiles(files []SizeReaderAt, watcher *logger.LogWatcher, dec Decoder, getTailReader GetTailReaderFunc, nLines int, fwd *forwarder) (cont bool) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
@ -583,20 +583,18 @@ func tailFiles(files []SizeReaderAt, watcher *logger.LogWatcher, dec Decoder, ge
select { select {
case <-ctx.Done(): case <-ctx.Done():
case <-watcher.WatchConsumerGone(): case <-watcher.WatchConsumerGone():
cont = false
cancel() cancel()
} }
}() }()
readers := make([]io.Reader, 0, len(files)) readers := make([]io.Reader, 0, len(files))
if config.Tail > 0 { if nLines > 0 {
nLines := config.Tail
for i := len(files) - 1; i >= 0 && nLines > 0; i-- { for i := len(files) - 1; i >= 0 && nLines > 0; i-- {
tail, n, err := getTailReader(ctx, files[i], nLines) tail, n, err := getTailReader(ctx, files[i], nLines)
if err != nil { if err != nil {
watcher.Err <- errors.Wrap(err, "error finding file position to start log tailing") watcher.Err <- errors.Wrap(err, "error finding file position to start log tailing")
return return false
} }
nLines -= n nLines -= n
readers = append([]io.Reader{tail}, readers...) readers = append([]io.Reader{tail}, readers...)
@ -609,24 +607,47 @@ func tailFiles(files []SizeReaderAt, watcher *logger.LogWatcher, dec Decoder, ge
rdr := io.MultiReader(readers...) rdr := io.MultiReader(readers...)
dec.Reset(rdr) dec.Reset(rdr)
return fwd.Do(watcher, dec)
}
type forwarder struct {
since, until time.Time
}
func newForwarder(config logger.ReadConfig) *forwarder {
return &forwarder{since: config.Since, until: config.Until}
}
// Do reads log messages from dec and sends the messages matching the filter
// conditions to watcher. Do returns cont=true iff it has read all messages from
// dec without encountering a message with a timestamp which is after the
// configured until time.
func (fwd *forwarder) Do(watcher *logger.LogWatcher, dec Decoder) (cont bool) {
for { for {
msg, err := dec.Decode() msg, err := dec.Decode()
if err != nil { if err != nil {
if !errors.Is(err, io.EOF) { if errors.Is(err, io.EOF) {
watcher.Err <- err return true
} }
return watcher.Err <- err
return false
} }
if !config.Since.IsZero() && msg.Timestamp.Before(config.Since) { if !fwd.since.IsZero() {
continue if msg.Timestamp.Before(fwd.since) {
continue
}
// We've found our first message with a timestamp >= since. As message
// timestamps might not be monotonic, we need to skip the since check for all
// subsequent messages so we do not filter out later messages which happen to
// have timestamps before since.
fwd.since = time.Time{}
} }
if !config.Until.IsZero() && msg.Timestamp.After(config.Until) { if !fwd.until.IsZero() && msg.Timestamp.After(fwd.until) {
return return false
} }
select { select {
case <-ctx.Done(): case <-watcher.WatchConsumerGone():
return return false
case watcher.Msg <- msg: case watcher.Msg <- msg:
} }
} }

View file

@ -64,19 +64,21 @@ func TestTailFiles(t *testing.T) {
for desc, config := range map[string]logger.ReadConfig{} { for desc, config := range map[string]logger.ReadConfig{} {
t.Run(desc, func(t *testing.T) { t.Run(desc, func(t *testing.T) {
started := make(chan struct{}) started := make(chan struct{})
fwd := newForwarder(config)
go func() { go func() {
close(started) close(started)
tailFiles(files, watcher, dec, tailReader, config) tailFiles(files, watcher, dec, tailReader, config.Tail, fwd)
}() }()
<-started <-started
}) })
} }
config := logger.ReadConfig{Tail: 2} config := logger.ReadConfig{Tail: 2}
fwd := newForwarder(config)
started := make(chan struct{}) started := make(chan struct{})
go func() { go func() {
close(started) close(started)
tailFiles(files, watcher, dec, tailReader, config) tailFiles(files, watcher, dec, tailReader, config.Tail, fwd)
}() }()
<-started <-started