From 1ada3e85bf89201910c28f2ff6892c00cee0f137 Mon Sep 17 00:00:00 2001
From: Nalin Dahyabhai
Date: Wed, 7 Feb 2018 10:35:26 -0500
Subject: [PATCH 1/9] Small journal cleanup

Clean up a deferred function call in the journal reading logic.

Signed-off-by: Nalin Dahyabhai
---
 daemon/logger/journald/read.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/daemon/logger/journald/read.go b/daemon/logger/journald/read.go
index 4bddfd5781..981fabc9d8 100644
--- a/daemon/logger/journald/read.go
+++ b/daemon/logger/journald/read.go
@@ -330,12 +330,12 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon
     // here, potentially while the goroutine that uses them is still
     // running. Otherwise, close them when we return from this function.
     following := false
-    defer func(pfollowing *bool) {
-        if !*pfollowing {
+    defer func() {
+        if !following {
             close(logWatcher.Msg)
         }
         C.sd_journal_close(j)
-    }(&following)
+    }()
     // Remove limits on the size of data items that we'll retrieve.
     rc = C.sd_journal_set_data_threshold(j, C.size_t(0))
     if rc != 0 {
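For context, the reason the pointer argument was unnecessary: a deferred
closure captures the surrounding variable itself, so it already sees the
value the variable holds when the function returns. A minimal stand-alone
illustration (not part of the patch):

    package main

    import "fmt"

    func main() {
        following := false

        // The deferred closure captures `following` directly and therefore
        // observes its final value; no pointer indirection is required.
        defer func() {
            fmt.Println("following at exit:", following) // prints: true
        }()

        following = true
    }
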
From e8f6166791c097deb15c39f8dddf6f97be65b224 Mon Sep 17 00:00:00 2001
From: Kir Kolyshkin
Date: Sun, 10 Mar 2019 00:27:14 +0000
Subject: [PATCH 2/9] journald/read: simplify code

Minor code simplification.

Signed-off-by: Kir Kolyshkin
---
 daemon/logger/journald/read.go | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/daemon/logger/journald/read.go b/daemon/logger/journald/read.go
index 981fabc9d8..41384a9e4e 100644
--- a/daemon/logger/journald/read.go
+++ b/daemon/logger/journald/read.go
@@ -216,12 +216,12 @@ drain:
             // the stream that we would have
             // assigned that value.
             source := ""
-            if C.get_priority(j, &priority) != 0 {
-                source = ""
-            } else if priority == C.int(journal.PriErr) {
-                source = "stderr"
-            } else if priority == C.int(journal.PriInfo) {
-                source = "stdout"
+            if C.get_priority(j, &priority) == 0 {
+                if priority == C.int(journal.PriErr) {
+                    source = "stderr"
+                } else if priority == C.int(journal.PriInfo) {
+                    source = "stdout"
+                }
             }
             // Retrieve the values of any variables we're adding to the journal.
             var attrs []backend.LogAttr

From ff3cd167ea4d089b7695a263ba2fc4caa0a0750c Mon Sep 17 00:00:00 2001
From: Kir Kolyshkin
Date: Sun, 10 Mar 2019 03:49:06 +0000
Subject: [PATCH 3/9] journald/read: simplify walking backwards

When the Tail=N parameter is requested, we need to show N lines. It does
not make sense to walk backwards one entry at a time when we can jump
back N entries at once. Now, if Since=T is also provided, make sure we
have not jumped too far back (before T), and if we have, move forward
to T.

The primary motivation for this was to make the code simpler.

This also fixes a tiny bug in the "since" implementation.

Before this commit:

> $ docker logs -t --tail=6000 --since="2019-03-10T03:54:25.00" $ID | head
> 2019-03-10T03:54:24.999821000Z 95981

After:

> $ docker logs -t --tail=6000 --since="2019-03-10T03:54:25.00" $ID | head
> 2019-03-10T03:54:25.000013000Z 95982

Signed-off-by: Kir Kolyshkin
---
 daemon/logger/journald/read.go | 31 +++++++------------------------
 1 file changed, 7 insertions(+), 24 deletions(-)

diff --git a/daemon/logger/journald/read.go b/daemon/logger/journald/read.go
index 41384a9e4e..11a45415b1 100644
--- a/daemon/logger/journald/read.go
+++ b/daemon/logger/journald/read.go
@@ -361,7 +361,6 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon
         untilUnixMicro = uint64(nano / 1000)
     }
     if config.Tail > 0 {
-        lines := config.Tail
         // If until time provided, start from there.
         // Otherwise start at the end of the journal.
         if untilUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(untilUnixMicro)) < 0 {
@@ -371,29 +370,13 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon
             logWatcher.Err <- fmt.Errorf("error seeking to end of journal")
             return
         }
-        if C.sd_journal_previous(j) < 0 {
-            logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry")
-            return
-        }
-        // Walk backward.
-        for lines > 0 {
-            // Stop if the entry time is before our cutoff.
-            // We'll need the entry time if it isn't, so go
-            // ahead and parse it now.
-            if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
-                break
-            } else {
-                // Compare the timestamp on the entry to our threshold value.
-                if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) {
-                    break
-                }
-            }
-            lines--
-            // If we're at the start of the journal, or
-            // don't need to back up past any more entries,
-            // stop.
-            if lines == 0 || C.sd_journal_previous(j) <= 0 {
-                break
+        // (Try to) skip backwards by the requested number of lines...
+        if C.sd_journal_previous_skip(j, C.uint64_t(config.Tail)) > 0 {
+            // ...but not before "since"
+            if sinceUnixMicro != 0 &&
+                C.sd_journal_get_realtime_usec(j, &stamp) == 0 &&
+                uint64(stamp) < sinceUnixMicro {
+                C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro))
             }
         }
     } else {

From 79039720c8b7352691350bd56be3cc226d67f205 Mon Sep 17 00:00:00 2001
From: Kir Kolyshkin
Date: Sun, 10 Mar 2019 00:28:11 +0000
Subject: [PATCH 4/9] journald/read: avoid being blocked on send

If the LogConsumer is gone, the code that sends the message can get
stuck forever. Wrap the send in a select, as all other loggers do.

Signed-off-by: Kir Kolyshkin
---
 daemon/logger/journald/read.go | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/daemon/logger/journald/read.go b/daemon/logger/journald/read.go
index 11a45415b1..cbd1e24677 100644
--- a/daemon/logger/journald/read.go
+++ b/daemon/logger/journald/read.go
@@ -230,12 +230,17 @@ drain:
                 kv := strings.SplitN(C.GoStringN(data, C.int(length)), "=", 2)
                 attrs = append(attrs, backend.LogAttr{Key: kv[0], Value: kv[1]})
             }
-            // Send the log message.
-            logWatcher.Msg <- &logger.Message{
+            // Send the log message, unless the consumer is gone
+            select {
+            case <-logWatcher.WatchConsumerGone():
+                done = true // we won't be able to write anything anymore
+                break drain
+            case logWatcher.Msg <- &logger.Message{
                 Line:      line,
                 Source:    source,
                 Timestamp: timestamp.In(time.UTC),
                 Attrs:     attrs,
+            }:
             }
         }
         // If we're at the end of the journal, we're done (for now).
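The shape of the fix in the patch above, as a stand-alone sketch (the real
code selects on logWatcher.WatchConsumerGone() and logWatcher.Msg; here
`gone` and `out` are stand-ins):

    package main

    import (
        "fmt"
        "time"
    )

    // send tries to deliver msg on out, but gives up as soon as gone is
    // closed, so a writer can never block forever on a departed reader.
    func send(out chan<- string, gone <-chan struct{}, msg string) bool {
        select {
        case <-gone:
            return false // consumer went away; do not block
        case out <- msg:
            return true
        }
    }

    func main() {
        out := make(chan string) // unbuffered, like logWatcher.Msg
        gone := make(chan struct{})

        // Nobody ever reads from out; simulate the consumer disappearing.
        go func() {
            time.Sleep(100 * time.Millisecond)
            close(gone)
        }()

        fmt.Println("delivered:", send(out, gone, "hello")) // delivered: false
    }
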
From 981c01665bcb2c9fc5e555c5b976995f31c2a6b4 Mon Sep 17 00:00:00 2001
From: Kir Kolyshkin
Date: Mon, 11 Mar 2019 15:40:17 -0700
Subject: [PATCH 5/9] Call sd_journal_get_fd() earlier, only if needed

1. The journald client library initializes inotify watch(es) during
   the first call to sd_journal_get_fd(), so it makes sense to call it
   earlier in order not to lose any journal file rotation events.

2. It only makes sense to call this if we're going to use it later
   on -- so add a check for config.Follow.

3. Remove the redundant call to sd_journal_get_fd().

NOTE that any subsequent calls to sd_journal_get_fd() return the same
file descriptor, so there's no real need to save it for later use in
wait_for_data_cancelable().

Based on an earlier patch by Nalin Dahyabhai.

Signed-off-by: Kir Kolyshkin
---
 daemon/logger/journald/read.go | 32 ++++++++++++++++++--------------
 1 file changed, 18 insertions(+), 14 deletions(-)

diff --git a/daemon/logger/journald/read.go b/daemon/logger/journald/read.go
index cbd1e24677..8aa01c460d 100644
--- a/daemon/logger/journald/read.go
+++ b/daemon/logger/journald/read.go
@@ -330,6 +330,15 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon
         close(logWatcher.Msg)
         return
     }
+    if config.Follow {
+        // Initialize library inotify watches early
+        rc = C.sd_journal_get_fd(j)
+        if rc < 0 {
+            logWatcher.Err <- errors.New("error getting journald fd: " + CErr(rc))
+            close(logWatcher.Msg)
+            return
+        }
+    }
     // If we end up following the log, we can set the journal context
     // pointer and the channel pointer to nil so that we won't close them
     // here, potentially while the goroutine that uses them is still
@@ -402,21 +411,16 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon
     }
     cursor, _ = s.drainJournal(logWatcher, j, nil, untilUnixMicro)
     if config.Follow {
-        // Allocate a descriptor for following the journal, if we'll
-        // need one.  Do it here so that we can report if it fails.
-        if fd := C.sd_journal_get_fd(j); fd < C.int(0) {
-            logWatcher.Err <- fmt.Errorf("error opening journald follow descriptor: %q", C.GoString(C.strerror(-fd)))
+        // Create a pipe that we can poll at the same time as
+        // the journald descriptor.
+        ret := C.pipe(&pipes[0])
+        if ret < 0 {
+            logWatcher.Err <- fmt.Errorf("error creating journald notification pipe")
         } else {
-            // Create a pipe that we can poll at the same time as
-            // the journald descriptor.
-            if C.pipe(&pipes[0]) == C.int(-1) {
-                logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe")
-            } else {
-                cursor = s.followJournal(logWatcher, j, pipes, cursor, untilUnixMicro)
-                // Let followJournal handle freeing the journal context
-                // object and closing the channel.
-                following = true
-            }
+            cursor = s.followJournal(logWatcher, j, pipes, cursor, untilUnixMicro)
+            // Let followJournal handle freeing the journal context
+            // object and closing the channel.
+            following = true
         }
     }
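A minimal cgo illustration of the point above (a sketch only; it assumes
the libsystemd development headers are installed and that the pkg-config
name is `libsystemd`, which may differ on older distributions):

    package main

    /*
    #cgo pkg-config: libsystemd
    #include <systemd/sd-journal.h>
    */
    import "C"

    import "fmt"

    func main() {
        var j *C.sd_journal
        if rc := C.sd_journal_open(&j, C.int(0)); rc != 0 {
            panic("sd_journal_open failed")
        }
        defer C.sd_journal_close(j)

        // The first sd_journal_get_fd() call is what sets up the library's
        // inotify watches, so make it right after opening when we intend to
        // follow; later calls simply return the same descriptor.
        fd := C.sd_journal_get_fd(j)
        fmt.Println("journal inotify fd:", fd)
    }
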
From f091febc942859ffbc881f3a3aa327366603ae65 Mon Sep 17 00:00:00 2001
From: Kir Kolyshkin
Date: Mon, 11 Mar 2019 17:20:56 -0700
Subject: [PATCH 6/9] journald/read: simplify/fix followJournal()

TL;DR: simplify the code, fix --follow hanging indefinitely.

Do the following to simplify the followJournal() code:

1. Use Go-native select instead of C-native polling.

2. Use Watch{Producer,Consumer}Gone(), eliminating the need for the
   journald.closed variable and an extra goroutine.

3. Use sd_journal_wait(). In the words of its own man page:

   > A synchronous alternative for using sd_journal_get_fd(),
   > sd_journal_get_events(), sd_journal_get_timeout() and
   > sd_journal_process() is sd_journal_wait().

Unfortunately, the logic is still not as simple as it could be. The
reason is that once the container has exited, journald might still be
writing some logs from its internal buffers onto journal file(s), and
there is no way to tell whether it has finished, so we cannot guarantee
that we have read everything back.

The bug can be reproduced with something like:

> $ ID=$(docker run -d busybox seq 1 150000); docker logs --follow $ID
> ...
> 128123
> $

(The last expected output line should be `150000`.)

To avoid exiting from followJournal() early, add the following logic:
once the container is gone, keep trying to drain the journal until
there is no new data for at least the `waitTimeout` period.

Should fix https://github.com/docker/for-linux/issues/575
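The shape of that loop, as a plain-Go sketch (journald specifics replaced
by stand-ins: drain() returns how many entries were forwarded, and the two
channels play the role of WatchProducerGone()/WatchConsumerGone()):

    package main

    import (
        "fmt"
        "time"
    )

    // followLoop keeps draining until the producer is gone AND a whole
    // waitTimeout interval passes with nothing new to read, so entries that
    // journald flushes after the container exits are not lost.
    func followLoop(drain func() int, producerGone, consumerGone <-chan struct{}) {
        const waitTimeout = 250 * time.Millisecond

        for {
            time.Sleep(waitTimeout) // stand-in for sd_journal_wait(j, waitTimeout)

            select {
            case <-consumerGone:
                return // nobody is reading any more; just stop
            case <-producerGone:
                // container is gone; drain whatever is left below
            default:
                // container is still alive; keep looping
            }

            if drain() == 0 {
                select {
                case <-producerGone:
                    return // gone, and a full quiet interval has passed
                default:
                }
            }
        }
    }

    func main() {
        producerGone := make(chan struct{})
        consumerGone := make(chan struct{}) // never closed in this demo

        entries := make(chan string, 16)
        for i := 1; i <= 3; i++ {
            entries <- fmt.Sprintf("line %d", i)
        }

        drain := func() int {
            n := 0
            for {
                select {
                case e := <-entries:
                    fmt.Println("read:", e)
                    n++
                default:
                    return n
                }
            }
        }

        // Simulate the container exiting while one entry is still buffered.
        go func() {
            time.Sleep(100 * time.Millisecond)
            entries <- "late entry flushed after exit"
            close(producerGone)
        }()

        followLoop(drain, producerGone, consumerGone)
        fmt.Println("follower done")
    }
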
Signed-off-by: Kir Kolyshkin
---
 daemon/logger/journald/journald.go |   1 -
 daemon/logger/journald/read.go     | 178 ++++++++++-------------------
 2 files changed, 59 insertions(+), 120 deletions(-)

diff --git a/daemon/logger/journald/journald.go b/daemon/logger/journald/journald.go
index 508ca23f49..4d3952cba4 100644
--- a/daemon/logger/journald/journald.go
+++ b/daemon/logger/journald/journald.go
@@ -21,7 +21,6 @@ type journald struct {
     mu      sync.Mutex
     vars    map[string]string // additional variables and values to send to the journal along with the log message
     readers map[*logger.LogWatcher]struct{}
-    closed  bool
 }
 
 func init() {

diff --git a/daemon/logger/journald/read.go b/daemon/logger/journald/read.go
index 8aa01c460d..3dfa08b0a4 100644
--- a/daemon/logger/journald/read.go
+++ b/daemon/logger/journald/read.go
@@ -101,55 +101,10 @@ package journald // import "github.com/docker/docker/daemon/logger/journald"
 // }
 // return rc;
 //}
-//static int wait_for_data_cancelable(sd_journal *j, int pipefd)
-//{
-//    struct pollfd fds[2];
-//    uint64_t when = 0;
-//    int timeout, jevents, i;
-//    struct timespec ts;
-//    uint64_t now;
-//
-//    memset(&fds, 0, sizeof(fds));
-//    fds[0].fd = pipefd;
-//    fds[0].events = POLLHUP;
-//    fds[1].fd = sd_journal_get_fd(j);
-//    if (fds[1].fd < 0) {
-//        return fds[1].fd;
-//    }
-//
-//    do {
-//        jevents = sd_journal_get_events(j);
-//        if (jevents < 0) {
-//            return jevents;
-//        }
-//        fds[1].events = jevents;
-//        sd_journal_get_timeout(j, &when);
-//        if (when == -1) {
-//            timeout = -1;
-//        } else {
-//            clock_gettime(CLOCK_MONOTONIC, &ts);
-//            now = (uint64_t) ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
-//            timeout = when > now ? (int) ((when - now + 999) / 1000) : 0;
-//        }
-//        i = poll(fds, 2, timeout);
-//        if ((i == -1) && (errno != EINTR)) {
-//            /* An unexpected error. */
-//            return (errno != 0) ? -errno : -EINTR;
-//        }
-//        if (fds[0].revents & POLLHUP) {
-//            /* The close notification pipe was closed. */
-//            return 0;
-//        }
-//        if (sd_journal_process(j) == SD_JOURNAL_APPEND) {
-//            /* Data, which we might care about, was appended. */
-//            return 1;
-//        }
-//    } while ((fds[0].revents & POLLHUP) == 0);
-//    return 0;
-//}
 import "C"
 
 import (
+    "errors"
     "fmt"
     "strings"
     "time"
@@ -158,27 +113,33 @@ import (
     "github.com/coreos/go-systemd/journal"
     "github.com/docker/docker/api/types/backend"
     "github.com/docker/docker/daemon/logger"
-    "github.com/sirupsen/logrus"
 )
 
 func (s *journald) Close() error {
     s.mu.Lock()
-    s.closed = true
     for r := range s.readers {
         r.ProducerGone()
         delete(s.readers, r)
     }
     s.mu.Unlock()
     return nil
 }
 
-func (s *journald) drainJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, oldCursor *C.char, untilUnixMicro uint64) (*C.char, bool) {
-    var msg, data, cursor *C.char
-    var length C.size_t
-    var stamp C.uint64_t
-    var priority, partial C.int
-    var done bool
+// convert error code returned from a sd_journal_* function
+// (which returns -errno) to a string
+func CErr(ret C.int) string {
+    return C.GoString(C.strerror(C.int(-ret)))
+}
+
+func (s *journald) drainJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, oldCursor *C.char, untilUnixMicro uint64) (*C.char, bool, int) {
+    var (
+        msg, data, cursor *C.char
+        length            C.size_t
+        stamp             C.uint64_t
+        priority, partial C.int
+        done              bool
+        shown             int
+    )
 
     // Walk the journal from here forward until we run out of new entries
     // or we reach the until value (if provided).
@@ -230,6 +191,7 @@ drain:
                 kv := strings.SplitN(C.GoStringN(data, C.int(length)), "=", 2)
                 attrs = append(attrs, backend.LogAttr{Key: kv[0], Value: kv[1]})
             }
+
             // Send the log message, unless the consumer is gone
             select {
             case <-logWatcher.WatchConsumerGone():
@@ -241,6 +203,7 @@ drain:
                 Timestamp: timestamp.In(time.UTC),
                 Attrs:     attrs,
             }:
+                shown++
             }
         }
         // If we're at the end of the journal, we're done (for now).
@@ -255,73 +218,57 @@ drain:
         // ensure that we won't be freeing an address that's invalid
         cursor = nil
     }
-    return cursor, done
+    return cursor, done, shown
 }
 
-func (s *journald) followJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, pfd [2]C.int, cursor *C.char, untilUnixMicro uint64) *C.char {
+func (s *journald) followJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, cursor *C.char, untilUnixMicro uint64) *C.char {
     s.mu.Lock()
     s.readers[logWatcher] = struct{}{}
-    if s.closed {
-        // the journald Logger is closed, presumably because the container has been
-        // reset. So we shouldn't follow, because we'll never be woken up. But we
-        // should make one more drainJournal call to be sure we've got all the logs.
-        // Close pfd[1] so that one drainJournal happens, then cleanup, then return.
-        C.close(pfd[1])
-    }
     s.mu.Unlock()
 
-    newCursor := make(chan *C.char)
+    waitTimeout := C.uint64_t(250000) // 0.25s
 
-    go func() {
-        for {
-            // Keep copying journal data out until we're notified to stop
-            // or we hit an error.
-            status := C.wait_for_data_cancelable(j, pfd[0])
-            if status < 0 {
-                cerrstr := C.strerror(C.int(-status))
-                errstr := C.GoString(cerrstr)
-                fmtstr := "error %q while attempting to follow journal for container %q"
-                logrus.Errorf(fmtstr, errstr, s.vars["CONTAINER_ID_FULL"])
-                break
-            }
-
-            var done bool
-            cursor, done = s.drainJournal(logWatcher, j, cursor, untilUnixMicro)
-
-            if status != 1 || done {
-                // We were notified to stop
-                break
+    for {
+        status := C.sd_journal_wait(j, waitTimeout)
+        if status < 0 {
+            logWatcher.Err <- errors.New("error waiting for journal: " + CErr(status))
+            goto cleanup
+        }
+        select {
+        case <-logWatcher.WatchConsumerGone():
+            goto cleanup // won't be able to write anything anymore
+        case <-logWatcher.WatchProducerGone():
+            // container is gone, drain journal
+        default:
+            // container is still alive
+            if status == C.SD_JOURNAL_NOP {
+                // no new data -- keep waiting
+                continue
             }
         }
-
-        // Clean up.
-        C.close(pfd[0])
-        s.mu.Lock()
-        delete(s.readers, logWatcher)
-        s.mu.Unlock()
-        close(logWatcher.Msg)
-        newCursor <- cursor
-    }()
-
-    // Wait until we're told to stop.
-    select {
-    case cursor = <-newCursor:
-    case <-logWatcher.WatchConsumerGone():
-        // Notify the other goroutine that its work is done.
-        C.close(pfd[1])
-        cursor = <-newCursor
+        newCursor, done, recv := s.drainJournal(logWatcher, j, cursor, untilUnixMicro)
+        cursor = newCursor
+        if done || (status == C.SD_JOURNAL_NOP && recv == 0) {
+            break
+        }
     }
+cleanup:
+    s.mu.Lock()
+    delete(s.readers, logWatcher)
+    s.mu.Unlock()
+    close(logWatcher.Msg)
     return cursor
 }
 
 func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
-    var j *C.sd_journal
-    var cmatch, cursor *C.char
-    var stamp C.uint64_t
-    var sinceUnixMicro uint64
-    var untilUnixMicro uint64
-    var pipes [2]C.int
+    var (
+        j              *C.sd_journal
+        cmatch, cursor *C.char
+        stamp          C.uint64_t
+        sinceUnixMicro uint64
+        untilUnixMicro uint64
+    )
 
     // Get a handle to the journal.
     rc := C.sd_journal_open(&j, C.int(0))
@@ -409,19 +356,12 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon
             return
         }
     }
-    cursor, _ = s.drainJournal(logWatcher, j, nil, untilUnixMicro)
+    cursor, _, _ = s.drainJournal(logWatcher, j, nil, untilUnixMicro)
     if config.Follow {
-        // Create a pipe that we can poll at the same time as
-        // the journald descriptor.
-        ret := C.pipe(&pipes[0])
-        if ret < 0 {
-            logWatcher.Err <- fmt.Errorf("error creating journald notification pipe")
-        } else {
-            cursor = s.followJournal(logWatcher, j, pipes, cursor, untilUnixMicro)
-            // Let followJournal handle freeing the journal context
-            // object and closing the channel.
-            following = true
-        }
+        cursor = s.followJournal(logWatcher, j, cursor, untilUnixMicro)
+        // Let followJournal handle freeing the journal context
+        // object and closing the channel.
+        following = true
     }
 
     C.free(unsafe.Pointer(cursor))

From b73fb8fd5d521081c92b5c2cce334c21b2e0ff5f Mon Sep 17 00:00:00 2001
From: Kir Kolyshkin
Date: Sun, 10 Mar 2019 03:39:33 +0000
Subject: [PATCH 7/9] journald/read: avoid piling up open files

If we take a long time to process log messages, and during that time
journal file rotation occurs, the journald client library will keep
those rotated files open until sd_journal_process() is called.

By periodically calling sd_journal_process() during the processing loop
we shrink the window of time a client instance has open file
descriptors for rotated (deleted) journal files.

This code is modelled after that of journalctl [1]; the above
explanation as well as the value of 1024 is taken from there.

[v2: fix CErr() argument]

[1] https://github.com/systemd/systemd/blob/dc16327c48d/src/journal/journalctl.c#L2676
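Stripped of cgo, the pattern is simply a cheap housekeeping call every
fixed number of forwarded messages (a sketch; process() stands in for
C.sd_journal_process(j), whose failure is only logged in the driver):

    package main

    import "fmt"

    func process(shown int) {
        // In the driver this is C.sd_journal_process(j), which gives
        // libsystemd a chance to close descriptors of rotated journal files.
        fmt.Println("housekeeping after", shown, "messages")
    }

    func main() {
        const interval = 1024 // same value journalctl uses

        shown := 0
        for i := 0; i < 3000; i++ {
            // ... forward one journal entry to the consumer ...
            shown++
            if shown%interval == 0 {
                process(shown)
            }
        }
    }
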
Signed-off-by: Kir Kolyshkin
---
 daemon/logger/journald/read.go | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/daemon/logger/journald/read.go b/daemon/logger/journald/read.go
index 3dfa08b0a4..36240a832a 100644
--- a/daemon/logger/journald/read.go
+++ b/daemon/logger/journald/read.go
@@ -113,6 +113,7 @@ import (
     "github.com/coreos/go-systemd/journal"
     "github.com/docker/docker/api/types/backend"
     "github.com/docker/docker/daemon/logger"
+    "github.com/sirupsen/logrus"
 )
 
 func (s *journald) Close() error {
@@ -205,6 +206,16 @@ drain:
             }:
                 shown++
             }
+            // Call sd_journal_process() periodically during the processing loop
+            // to close any opened file descriptors for rotated (deleted) journal files.
+            if shown%1024 == 0 {
+                if ret := C.sd_journal_process(j); ret < 0 {
+                    // log a warning but ignore it for now
+                    logrus.WithField("container", s.vars["CONTAINER_ID_FULL"]).
+                        WithField("error", CErr(ret)).
+                        Warn("journald: error processing journal")
+                }
+            }
         }
         // If we're at the end of the journal, we're done (for now).
         if C.sd_journal_next(j) <= 0 {

From dd4bfe30a8ac1b31630310090dc36ae3d9253444 Mon Sep 17 00:00:00 2001
From: Kir Kolyshkin
Date: Tue, 12 Mar 2019 00:36:44 -0700
Subject: [PATCH 8/9] journald: fix for --tail 0

At first glance, `docker logs --tail 0` does not make sense, as it is
supposed to produce no output, but `tail -n 0` from GNU coreutils works
like that, plus there is even a test case (`TestLogsTail` in
integration-cli/docker_cli_logs_test.go).

Now, something like `docker logs --follow --tail 0` makes total sense,
so let's make it work.

(NOTE: if --tail is not used, config.Tail is set to -1.)
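To spell out the resulting semantics of config.Tail (a sketch, not driver
code):

    package main

    import "fmt"

    // tailPlan mirrors how the driver interprets config.Tail after this
    // change: -1 (no --tail flag) reads the whole backlog, 0 reads none of
    // it (only useful together with --follow), N > 0 reads the last N lines.
    func tailPlan(tail int) string {
        switch {
        case tail < 0:
            return "read the whole journal backlog"
        case tail == 0:
            return "skip the backlog entirely"
        default:
            return fmt.Sprintf("seek back %d entries, then read forward", tail)
        }
    }

    func main() {
        for _, tail := range []int{-1, 0, 6000} {
            fmt.Printf("--tail %d: %s\n", tail, tailPlan(tail))
        }
    }
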
- if C.sd_journal_previous_skip(j, C.uint64_t(config.Tail)) > 0 { + if C.sd_journal_previous_skip(j, C.uint64_t(config.Tail)) >= 0 { // ...but not before "since" if sinceUnixMicro != 0 && C.sd_journal_get_realtime_usec(j, &stamp) == 0 && @@ -367,7 +367,9 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon return } } - cursor, _, _ = s.drainJournal(logWatcher, j, nil, untilUnixMicro) + if config.Tail != 0 { // special case for --tail 0 + cursor, _, _ = s.drainJournal(logWatcher, j, nil, untilUnixMicro) + } if config.Follow { cursor = s.followJournal(logWatcher, j, cursor, untilUnixMicro) // Let followJournal handle freeing the journal context From 20a0e58a794cfb9b1a1f757d222248e22555f7f0 Mon Sep 17 00:00:00 2001 From: Kir Kolyshkin Date: Wed, 13 Mar 2019 16:30:46 -0700 Subject: [PATCH 9/9] journald/read: fix/unify errors 1. Use "in-place" variables for if statements to limit their scope to the respectful `if` block. 2. Report the error returned from sd_journal_* by using CErr(). 3. Use errors.New() instead of fmt.Errorf(). Signed-off-by: Kir Kolyshkin --- daemon/logger/journald/read.go | 47 +++++++++++++++++----------------- 1 file changed, 23 insertions(+), 24 deletions(-) diff --git a/daemon/logger/journald/read.go b/daemon/logger/journald/read.go index d6458b4986..7b221983b6 100644 --- a/daemon/logger/journald/read.go +++ b/daemon/logger/journald/read.go @@ -105,7 +105,6 @@ import "C" import ( "errors" - "fmt" "strings" "time" "unsafe" @@ -282,16 +281,14 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon ) // Get a handle to the journal. - rc := C.sd_journal_open(&j, C.int(0)) - if rc != 0 { - logWatcher.Err <- fmt.Errorf("error opening journal") + if rc := C.sd_journal_open(&j, C.int(0)); rc != 0 { + logWatcher.Err <- errors.New("error opening journal: " + CErr(rc)) close(logWatcher.Msg) return } if config.Follow { // Initialize library inotify watches early - rc = C.sd_journal_get_fd(j) - if rc < 0 { + if rc := C.sd_journal_get_fd(j); rc < 0 { logWatcher.Err <- errors.New("error getting journald fd: " + CErr(rc)) close(logWatcher.Msg) return @@ -309,17 +306,15 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon C.sd_journal_close(j) }() // Remove limits on the size of data items that we'll retrieve. - rc = C.sd_journal_set_data_threshold(j, C.size_t(0)) - if rc != 0 { - logWatcher.Err <- fmt.Errorf("error setting journal data threshold") + if rc := C.sd_journal_set_data_threshold(j, C.size_t(0)); rc != 0 { + logWatcher.Err <- errors.New("error setting journal data threshold: " + CErr(rc)) return } // Add a match to have the library do the searching for us. cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"]) defer C.free(unsafe.Pointer(cmatch)) - rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch)) - if rc != 0 { - logWatcher.Err <- fmt.Errorf("error setting journal match") + if rc := C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch)); rc != 0 { + logWatcher.Err <- errors.New("error setting journal match: " + CErr(rc)) return } // If we have a cutoff time, convert it to Unix time once. @@ -335,11 +330,13 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon if config.Tail >= 0 { // If until time provided, start from there. // Otherwise start at the end of the journal. 
Signed-off-by: Kir Kolyshkin
---
 daemon/logger/journald/read.go | 47 +++++++++++++++++-----------------
 1 file changed, 23 insertions(+), 24 deletions(-)

diff --git a/daemon/logger/journald/read.go b/daemon/logger/journald/read.go
index d6458b4986..7b221983b6 100644
--- a/daemon/logger/journald/read.go
+++ b/daemon/logger/journald/read.go
@@ -105,7 +105,6 @@ package journald // import "github.com/docker/docker/daemon/logger/journald"
 import "C"
 import (
     "errors"
-    "fmt"
     "strings"
     "time"
     "unsafe"
@@ -282,16 +281,14 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon
     )
 
     // Get a handle to the journal.
-    rc := C.sd_journal_open(&j, C.int(0))
-    if rc != 0 {
-        logWatcher.Err <- fmt.Errorf("error opening journal")
+    if rc := C.sd_journal_open(&j, C.int(0)); rc != 0 {
+        logWatcher.Err <- errors.New("error opening journal: " + CErr(rc))
         close(logWatcher.Msg)
         return
     }
     if config.Follow {
         // Initialize library inotify watches early
-        rc = C.sd_journal_get_fd(j)
-        if rc < 0 {
+        if rc := C.sd_journal_get_fd(j); rc < 0 {
             logWatcher.Err <- errors.New("error getting journald fd: " + CErr(rc))
             close(logWatcher.Msg)
             return
@@ -309,17 +306,15 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon
         C.sd_journal_close(j)
     }()
     // Remove limits on the size of data items that we'll retrieve.
-    rc = C.sd_journal_set_data_threshold(j, C.size_t(0))
-    if rc != 0 {
-        logWatcher.Err <- fmt.Errorf("error setting journal data threshold")
+    if rc := C.sd_journal_set_data_threshold(j, C.size_t(0)); rc != 0 {
+        logWatcher.Err <- errors.New("error setting journal data threshold: " + CErr(rc))
         return
     }
     // Add a match to have the library do the searching for us.
     cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"])
     defer C.free(unsafe.Pointer(cmatch))
-    rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch))
-    if rc != 0 {
-        logWatcher.Err <- fmt.Errorf("error setting journal match")
+    if rc := C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch)); rc != 0 {
+        logWatcher.Err <- errors.New("error setting journal match: " + CErr(rc))
         return
     }
     // If we have a cutoff time, convert it to Unix time once.
@@ -335,11 +330,13 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon
     if config.Tail >= 0 {
         // If until time provided, start from there.
         // Otherwise start at the end of the journal.
-        if untilUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(untilUnixMicro)) < 0 {
-            logWatcher.Err <- fmt.Errorf("error seeking provided until value")
-            return
-        } else if C.sd_journal_seek_tail(j) < 0 {
-            logWatcher.Err <- fmt.Errorf("error seeking to end of journal")
+        if untilUnixMicro != 0 {
+            if rc := C.sd_journal_seek_realtime_usec(j, C.uint64_t(untilUnixMicro)); rc != 0 {
+                logWatcher.Err <- errors.New("error seeking provided until value: " + CErr(rc))
+                return
+            }
+        } else if rc := C.sd_journal_seek_tail(j); rc != 0 {
+            logWatcher.Err <- errors.New("error seeking to end of journal: " + CErr(rc))
             return
         }
         // (Try to) skip backwards by the requested number of lines...
@@ -353,17 +350,19 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon
         }
     } else {
         // Start at the beginning of the journal.
-        if C.sd_journal_seek_head(j) < 0 {
-            logWatcher.Err <- fmt.Errorf("error seeking to start of journal")
+        if rc := C.sd_journal_seek_head(j); rc != 0 {
+            logWatcher.Err <- errors.New("error seeking to start of journal: " + CErr(rc))
             return
         }
         // If we have a cutoff date, fast-forward to it.
-        if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 {
-            logWatcher.Err <- fmt.Errorf("error seeking to start time in journal")
-            return
+        if sinceUnixMicro != 0 {
+            if rc := C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)); rc != 0 {
+                logWatcher.Err <- errors.New("error seeking to start time in journal: " + CErr(rc))
+                return
+            }
         }
-        if C.sd_journal_next(j) < 0 {
-            logWatcher.Err <- fmt.Errorf("error skipping to next journal entry")
+        if rc := C.sd_journal_next(j); rc < 0 {
+            logWatcher.Err <- errors.New("error skipping to next journal entry: " + CErr(rc))
             return
         }
     }