// Package logger defines interfaces that logger drivers implement to
// log messages.
//
// The other half of a logger driver is the implementation of the
// factory, which holds the contextual instance information that
// allows multiple loggers of the same type to perform different
// actions, such as logging to different locations.
package logger // import "github.com/docker/docker/daemon/logger"

import (
	"sync"
	"time"

	"github.com/docker/docker/api/types/backend"
)
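
// As the package comment notes, the other half of a driver is its factory.
// The sketch below is illustrative only: it assumes the package's factory
// helpers (RegisterLogDriver, Creator and Info), which live in other files
// of this package, and all driver names are invented. It shows how a driver
// typically registers itself and how the per-container Info lets one factory
// produce many differently configured loggers:
//
//	// in a hypothetical driver package
//	func init() {
//		if err := logger.RegisterLogDriver("example", newExampleLogger); err != nil {
//			panic(err)
//		}
//	}
//
//	func newExampleLogger(info logger.Info) (logger.Logger, error) {
//		// info carries the container ID, labels and driver options, so each
//		// instance can log to its own destination.
//		return &exampleLogger{id: info.ContainerID}, nil
//	}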

// ErrReadLogsNotSupported is returned when the underlying log driver does not support reading
type ErrReadLogsNotSupported struct{}

func (ErrReadLogsNotSupported) Error() string {
	return "configured logging driver does not support reading"
}

// NotImplemented makes this error implement the `NotImplemented` interface from api/errdefs
func (ErrReadLogsNotSupported) NotImplemented() {}
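
// A sketch of how a caller might surface this error when a driver does not
// implement the LogReader interface defined further down in this file (the
// daemon's actual wiring may differ; this is written as if it lived in this
// package for brevity):
//
//	func readLogs(l Logger, cfg ReadConfig) (*LogWatcher, error) {
//		lr, ok := l.(LogReader)
//		if !ok {
//			return nil, ErrReadLogsNotSupported{}
//		}
//		return lr.ReadLogs(cfg), nil
//	}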

const (
	logWatcherBufferSize = 4096
)

var messagePool = &sync.Pool{New: func() interface{} { return &Message{Line: make([]byte, 0, 256)} }}

// NewMessage returns a new message from the message sync.Pool
func NewMessage() *Message {
	return messagePool.Get().(*Message)
}

// PutMessage puts the specified message back in the message pool.
// The message fields are reset before putting into the pool.
func PutMessage(msg *Message) {
	msg.reset()
	messagePool.Put(msg)
}
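
// A minimal sketch of the intended round-trip through the pool: a producer
// obtains a Message with NewMessage, fills it in, and hands it to a driver's
// Log method. Whoever finishes with the message last returns it to the pool
// via PutMessage (many drivers do this inside Log). The helper below is
// hypothetical and only illustrates the producer side:
//
//	func emit(l Logger, source string, line []byte) error {
//		msg := NewMessage()
//		msg.Line = append(msg.Line, line...)
//		msg.Source = source
//		msg.Timestamp = time.Now().UTC()
//		// Ownership of msg passes to the driver here; the owner returns it
//		// to the pool with PutMessage when done.
//		return l.Log(msg)
//	}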

// Message is a data structure that represents a piece of output produced by
// some container. The Line member is a slice of an array whose contents can
// be changed after a log driver's Log() method returns.
//
// Message is subtyped from backend.LogMessage because there is a lot of
// internal complexity around the Message type that should not be exposed
// to any package not explicitly importing the logger type.
type Message backend.LogMessage
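
// Because Line may point into a buffer that is reused after Log returns, a
// driver that holds on to messages (for batching, for example) should copy
// the line data first. A sketch of such a copy (copyLine is a hypothetical
// helper, not part of this package; only Line is deep-copied, other
// reference fields are shared):
//
//	func copyLine(msg *Message) *Message {
//		cp := *msg
//		cp.Line = append(make([]byte, 0, len(msg.Line)), msg.Line...)
//		return &cp
//	}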

// reset sets the message back to default values.
// This is used when putting a message back into the message pool.
func (m *Message) reset() {
	*m = Message{Line: m.Line[:0]}
}

// AsLogMessage returns a pointer to the message as a pointer to
// backend.LogMessage, which is an identical type with a different purpose
func (m *Message) AsLogMessage() *backend.LogMessage {
	return (*backend.LogMessage)(m)
}

// Logger is the interface for docker logging drivers.
type Logger interface {
	Log(*Message) error
	Name() string
	Close() error
}

// SizedLogger is the interface for logging drivers that can control
// the size of the buffer used for their messages.
type SizedLogger interface {
	Logger
	BufSize() int
}
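
// A sketch of a driver satisfying Logger (and, via BufSize, SizedLogger).
// The exampleLogger type, its fields and its buffer size are invented for
// illustration, written as if it lived in an external driver package:
//
//	type exampleLogger struct {
//		mu  sync.Mutex
//		out io.Writer // e.g. a per-container log file
//		id  string
//	}
//
//	func (l *exampleLogger) Log(msg *logger.Message) error {
//		l.mu.Lock()
//		defer l.mu.Unlock()
//		_, err := l.out.Write(msg.Line)
//		logger.PutMessage(msg) // done with msg; hand it back to the pool
//		return err
//	}
//
//	func (l *exampleLogger) Name() string { return "example" }
//	func (l *exampleLogger) Close() error { return nil }
//
//	// BufSize makes the driver a SizedLogger as well.
//	func (l *exampleLogger) BufSize() int { return 16 * 1024 }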

// ReadConfig is the configuration passed into ReadLogs.
type ReadConfig struct {
	Since  time.Time
	Until  time.Time
	Tail   int
	Follow bool
}
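
// For illustration, a configuration roughly equivalent to tailing the last
// 100 lines from the past hour and then streaming new output (the mapping
// from CLI flags to these fields is handled elsewhere and assumed here):
//
//	cfg := ReadConfig{
//		Since:  time.Now().Add(-1 * time.Hour), // only messages from the last hour
//		Tail:   100,                            // start with the last 100 lines
//		Follow: true,                           // then stream new messages
//	}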

// LogReader is the interface for reading log messages for loggers that support reading.
type LogReader interface {
	// ReadLogs reads logs from the underlying logging backend.
	ReadLogs(ReadConfig) *LogWatcher
}

// LogWatcher is used when consuming logs read from the LogReader interface.
type LogWatcher struct {
	// For sending log messages to a reader.
	Msg chan *Message
	// For sending error messages that occur while reading logs.
	Err          chan error
	consumerOnce sync.Once
	consumerGone chan struct{}
}

// NewLogWatcher returns a new LogWatcher.
func NewLogWatcher() *LogWatcher {
	return &LogWatcher{
		Msg:          make(chan *Message, logWatcherBufferSize),
		Err:          make(chan error, 1),
		consumerGone: make(chan struct{}),
	}
}

// ConsumerGone notifies that the logs consumer is gone.
func (w *LogWatcher) ConsumerGone() {
	// only close if not already closed
	w.consumerOnce.Do(func() {
		close(w.consumerGone)
	})
}

// WatchConsumerGone returns a channel receiver that receives notification
// when the log watcher consumer is gone.
func (w *LogWatcher) WatchConsumerGone() <-chan struct{} {
	return w.consumerGone
}
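
// Putting LogReader and LogWatcher together, a consumer might drain logs as
// sketched below (written as if inside this package; error handling and
// termination are simplified, and the assumption that the reader closes Msg
// when it is done is illustrative):
//
//	func consume(lr LogReader, cfg ReadConfig, handle func(*Message)) error {
//		w := lr.ReadLogs(cfg)
//		// Tell the reader we are done so it can release its resources.
//		defer w.ConsumerGone()
//		for {
//			select {
//			case msg, ok := <-w.Msg:
//				if !ok {
//					return nil // reader closed the channel; no more logs
//				}
//				handle(msg)
//			case err := <-w.Err:
//				return err
//			}
//		}
//	}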

// Capability defines the list of capabilities that a driver can implement.
// These capabilities are not required in order to be a logging driver;
// however, they do determine how a logging driver can be used.
type Capability struct {
	// Determines if a log driver can read back logs
	ReadLogs bool
}