moby/daemon/logger/logger.go

// Package logger defines interfaces that logger drivers implement to
// log messages.
//
// The other half of a logger driver is the implementation of the
// factory, which holds the contextual instance information that
// allows multiple loggers of the same type to perform different
// actions, such as logging to different locations.
package logger

import (
	"errors"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/docker/docker/pkg/jsonlog"
)

// ErrReadLogsNotSupported is returned when the logger does not support reading logs.
var ErrReadLogsNotSupported = errors.New("configured logging driver does not support reading")

const (
	// TimeFormat is the time format used for timestamps sent to log readers.
	TimeFormat = jsonlog.RFC3339NanoFixed

	logWatcherBufferSize = 4096
)

var messagePool = &sync.Pool{New: func() interface{} { return &Message{Line: make([]byte, 0, 256)} }}

// NewMessage returns a new message from the message sync.Pool
func NewMessage() *Message {
	return messagePool.Get().(*Message)
}

// PutMessage puts the specified message back in the message pool.
// The message fields are reset before putting into the pool.
func PutMessage(msg *Message) {
	msg.reset()
	messagePool.Put(msg)
}
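
// A minimal sketch of the pooled-message lifecycle from a caller's point of
// view, assuming an already-created driver and some source of log data (the
// "line", "source", and "driver" names below are illustrative, not part of
// this package):
//
//	msg := logger.NewMessage()
//	msg.Line = append(msg.Line, line...)
//	msg.Source = source // e.g. "stdout" or "stderr"
//	msg.Timestamp = time.Now().UTC()
//	if err := driver.Log(msg); err != nil {
//		// handle or report the error
//	}
//	logger.PutMessage(msg) // return the message to the pool once Log has returned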

// Message is a data structure that represents a piece of output produced by
// some container. The Line member is a slice of an array whose contents can
// be changed after a log driver's Log() method returns.
// Any changes made to this struct must also be reflected in the `reset` function.
type Message struct {
	Line      []byte
	Source    string
	Timestamp time.Time
	Attrs     LogAttributes
	Partial   bool
}
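
// Because Line may point into a reused buffer, a driver that keeps messages
// around after Log() returns (for example, to batch them) must copy the data
// first. A minimal sketch of such a deep copy, using only the fields defined
// above (the helper name is illustrative, not part of this file):
//
//	func copyMessage(msg *logger.Message) *logger.Message {
//		m := &logger.Message{
//			Line:      append([]byte(nil), msg.Line...),
//			Source:    msg.Source,
//			Timestamp: msg.Timestamp,
//			Partial:   msg.Partial,
//		}
//		if msg.Attrs != nil {
//			m.Attrs = make(logger.LogAttributes, len(msg.Attrs))
//			for k, v := range msg.Attrs {
//				m.Attrs[k] = v
//			}
//		}
//		return m
//	}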

// reset sets the message back to default values
// This is used when putting a message back into the message pool.
// Any changes to the `Message` struct should be reflected here.
func (m *Message) reset() {
	m.Line = m.Line[:0]
	m.Source = ""
	m.Attrs = nil
	m.Partial = false
}

// LogAttributes is used to hold the extra attributes available in the log message
// Primarily used for converting the map type to string and sorting.
type LogAttributes map[string]string

type byKey []string

func (s byKey) Len() int { return len(s) }
func (s byKey) Less(i, j int) bool {
	keyI := strings.Split(s[i], "=")
	keyJ := strings.Split(s[j], "=")
	return keyI[0] < keyJ[0]
}
func (s byKey) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (a LogAttributes) String() string {
	var ss byKey
	for k, v := range a {
		ss = append(ss, k+"="+v)
	}
	sort.Sort(ss)
	return strings.Join(ss, ",")
}
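
// A small usage sketch of LogAttributes.String (the values are illustrative):
//
//	attrs := logger.LogAttributes{"b": "2", "a": "1"}
//	fmt.Println(attrs.String()) // prints "a=1,b=2": pairs are sorted by key and joined with commas
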
// Logger is the interface for docker logging drivers.
type Logger interface {
	Log(*Message) error
	Name() string
	Close() error
}
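
// A minimal sketch of a driver implementing this interface, assuming a
// writer-backed logger (the "lineLogger" type and its fields are illustrative
// only, not part of this package):
//
//	type lineLogger struct {
//		mu sync.Mutex
//		w  io.Writer
//	}
//
//	func (l *lineLogger) Log(msg *logger.Message) error {
//		l.mu.Lock()
//		defer l.mu.Unlock()
//		if _, err := l.w.Write(msg.Line); err != nil {
//			return err
//		}
//		if !msg.Partial {
//			// terminate complete lines; a partial chunk is continued by the next message
//			_, err := l.w.Write([]byte{'\n'})
//			return err
//		}
//		return nil
//	}
//
//	func (l *lineLogger) Name() string { return "linelogger" }
//
//	func (l *lineLogger) Close() error { return nil }
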
// ReadConfig is the configuration passed into ReadLogs.
type ReadConfig struct {
	Since  time.Time
	Tail   int
	Follow bool
}

// LogReader is the interface for reading log messages for loggers that support reading.
type LogReader interface {
	// Read logs from underlying logging backend
	ReadLogs(ReadConfig) *LogWatcher
}

// LogWatcher is used when consuming logs read from the LogReader interface.
type LogWatcher struct {
	// For sending log messages to a reader.
	Msg chan *Message
	// For sending error messages that occur while reading logs.
	Err           chan error
	closeOnce     sync.Once
	closeNotifier chan struct{}
}

// NewLogWatcher returns a new LogWatcher.
func NewLogWatcher() *LogWatcher {
	return &LogWatcher{
		Msg:           make(chan *Message, logWatcherBufferSize),
		Err:           make(chan error, 1),
		closeNotifier: make(chan struct{}),
	}
}

// Close notifies the underlying log reader to stop.
func (w *LogWatcher) Close() {
	// only close if not already closed
	w.closeOnce.Do(func() {
		close(w.closeNotifier)
	})
}

// WatchClose returns a channel receiver that receives notification
// when the watcher has been closed. This should only be called from
// one goroutine.
func (w *LogWatcher) WatchClose() <-chan struct{} {
	return w.closeNotifier
}