Merge pull request #11485 from wlan0/rollover_log
Add rollover log driver, and --log-driver-opts flag
commit 415f744d0c
16 changed files with 426 additions and 103 deletions
@@ -19,7 +19,7 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
since := cmd.String([]string{"-since"}, "", "Show logs since timestamp")
times := cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps")
tail := cmd.String([]string{"-tail"}, "all", "Number of lines to show from the end of the logs")
tail := cmd.String([]string{"-tail"}, "latest", "Number of lines to show from the end of the logs")
cmd.Require(flag.Exact, 1)
cmd.ParseFlags(args, true)
@@ -710,7 +710,10 @@ func (container *Container) SetHostConfig(hostConfig *runconfig.HostConfig) {
func (container *Container) getLogConfig() runconfig.LogConfig {
cfg := container.hostConfig.LogConfig
if cfg.Type != "" { // container has log driver configured
if cfg.Type != "" || len(cfg.Config) > 0 { // container has log driver configured
if cfg.Type == "" {
cfg.Type = jsonfilelog.Name
}
return cfg
}
// Use daemon's default log config for containers

@@ -719,6 +722,9 @@ func (container *Container) getLogConfig() runconfig.LogConfig {
func (container *Container) getLogger() (logger.Logger, error) {
cfg := container.getLogConfig()
if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil {
return nil, err
}
c, err := logger.GetLogDriver(cfg.Type)
if err != nil {
return nil, fmt.Errorf("Failed to get logging factory: %v", err)
@@ -891,28 +897,32 @@ func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer
if logs {
logDriver, err := c.getLogger()
cLog, err := logDriver.GetReader()
if err != nil {
logrus.Errorf("Error reading logs: %s", err)
} else if c.LogDriverType() != jsonfilelog.Name {
logrus.Errorf("Reading logs not implemented for driver %s", c.LogDriverType())
logrus.Errorf("Error obtaining the logger %v", err)
return err
}
if _, ok := logDriver.(logger.Reader); !ok {
logrus.Errorf("cannot read logs for [%s] driver", logDriver.Name())
} else {
dec := json.NewDecoder(cLog)
for {
l := &jsonlog.JSONLog{}
if cLog, err := logDriver.(logger.Reader).ReadLog(); err != nil {
logrus.Errorf("Error reading logs %v", err)
} else {
dec := json.NewDecoder(cLog)
for {
l := &jsonlog.JSONLog{}
if err := dec.Decode(l); err == io.EOF {
break
} else if err != nil {
logrus.Errorf("Error streaming logs: %s", err)
break
}
if l.Stream == "stdout" && stdout != nil {
io.WriteString(stdout, l.Log)
}
if l.Stream == "stderr" && stderr != nil {
io.WriteString(stderr, l.Log)
if err := dec.Decode(l); err == io.EOF {
break
} else if err != nil {
logrus.Errorf("Error streaming logs: %s", err)
break
}
if l.Stream == "stdout" && stdout != nil {
io.WriteString(stdout, l.Log)
}
if l.Stream == "stderr" && stderr != nil {
io.WriteString(stderr, l.Log)
}
}
}
}
@@ -3,7 +3,6 @@ package logger
import (
"bytes"
"encoding/json"
"errors"
"io"
"testing"
"time"

@@ -19,10 +18,6 @@ func (l *TestLoggerJSON) Close() error { return nil }
func (l *TestLoggerJSON) Name() string { return "json" }
func (l *TestLoggerJSON) GetReader() (io.Reader, error) {
return nil, errors.New("not used in the test")
}
type TestLoggerText struct {
*bytes.Buffer
}

@@ -36,10 +31,6 @@ func (l *TestLoggerText) Close() error { return nil }
func (l *TestLoggerText) Name() string { return "text" }
func (l *TestLoggerText) GetReader() (io.Reader, error) {
return nil, errors.New("not used in the test")
}
func TestCopier(t *testing.T) {
stdoutLine := "Line that thinks that it is log line from docker stdout"
stderrLine := "Line that thinks that it is log line from docker stderr"
@@ -11,6 +11,9 @@ import (
// Creator is a method that builds a logging driver instance with given context
type Creator func(Context) (Logger, error)
//LogOptValidator is a method that validates the log opts provided
type LogOptValidator func(cfg map[string]string) error
// Context provides enough information for a logging driver to do its function
type Context struct {
Config map[string]string

@@ -42,8 +45,9 @@ func (ctx *Context) Command() string {
}
type logdriverFactory struct {
registry map[string]Creator
m sync.Mutex
registry map[string]Creator
optValidator map[string]LogOptValidator
m sync.Mutex
}
func (lf *logdriverFactory) register(name string, c Creator) error {

@@ -57,6 +61,17 @@ func (lf *logdriverFactory) register(name string, c Creator) error {
return nil
}
func (lf *logdriverFactory) registerLogOptValidator(name string, l LogOptValidator) error {
lf.m.Lock()
defer lf.m.Unlock()
if _, ok := lf.optValidator[name]; ok {
return fmt.Errorf("logger: log driver named '%s' is already registered", name)
}
lf.optValidator[name] = l
return nil
}
func (lf *logdriverFactory) get(name string) (Creator, error) {
lf.m.Lock()
defer lf.m.Unlock()

@@ -68,7 +83,15 @@ func (lf *logdriverFactory) get(name string) (Creator, error) {
return c, nil
}
var factory = &logdriverFactory{registry: make(map[string]Creator)} // global factory instance
func (lf *logdriverFactory) getLogOptValidator(name string) LogOptValidator {
lf.m.Lock()
defer lf.m.Unlock()
c, _ := lf.optValidator[name]
return c
}
var factory = &logdriverFactory{registry: make(map[string]Creator), optValidator: make(map[string]LogOptValidator)} // global factory instance
// RegisterLogDriver registers the given logging driver builder with given logging
// driver name.

@@ -76,7 +99,19 @@ func RegisterLogDriver(name string, c Creator) error {
return factory.register(name, c)
}
func RegisterLogOptValidator(name string, l LogOptValidator) error {
return factory.registerLogOptValidator(name, l)
}
// GetLogDriver provides the logging driver builder for a logging driver name.
func GetLogDriver(name string) (Creator, error) {
return factory.get(name)
}
func ValidateLogOpts(name string, cfg map[string]string) error {
l := factory.getLogOptValidator(name)
if l != nil {
return l(cfg)
}
return fmt.Errorf("Log Opts are not valid for [%s] driver", name)
}
@@ -2,7 +2,7 @@ package fluentd
import (
"bytes"
"io"
"fmt"
"math"
"net"
"strconv"

@@ -38,6 +38,9 @@ func init() {
if err := logger.RegisterLogDriver(name, New); err != nil {
logrus.Fatal(err)
}
if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil {
logrus.Fatal(err)
}
}
func parseConfig(ctx logger.Context) (string, int, string, error) {

@@ -116,6 +119,18 @@ func (f *Fluentd) Log(msg *logger.Message) error {
return f.writer.PostWithTime(f.tag, msg.Timestamp, data)
}
func ValidateLogOpt(cfg map[string]string) error {
for key := range cfg {
switch key {
case "fluentd-address":
case "fluentd-tag":
default:
return fmt.Errorf("unknown log opt '%s' for fluentd log driver", key)
}
}
return nil
}
func (f *Fluentd) Close() error {
return f.writer.Close()
}

@@ -123,7 +138,3 @@ func (f *Fluentd) Close() error {
func (f *Fluentd) Name() string {
return name
}
func (s *Fluentd) GetReader() (io.Reader, error) {
return nil, logger.ReadLogsNotSupported
}
@@ -5,7 +5,6 @@ package gelf
import (
"bytes"
"fmt"
"io"
"net"
"net/url"
"time"

@@ -39,6 +38,9 @@ func init() {
if err := logger.RegisterLogDriver(name, New); err != nil {
logrus.Fatal(err)
}
if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil {
logrus.Fatal(err)
}
}
func New(ctx logger.Context) (logger.Logger, error) {

@@ -113,10 +115,6 @@ func (s *GelfLogger) Log(msg *logger.Message) error {
return nil
}
func (s *GelfLogger) GetReader() (io.Reader, error) {
return nil, logger.ReadLogsNotSupported
}
func (s *GelfLogger) Close() error {
return s.writer.Close()
}

@@ -125,6 +123,18 @@ func (s *GelfLogger) Name() string {
return name
}
func ValidateLogOpt(cfg map[string]string) error {
for key := range cfg {
switch key {
case "gelf-address":
case "gelf-tag":
default:
return fmt.Errorf("unknown log opt '%s' for gelf log driver", key)
}
}
return nil
}
func parseAddress(address string) (string, error) {
if urlutil.IsTransportURL(address) {
url, err := url.Parse(address)
@@ -4,7 +4,6 @@ package journald
import (
"fmt"
"io"
"github.com/Sirupsen/logrus"
"github.com/coreos/go-systemd/journal"

@@ -54,7 +53,3 @@ func (s *Journald) Close() error {
func (s *Journald) Name() string {
return name
}
func (s *Journald) GetReader() (io.Reader, error) {
return nil, logger.ReadLogsNotSupported
}
@@ -2,14 +2,17 @@ package jsonfilelog
import (
"bytes"
"fmt"
"io"
"os"
"strconv"
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/pkg/jsonlog"
"github.com/docker/docker/pkg/timeutils"
"github.com/docker/docker/pkg/units"
)
const (

@@ -19,17 +22,21 @@ const (
// JSONFileLogger is Logger implementation for default docker logging:
// JSON objects to file
type JSONFileLogger struct {
buf *bytes.Buffer
f *os.File // store for closing
mu sync.Mutex // protects buffer
ctx logger.Context
buf *bytes.Buffer
f *os.File // store for closing
mu sync.Mutex // protects buffer
capacity int64 //maximum size of each file
n int //maximum number of files
ctx logger.Context
}
func init() {
if err := logger.RegisterLogDriver(Name, New); err != nil {
logrus.Fatal(err)
}
if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil {
logrus.Fatal(err)
}
}
// New creates new JSONFileLogger which writes to filename

@@ -38,10 +45,30 @@ func New(ctx logger.Context) (logger.Logger, error) {
if err != nil {
return nil, err
}
var capval int64 = -1
if capacity, ok := ctx.Config["max-size"]; ok {
var err error
capval, err = units.FromHumanSize(capacity)
if err != nil {
return nil, err
}
}
var maxFiles int = 1
if maxFileString, ok := ctx.Config["max-file"]; ok {
maxFiles, err = strconv.Atoi(maxFileString)
if err != nil {
return nil, err
}
if maxFiles < 1 {
return nil, fmt.Errorf("max-files cannot be less than 1.")
}
}
return &JSONFileLogger{
f: log,
buf: bytes.NewBuffer(nil),
ctx: ctx,
f: log,
buf: bytes.NewBuffer(nil),
ctx: ctx,
capacity: capval,
n: maxFiles,
}, nil
}

@@ -59,17 +86,102 @@ func (l *JSONFileLogger) Log(msg *logger.Message) error {
return err
}
l.buf.WriteByte('\n')
_, err = l.buf.WriteTo(l.f)
_, err = writeLog(l)
return err
}
func writeLog(l *JSONFileLogger) (int64, error) {
if l.capacity == -1 {
return writeToBuf(l)
}
meta, err := l.f.Stat()
if err != nil {
return -1, err
}
if meta.Size() >= l.capacity {
name := l.f.Name()
if err := l.f.Close(); err != nil {
return -1, err
}
if err := rotate(name, l.n); err != nil {
return -1, err
}
file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666)
if err != nil {
return -1, err
}
l.f = file
}
return writeToBuf(l)
}
func writeToBuf(l *JSONFileLogger) (int64, error) {
i, err := l.buf.WriteTo(l.f)
if err != nil {
// this buffer is screwed, replace it with another to avoid races
l.buf = bytes.NewBuffer(nil)
}
return i, err
}
func rotate(name string, n int) error {
if n < 2 {
return nil
}
for i := n - 1; i > 1; i-- {
oldFile := name + "." + strconv.Itoa(i)
replacingFile := name + "." + strconv.Itoa(i-1)
if err := backup(oldFile, replacingFile); err != nil {
return err
}
}
if err := backup(name+".1", name); err != nil {
return err
}
return nil
}
func (l *JSONFileLogger) GetReader() (io.Reader, error) {
return os.Open(l.ctx.LogPath)
func backup(old, curr string) error {
if _, err := os.Stat(old); !os.IsNotExist(err) {
err := os.Remove(old)
if err != nil {
return err
}
}
if _, err := os.Stat(curr); os.IsNotExist(err) {
if f, err := os.Create(curr); err != nil {
return err
} else {
f.Close()
}
}
return os.Rename(curr, old)
}
func ValidateLogOpt(cfg map[string]string) error {
for key := range cfg {
switch key {
case "max-file":
case "max-size":
default:
return fmt.Errorf("unknown log opt '%s' for json-file log driver", key)
}
}
return nil
}
func (l *JSONFileLogger) ReadLog(args ...string) (io.Reader, error) {
pth := l.ctx.LogPath
if len(args) > 0 {
//check if args[0] is an integer index
index, err := strconv.ParseInt(args[0], 0, 0)
if err != nil {
return nil, err
}
if index > 0 {
pth = pth + "." + args[0]
}
}
return os.Open(pth)
}
func (l *JSONFileLogger) LogPath() string {
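Taken together, writeLog, rotate and backup cap the active file at `max-size` and keep at most `max-file` files, shifting each rotated file one suffix higher. As a rough illustration (the file name is a placeholder, borrowed from the test below), a driver configured with `max-size=1m` and `max-file=3` settles into a layout like:

    container.log      current file, receives new writes
    container.log.1    produced by the most recent rollover
    container.log.2    oldest file, overwritten on the next rollover

ReadLog("1") or ReadLog("2") then opens the matching rotated file, which is what the daemon's getReader helper later in this diff relies on.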
@@ -4,6 +4,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
"strconv"
"testing"
"time"

@@ -84,3 +85,67 @@ func BenchmarkJSONFileLogger(b *testing.B) {
}
}
}
func TestJSONFileLoggerWithOpts(t *testing.T) {
cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657"
tmp, err := ioutil.TempDir("", "docker-logger-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
filename := filepath.Join(tmp, "container.log")
config := map[string]string{"max-file": "2", "max-size": "1k"}
l, err := New(logger.Context{
ContainerID: cid,
LogPath: filename,
Config: config,
})
if err != nil {
t.Fatal(err)
}
defer l.Close()
for i := 0; i < 20; i++ {
if err := l.Log(&logger.Message{ContainerID: cid, Line: []byte("line" + strconv.Itoa(i)), Source: "src1"}); err != nil {
t.Fatal(err)
}
}
res, err := ioutil.ReadFile(filename)
if err != nil {
t.Fatal(err)
}
penUlt, err := ioutil.ReadFile(filename + ".1")
if err != nil {
t.Fatal(err)
}
expectedPenultimate := `{"log":"line0\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line2\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line3\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line4\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line5\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line6\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line7\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line8\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line9\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line10\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line11\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line12\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line13\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line14\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line15\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
`
expected := `{"log":"line16\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line17\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line18\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
{"log":"line19\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
`
if string(res) != expected {
t.Fatalf("Wrong log content: %q, expected %q", res, expected)
}
if string(penUlt) != expectedPenultimate {
t.Fatalf("Wrong log content: %q, expected %q", penUlt, expectedPenultimate)
}
}
@@ -21,5 +21,9 @@ type Logger interface {
Log(*Message) error
Name() string
Close() error
GetReader() (io.Reader, error)
}
//Reader is an interface for docker logging drivers that support reading
type Reader interface {
ReadLog(args ...string) (io.Reader, error)
}
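Splitting read support into its own Reader interface means callers have to type-assert before streaming logs back, exactly as AttachWithLogs and ContainerLogs do elsewhere in this diff. A minimal sketch of that calling pattern (the helper name is illustrative, not from this patch, and assumes fmt, io and this logger package are imported):

    // readBack returns a reader over a driver's stored logs, but only if the
    // driver also implements logger.Reader; write-only drivers are rejected.
    func readBack(l logger.Logger) (io.Reader, error) {
        r, ok := l.(logger.Reader)
        if !ok {
            return nil, fmt.Errorf("driver %s does not support reading logs", l.Name())
        }
        // No argument selects the current log file; an index such as "1"
        // selects a rotated file for drivers that roll their logs over.
        return r.ReadLog()
    }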
@@ -4,7 +4,7 @@ package syslog
import (
"errors"
"io"
"fmt"
"log/syslog"
"net"
"net/url"

@@ -51,6 +51,9 @@ func init() {
if err := logger.RegisterLogDriver(name, New); err != nil {
logrus.Fatal(err)
}
if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil {
logrus.Fatal(err)
}
}
func New(ctx logger.Context) (logger.Logger, error) {

@@ -99,10 +102,6 @@ func (s *Syslog) Name() string {
return name
}
func (s *Syslog) GetReader() (io.Reader, error) {
return nil, logger.ReadLogsNotSupported
}
func parseAddress(address string) (string, string, error) {
if urlutil.IsTransportURL(address) {
url, err := url.Parse(address)

@@ -133,6 +132,19 @@ func parseAddress(address string) (string, string, error) {
return "", "", nil
}
func ValidateLogOpt(cfg map[string]string) error {
for key := range cfg {
switch key {
case "syslog-address":
case "syslog-tag":
case "syslog-facility":
default:
return fmt.Errorf("unknown log opt '%s' for syslog log driver", key)
}
}
return nil
}
func parseFacility(facility string) (syslog.Priority, error) {
if facility == "" {
return syslog.LOG_DAEMON, nil
daemon/logs.go
@@ -12,6 +12,7 @@ import (
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/daemon/logger/jsonfilelog"
"github.com/docker/docker/pkg/jsonlog"
"github.com/docker/docker/pkg/stdcopy"

@@ -40,7 +41,7 @@ func (daemon *Daemon) ContainerLogs(name string, config *ContainerLogsConfig) er
format = timeutils.RFC3339NanoFixed
}
if config.Tail == "" {
config.Tail = "all"
config.Tail = "latest"
}
container, err := daemon.Get(name)

@@ -62,13 +63,29 @@ func (daemon *Daemon) ContainerLogs(name string, config *ContainerLogsConfig) er
if container.LogDriverType() != jsonfilelog.Name {
return fmt.Errorf("\"logs\" endpoint is supported only for \"json-file\" logging driver")
}
maxFile := 1
container.readHostConfig()
cfg := container.getLogConfig()
conf := cfg.Config
if val, ok := conf["max-file"]; ok {
var err error
maxFile, err = strconv.Atoi(val)
if err != nil {
return fmt.Errorf("Error reading max-file value: %s", err)
}
}
logDriver, err := container.getLogger()
cLog, err := logDriver.GetReader()
if err != nil {
logrus.Errorf("Error reading logs: %s", err)
return err
}
_, ok := logDriver.(logger.Reader)
if !ok {
logrus.Errorf("Cannot read logs of the [%s] driver", logDriver.Name())
} else {
// json-file driver
if config.Tail != "all" {
if config.Tail != "all" && config.Tail != "latest" {
var err error
lines, err = strconv.Atoi(config.Tail)
if err != nil {

@@ -78,42 +95,50 @@ func (daemon *Daemon) ContainerLogs(name string, config *ContainerLogsConfig) er
}
if lines != 0 {
if lines > 0 {
f := cLog.(*os.File)
ls, err := tailfile.TailFile(f, lines)
if err != nil {
return err
}
tmp := bytes.NewBuffer([]byte{})
for _, l := range ls {
fmt.Fprintf(tmp, "%s\n", l)
}
cLog = tmp
n := maxFile
if config.Tail == "latest" && config.Since.IsZero() {
n = 1
}
dec := json.NewDecoder(cLog)
l := &jsonlog.JSONLog{}
for {
l.Reset()
if err := dec.Decode(l); err == io.EOF {
break
} else if err != nil {
logrus.Errorf("Error streaming logs: %s", err)
before := false
for i := n; i > 0; i-- {
if before {
break
}
logLine := l.Log
if !config.Since.IsZero() && l.Created.Before(config.Since) {
cLog, err := getReader(logDriver, i, n, lines)
if err != nil {
logrus.Debugf("Error reading %d log file: %v", i-1, err)
continue
}
if config.Timestamps {
// format can be "" or time format, so here can't be error
logLine, _ = l.Format(format)
//if lines are specified, then iterate only once
if lines > 0 {
i = 1
} else { // if lines are not specified, cLog is a file, It needs to be closed
defer cLog.(*os.File).Close()
}
if l.Stream == "stdout" && config.UseStdout {
io.WriteString(outStream, logLine)
}
if l.Stream == "stderr" && config.UseStderr {
io.WriteString(errStream, logLine)
dec := json.NewDecoder(cLog)
l := &jsonlog.JSONLog{}
for {
l.Reset()
if err := dec.Decode(l); err == io.EOF {
break
} else if err != nil {
logrus.Errorf("Error streaming logs: %s", err)
break
}
logLine := l.Log
if !config.Since.IsZero() && l.Created.Before(config.Since) {
continue
}
if config.Timestamps {
// format can be "" or time format, so here can't be error
logLine, _ = l.Format(format)
}
if l.Stream == "stdout" && config.UseStdout {
io.WriteString(outStream, logLine)
}
if l.Stream == "stderr" && config.UseStderr {
io.WriteString(errStream, logLine)
}
}
}
}

@@ -177,3 +202,36 @@ func (daemon *Daemon) ContainerLogs(name string, config *ContainerLogsConfig) er
}
return nil
}
func getReader(logDriver logger.Logger, fileIndex, maxFiles, lines int) (io.Reader, error) {
if lines <= 0 {
index := strconv.Itoa(fileIndex - 1)
cLog, err := logDriver.(logger.Reader).ReadLog(index)
return cLog, err
}
buf := bytes.NewBuffer([]byte{})
remaining := lines
for i := 0; i < maxFiles; i++ {
index := strconv.Itoa(i)
cLog, err := logDriver.(logger.Reader).ReadLog(index)
if err != nil {
return buf, err
}
f := cLog.(*os.File)
ls, err := tailfile.TailFile(f, remaining)
if err != nil {
return buf, err
}
tmp := bytes.NewBuffer([]byte{})
for _, l := range ls {
fmt.Fprintf(tmp, "%s\n", l)
}
tmp.ReadFrom(buf)
buf = tmp
if len(ls) == remaining {
return buf, nil
}
remaining = remaining - len(ls)
}
return buf, nil
}
@@ -15,6 +15,7 @@ import (
"github.com/docker/docker/autogen/dockerversion"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/daemon"
"github.com/docker/docker/daemon/logger"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/pkg/pidfile"
"github.com/docker/docker/pkg/signal"

@@ -94,6 +95,12 @@ func mainDaemon() {
logrus.Fatalf("Failed to set umask: %v", err)
}
if len(daemonCfg.LogConfig.Config) > 0 {
if err := logger.ValidateLogOpts(daemonCfg.LogConfig.Type, daemonCfg.LogConfig.Config); err != nil {
logrus.Fatalf("Failed to set log opts: %v", err)
}
}
var pfile *pidfile.PidFile
if daemonCfg.Pidfile != "" {
pf, err := pidfile.New(daemonCfg.Pidfile)
@@ -29,7 +29,7 @@ The `docker logs --follow` command will continue streaming the new output from
the container's `STDOUT` and `STDERR`.

Passing a negative number or a non-integer to `--tail` is invalid and the
value is set to `all` in that case. This behavior may change in the future.
value is set to `latest` in that case.

The `docker logs --timestamp` commands will add an RFC3339Nano
timestamp, for example `2014-09-16T06:17:46.000000000Z`, to each
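For illustration (the container name is a placeholder), asking for only the most recent output might look like:

    $ docker logs --tail=10 my_container

With the rolled-over json-file logs introduced in this change, `--tail=latest` is meant to read only the newest log file rather than every rotated file.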
@@ -17,7 +17,7 @@ container's logging driver. The following options are supported:

| `none` | Disables any logging for the container. `docker logs` won't be available with this driver. |
|-------------|-------------------------------------------------------------------------------------------------------------------------------|
| `json-file` | Default logging driver for Docker. Writes JSON messages to file. No logging options are supported for this driver. |
| `json-file` | Default logging driver for Docker. Writes JSON messages to file. |
| `syslog` | Syslog logging driver for Docker. Writes log messages to syslog. |
| `journald` | Journald logging driver for Docker. Writes log messages to `journald`. |
| `gelf` | Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint like Graylog or Logstash. |

@@ -25,6 +25,20 @@ container's logging driver. The following options are supported:

The `docker logs` command is available only for the `json-file` logging driver.

### The json-file options

The following logging options are supported for the `json-file` logging driver:

    --log-opt max-size=[0-9+][k|m|g]
    --log-opt max-file=[0-9+]

Logs that reach `max-size` are rolled over. You can set the size in kilobytes (k), megabytes (m), or gigabytes (g), e.g. `--log-opt max-size=50m`. If `max-size` is not set, then logs are not rolled over.

`max-file` specifies the maximum number of files that a log is rolled over to before the oldest file is discarded, e.g. `--log-opt max-file=100`. If `max-size` is not set, then `max-file` is not honored.

If `max-size` and `max-file` are set, `docker logs` only returns the log lines from the newest log file.
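For example, a container that caps each log file at 50 megabytes and keeps at most five rolled-over files could be started roughly like this (the flag spelling follows the options documented above; the image name is a placeholder):

    $ docker run --log-driver=json-file --log-opt max-size=50m --log-opt max-file=5 my_image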
### The syslog options

The following logging options are supported for the `syslog` logging driver:
@@ -425,7 +425,6 @@ func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]st
if loggingDriver == "none" && len(loggingOpts) > 0 {
return map[string]string{}, fmt.Errorf("Invalid logging opts for driver %s", loggingDriver)
}
//TODO - validation step
return loggingOptsMap, nil
}