
add UDP GELF logging-driver

Allows sending container logs to Graylog or Logstash.
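For example, a container's logs could be shipped to a GELF endpoint like this (illustrative invocation; the address, tag, and image are placeholders, the flags are the ones documented below):

    $ docker run --log-driver=gelf \
        --log-opt gelf-address=udp://192.168.0.42:12201 \
        --log-opt gelf-tag="database" \
        busybox echo hello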

Signed-off-by: Marius Sturm <marius@graylog.com>
Marius Sturm committed 10 years ago · commit 96d06e106f

+ 8 - 3
daemon/container.go

@@ -689,9 +689,14 @@ func (container *Container) getLogger() (logger.Logger, error) {
 		return nil, fmt.Errorf("Failed to get logging factory: %v", err)
 	}
 	ctx := logger.Context{
-		Config:        cfg.Config,
-		ContainerID:   container.ID,
-		ContainerName: container.Name,
+		Config:              cfg.Config,
+		ContainerID:         container.ID,
+		ContainerName:       container.Name,
+		ContainerEntrypoint: container.Path,
+		ContainerArgs:       container.Args,
+		ContainerImageID:    container.ImageID,
+		ContainerImageName:  container.Config.Image,
+		ContainerCreated:    container.Created,
 	}

 	// Set logging file for "json-logger"

+ 1 - 0
daemon/logdrivers_linux.go

@@ -3,6 +3,7 @@ package daemon
 // Importing packages here only to make sure their init gets called and
 // therefore they register themselves to the logdriver factory.
 import (
+	_ "github.com/docker/docker/daemon/logger/gelf"
 	_ "github.com/docker/docker/daemon/logger/journald"
 	_ "github.com/docker/docker/daemon/logger/journald"
 	_ "github.com/docker/docker/daemon/logger/jsonfilelog"
 	_ "github.com/docker/docker/daemon/logger/jsonfilelog"
 	_ "github.com/docker/docker/daemon/logger/syslog"
 	_ "github.com/docker/docker/daemon/logger/syslog"

+ 29 - 4
daemon/logger/factory.go

@@ -2,7 +2,10 @@ package logger
 
 
 import (
 	"fmt"
+	"os"
+	"strings"
 	"sync"
 	"sync"
+	"time"
 )

 // Creator is a method that builds a logging driver instance with given context
@@ -10,10 +13,32 @@ type Creator func(Context) (Logger, error)
 
 
 // Context provides enough information for a logging driver to do its function
 type Context struct {
-	Config        map[string]string
-	ContainerID   string
-	ContainerName string
-	LogPath       string
+	Config              map[string]string
+	ContainerID         string
+	ContainerName       string
+	ContainerEntrypoint string
+	ContainerArgs       []string
+	ContainerImageID    string
+	ContainerImageName  string
+	ContainerCreated    time.Time
+	LogPath             string
+}
+
+func (ctx *Context) Hostname() (string, error) {
+	hostname, err := os.Hostname()
+	if err != nil {
+		return "", fmt.Errorf("logger: can not resolve hostname: %v", err)
+	}
+	return hostname, nil
+}
+
+func (ctx *Context) Command() string {
+	terms := []string{ctx.ContainerEntrypoint}
+	for _, arg := range ctx.ContainerArgs {
+		terms = append(terms, arg)
+	}
+	command := strings.Join(terms, " ")
+	return command
 }

 type logdriverFactory struct {
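As an aside, a minimal sketch of how a logging driver might consume the new Context fields and helpers added above; the package and function names here are purely illustrative and are not part of this change:

    // toylog is an illustrative consumer of logger.Context; not part of Docker.
    package toylog

    import (
    	"fmt"

    	"github.com/docker/docker/daemon/logger"
    )

    // describe builds a one-line description of the container a log message came from.
    func describe(ctx logger.Context) (string, error) {
    	// Hostname() resolves the name of the host running the daemon.
    	hostname, err := ctx.Hostname()
    	if err != nil {
    		return "", err
    	}
    	// Command() joins ContainerEntrypoint and ContainerArgs with spaces.
    	return fmt.Sprintf("%s: container %s (image %s) running %q, created %v",
    		hostname, ctx.ContainerID, ctx.ContainerImageName, ctx.Command(),
    		ctx.ContainerCreated), nil
    }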

+ 149 - 0
daemon/logger/gelf/gelf.go

@@ -0,0 +1,149 @@
+// +build linux
+
+package gelf
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+	"net/url"
+	"time"
+
+	"github.com/Graylog2/go-gelf/gelf"
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/pkg/urlutil"
+)
+
+const name = "gelf"
+
+type GelfLogger struct {
+	writer *gelf.Writer
+	ctx    logger.Context
+	fields GelfFields
+}
+
+type GelfFields struct {
+	hostname      string
+	containerId   string
+	containerName string
+	imageId       string
+	imageName     string
+	command       string
+	tag           string
+	created       time.Time
+}
+
+func init() {
+	if err := logger.RegisterLogDriver(name, New); err != nil {
+		logrus.Fatal(err)
+	}
+}
+
+func New(ctx logger.Context) (logger.Logger, error) {
+	// parse gelf address
+	address, err := parseAddress(ctx.Config["gelf-address"])
+	if err != nil {
+		return nil, err
+	}
+
+	// collect extra data for GELF message
+	hostname, err := ctx.Hostname()
+	if err != nil {
+		return nil, fmt.Errorf("gelf: cannot access hostname to set source field")
+	}
+
+	// remove leading slash from container name
+	containerName := bytes.TrimLeft([]byte(ctx.ContainerName), "/")
+
+	fields := GelfFields{
+		hostname:      hostname,
+		containerId:   ctx.ContainerID,
+		containerName: string(containerName),
+		imageId:       ctx.ContainerImageID,
+		imageName:     ctx.ContainerImageName,
+		command:       ctx.Command(),
+		tag:           ctx.Config["gelf-tag"],
+		created:       ctx.ContainerCreated,
+	}
+
+	// create new gelfWriter
+	gelfWriter, err := gelf.NewWriter(address)
+	if err != nil {
+		return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err)
+	}
+
+	return &GelfLogger{
+		writer: gelfWriter,
+		ctx:    ctx,
+		fields: fields,
+	}, nil
+}
+
+func (s *GelfLogger) Log(msg *logger.Message) error {
+	// remove trailing and leading whitespace
+	short := bytes.TrimSpace([]byte(msg.Line))
+
+	level := gelf.LOG_INFO
+	if msg.Source == "stderr" {
+		level = gelf.LOG_ERR
+	}
+
+	m := gelf.Message{
+		Version:  "1.1",
+		Host:     s.fields.hostname,
+		Short:    string(short),
+		TimeUnix: float64(msg.Timestamp.UnixNano()/int64(time.Millisecond)) / 1000.0,
+		Level:    level,
+		Extra: map[string]interface{}{
+			"_container_id":   s.fields.containerId,
+			"_container_name": s.fields.containerName,
+			"_image_id":       s.fields.imageId,
+			"_image_name":     s.fields.imageName,
+			"_command":        s.fields.command,
+			"_tag":            s.fields.tag,
+			"_created":        s.fields.created,
+		},
+	}
+
+	if err := s.writer.WriteMessage(&m); err != nil {
+		return fmt.Errorf("gelf: cannot send GELF message: %v", err)
+	}
+	return nil
+}
+
+func (s *GelfLogger) GetReader() (io.Reader, error) {
+	return nil, logger.ReadLogsNotSupported
+}
+
+func (s *GelfLogger) Close() error {
+	return s.writer.Close()
+}
+
+func (s *GelfLogger) Name() string {
+	return name
+}
+
+func parseAddress(address string) (string, error) {
+	if urlutil.IsTransportURL(address) {
+		url, err := url.Parse(address)
+		if err != nil {
+			return "", err
+		}
+
+		// we support only udp
+		if url.Scheme != "udp" {
+			return "", fmt.Errorf("gelf: endpoint needs to be UDP")
+		}
+
+		// get host and port
+		if _, _, err = net.SplitHostPort(url.Host); err != nil {
+			return "", fmt.Errorf("gelf: please provide gelf-address as udp://host:port")
+		}
+
+		return url.Host, nil
+	}
+
+	return "", nil
+}
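A test-style sketch of the address forms `parseAddress` accepts and rejects (illustrative only, not part of this commit; it would have to live in the same package since `parseAddress` is unexported):

    // +build linux

    // Illustrative only: expected parseAddress behaviour for a few inputs.
    package gelf

    import "testing"

    func TestParseAddressSketch(t *testing.T) {
    	// udp:// with host and port is accepted; the host:port part is returned.
    	if host, err := parseAddress("udp://192.168.0.42:12201"); err != nil || host != "192.168.0.42:12201" {
    		t.Fatalf("udp address: got %q, %v", host, err)
    	}
    	// non-UDP transports are rejected.
    	if _, err := parseAddress("tcp://192.168.0.42:12201"); err == nil {
    		t.Fatal("tcp:// should be rejected")
    	}
    	// a missing port is rejected.
    	if _, err := parseAddress("udp://192.168.0.42"); err == nil {
    		t.Fatal("udp:// without a port should be rejected")
    	}
    }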

+ 3 - 0
daemon/logger/gelf/gelf_unsupported.go

@@ -0,0 +1,3 @@
+// +build !linux
+
+package gelf

+ 1 - 1
docs/man/docker-create.1.md

@@ -155,7 +155,7 @@ two memory nodes.
 **--lxc-conf**=[]
    (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"

-**--log-driver**="|*json-file*|*syslog*|*journald*|*none*"
+**--log-driver**="|*json-file*|*syslog*|*journald*|*gelf*|*none*"
   Logging driver for container. Default is defined by daemon `--log-driver` flag.
   **Warning**: `docker logs` command works only for `json-file` logging driver.


+ 1 - 1
docs/man/docker-run.1.md

@@ -252,7 +252,7 @@ which interface and port to use.
 **--lxc-conf**=[]
    (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"

-**--log-driver**="|*json-file*|*syslog*|*journald*|*none*"
+**--log-driver**="|*json-file*|*syslog*|*journald*|*gelf*|*none*"
   Logging driver for container. Default is defined by daemon `--log-driver` flag.
   **Warning**: `docker logs` command works only for `json-file` logging driver.


+ 1 - 1
docs/man/docker.1.md

@@ -103,7 +103,7 @@ unix://[/path/to/socket] to use.
 **--label**="[]"
   Set key=value labels to the daemon (displayed in `docker info`)

-**--log-driver**="*json-file*|*syslog*|*journald*|*none*"
+**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*none*"
   Default driver for container logs. Default is `json-file`.
   **Warning**: `docker logs` command works only for `json-file` logging driver.


+ 1 - 1
docs/sources/reference/api/docker_remote_api_v1.20.md

@@ -269,7 +269,7 @@ Json Parameters:
         systems, such as SELinux.
     -   **LogConfig** - Log configuration for the container, specified as a JSON object in the form
           `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}`.
-          Available types: `json-file`, `syslog`, `journald`, `none`.
+          Available types: `json-file`, `syslog`, `journald`, `gelf`, `none`.
           `json-file` logging driver.
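For the new `gelf` type, such a LogConfig object might look like the following (the address and tag values are placeholders):

        { "Type": "gelf", "Config": { "gelf-address": "udp://192.168.0.42:12201", "gelf-tag": "database" } }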
     -   **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.


+ 19 - 0
docs/sources/reference/run.md

@@ -906,6 +906,25 @@ reference documentation.
 
 
 The following logging options are supported for this logging driver: [none]

+#### Logging driver: gelf
+
+Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint like
+Graylog or Logstash. The `docker logs` command is not available for this logging driver.
+
+The GELF logging driver supports the following options:
+
+    --log-opt gelf-address=udp://host:port
+    --log-opt gelf-tag="database"
+
+The `gelf-address` option specifies the remote GELF server address that the
+driver connects to. Currently, only `udp` is supported as the transport and you must
+specify a `port` value. The following example shows how to connect the `gelf`
+driver to a remote GELF server at `192.168.0.42` on port `12201`:
+
+    $ docker run --log-driver=gelf --log-opt gelf-address=udp://192.168.0.42:12201
+
+The `gelf-tag` option specifies a tag for easy container identification.
+
 ## Overriding Dockerfile image defaults

 When a developer builds an image from a [*Dockerfile*](/reference/builder)

+ 1 - 0
hack/vendor.sh

@@ -75,3 +75,4 @@ clone git github.com/coreos/go-systemd v2
 clone git github.com/godbus/dbus v2
 clone git github.com/syndtr/gocapability 66ef2aa7a23ba682594e2b6f74cf40c0692b49fb
 clone git github.com/golang/protobuf 655cdfa588ea
+clone git github.com/Graylog2/go-gelf 6c62a85f1d47a67f2a5144c0e745b325889a8120

+ 2 - 0
vendor/src/github.com/Graylog2/go-gelf/.gitignore

@@ -0,0 +1,2 @@
+*~
+.#*

+ 21 - 0
vendor/src/github.com/Graylog2/go-gelf/LICENSE

@@ -0,0 +1,21 @@
+Copyright 2012 SocialCode
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+

+ 76 - 0
vendor/src/github.com/Graylog2/go-gelf/README.md

@@ -0,0 +1,76 @@
+go-gelf - GELF library and writer for Go
+========================================
+
+GELF is graylog2's UDP logging format.  This library provides an API
+that applications can use to log messages directly to a graylog2
+server, along with an `io.Writer` that can be used to redirect the
+standard library's log messages (or `os.Stdout`) to a graylog2 server.
+
+Installing
+----------
+
+go-gelf is go get-able:
+
+	go get github.com/Graylog2/go-gelf/gelf
+
+Usage
+-----
+
+The easiest way to integrate graylog logging into your go app is by
+having your `main` function (or even `init`) call `log.SetOutput()`.
+By using an `io.MultiWriter`, we can log to both stdout and graylog -
+giving us both centralized and local logs.  (Redundancy is nice).
+
+	package main
+
+	import (
+		"flag"
+		"github.com/Graylog2/go-gelf/gelf"
+		"io"
+		"log"
+		"os"
+	)
+
+	func main() {
+		var graylogAddr string
+
+		flag.StringVar(&graylogAddr, "graylog", "", "graylog server addr")
+		flag.Parse()
+
+		if graylogAddr != "" {
+			gelfWriter, err := gelf.NewWriter(graylogAddr)
+			if err != nil {
+				log.Fatalf("gelf.NewWriter: %s", err)
+			}
+			// log to both stderr and graylog2
+			log.SetOutput(io.MultiWriter(os.Stderr, gelfWriter))
+			log.Printf("logging to stderr & graylog2@'%s'", graylogAddr)
+		}
+
+		// From here on out, any calls to log.Print* functions
+		// will appear on stdout, and be sent over UDP to the
+		// specified Graylog2 server.
+
+		log.Printf("Hello gray World")
+
+		// ...
+	}
+
+The above program can be invoked as:
+
+	go run test.go -graylog=localhost:12201
+
+Because GELF messages are sent over UDP, graylog server availability
+doesn't impact application performance or response time.  There is a
+small, fixed overhead per log call, regardless of whether the target
+server is reachable or not.
+
+To Do
+-----
+
+- WriteMessage example
+
+License
+-------
+
+go-gelf is offered under the MIT license, see LICENSE for details.
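The To Do above asks for a `WriteMessage` example; a minimal sketch against the vendored API might look like this (the server address and field values are placeholders, not part of the vendored README):

    package main

    import (
    	"log"
    	"time"

    	"github.com/Graylog2/go-gelf/gelf"
    )

    func main() {
    	// Dial the graylog2 server over UDP (placeholder address).
    	w, err := gelf.NewWriter("localhost:12201")
    	if err != nil {
    		log.Fatalf("gelf.NewWriter: %s", err)
    	}
    	// Build a discrete GELF message and send it directly.
    	m := &gelf.Message{
    		Version:  "1.1",
    		Host:     "my-host",
    		Short:    "something happened",
    		TimeUnix: float64(time.Now().Unix()),
    		Level:    gelf.LOG_INFO,
    		Facility: "example",
    		Extra:    map[string]interface{}{"_component": "example"},
    	}
    	if err := w.WriteMessage(m); err != nil {
    		log.Fatalf("WriteMessage: %s", err)
    	}
    }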

+ 142 - 0
vendor/src/github.com/Graylog2/go-gelf/gelf/reader.go

@@ -0,0 +1,142 @@
+// Copyright 2012 SocialCode. All rights reserved.
+// Use of this source code is governed by the MIT
+// license that can be found in the LICENSE file.
+
+package gelf
+
+import (
+	"bytes"
+	"compress/gzip"
+	"compress/zlib"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"strings"
+	"sync"
+)
+
+type Reader struct {
+	mu   sync.Mutex
+	conn net.Conn
+}
+
+func NewReader(addr string) (*Reader, error) {
+	var err error
+	udpAddr, err := net.ResolveUDPAddr("udp", addr)
+	if err != nil {
+		return nil, fmt.Errorf("ResolveUDPAddr('%s'): %s", addr, err)
+	}
+
+	conn, err := net.ListenUDP("udp", udpAddr)
+	if err != nil {
+		return nil, fmt.Errorf("ListenUDP: %s", err)
+	}
+
+	r := new(Reader)
+	r.conn = conn
+	return r, nil
+}
+
+func (r *Reader) Addr() string {
+	return r.conn.LocalAddr().String()
+}
+
+// FIXME: this will discard data if p isn't big enough to hold the
+// full message.
+func (r *Reader) Read(p []byte) (int, error) {
+	msg, err := r.ReadMessage()
+	if err != nil {
+		return -1, err
+	}
+
+	var data string
+
+	if msg.Full == "" {
+		data = msg.Short
+	} else {
+		data = msg.Full
+	}
+
+	return strings.NewReader(data).Read(p)
+}
+
+func (r *Reader) ReadMessage() (*Message, error) {
+	cBuf := make([]byte, ChunkSize)
+	var (
+		err        error
+		n, length  int
+		buf        bytes.Buffer
+		cid, ocid  []byte
+		seq, total uint8
+		cHead      []byte
+		cReader    io.Reader
+		chunks     [][]byte
+	)
+
+	for got := 0; got < 128 && (total == 0 || got < int(total)); got++ {
+		if n, err = r.conn.Read(cBuf); err != nil {
+			return nil, fmt.Errorf("Read: %s", err)
+		}
+		cHead, cBuf = cBuf[:2], cBuf[:n]
+
+		if bytes.Equal(cHead, magicChunked) {
+			//fmt.Printf("chunked %v\n", cBuf[:14])
+			cid, seq, total = cBuf[2:2+8], cBuf[2+8], cBuf[2+8+1]
+			if ocid != nil && !bytes.Equal(cid, ocid) {
+				return nil, fmt.Errorf("out-of-band message %v (awaited %v)", cid, ocid)
+			} else if ocid == nil {
+				ocid = cid
+				chunks = make([][]byte, total)
+			}
+			n = len(cBuf) - chunkedHeaderLen
+			//fmt.Printf("setting chunks[%d]: %d\n", seq, n)
+			chunks[seq] = append(make([]byte, 0, n), cBuf[chunkedHeaderLen:]...)
+			length += n
+		} else { //not chunked
+			if total > 0 {
+				return nil, fmt.Errorf("out-of-band message (not chunked)")
+			}
+			break
+		}
+	}
+	//fmt.Printf("\nchunks: %v\n", chunks)
+
+	if length > 0 {
+		if cap(cBuf) < length {
+			cBuf = append(cBuf, make([]byte, 0, length-cap(cBuf))...)
+		}
+		cBuf = cBuf[:0]
+		for i := range chunks {
+			//fmt.Printf("appending %d %v\n", i, chunks[i])
+			cBuf = append(cBuf, chunks[i]...)
+		}
+		cHead = cBuf[:2]
+	}
+
+	// the data we get from the wire is compressed
+	if bytes.Equal(cHead, magicGzip) {
+		cReader, err = gzip.NewReader(bytes.NewReader(cBuf))
+	} else if cHead[0] == magicZlib[0] &&
+		(int(cHead[0])*256+int(cHead[1]))%31 == 0 {
+		// zlib is slightly more complicated, but correct
+		cReader, err = zlib.NewReader(bytes.NewReader(cBuf))
+	} else {
+		return nil, fmt.Errorf("unknown magic: %x %v", cHead, cHead)
+	}
+
+	if err != nil {
+		return nil, fmt.Errorf("NewReader: %s", err)
+	}
+
+	if _, err = io.Copy(&buf, cReader); err != nil {
+		return nil, fmt.Errorf("io.Copy: %s", err)
+	}
+
+	msg := new(Message)
+	if err := json.Unmarshal(buf.Bytes(), &msg); err != nil {
+		return nil, fmt.Errorf("json.Unmarshal: %s", err)
+	}
+
+	return msg, nil
+}

+ 373 - 0
vendor/src/github.com/Graylog2/go-gelf/gelf/writer.go

@@ -0,0 +1,373 @@
+// Copyright 2012 SocialCode. All rights reserved.
+// Use of this source code is governed by the MIT
+// license that can be found in the LICENSE file.
+
+package gelf
+
+import (
+	"bytes"
+	"compress/flate"
+	"compress/gzip"
+	"compress/zlib"
+	"crypto/rand"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"path"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+)
+
+// Writer implements io.Writer and is used to send both discrete
+// messages to a graylog2 server, or data from a stream-oriented
+// interface (like the functions in log).
+type Writer struct {
+	mu               sync.Mutex
+	conn             net.Conn
+	hostname         string
+	Facility         string // defaults to current process name
+	CompressionLevel int    // one of the consts from compress/flate
+	CompressionType  CompressType
+}
+
+// What compression type the writer should use when sending messages
+// to the graylog2 server
+type CompressType int
+
+const (
+	CompressGzip CompressType = iota
+	CompressZlib
+)
+
+// Message represents the contents of the GELF message.  It is gzipped
+// before sending.
+type Message struct {
+	Version  string                 `json:"version"`
+	Host     string                 `json:"host"`
+	Short    string                 `json:"short_message"`
+	Full     string                 `json:"full_message"`
+	TimeUnix float64                `json:"timestamp"`
+	Level    int32                  `json:"level"`
+	Facility string                 `json:"facility"`
+	Extra    map[string]interface{} `json:"-"`
+}
+
+type innerMessage Message //against circular (Un)MarshalJSON
+
+// Used to control GELF chunking.  Should be less than (MTU - len(UDP
+// header)).
+//
+// TODO: generate dynamically using Path MTU Discovery?
+const (
+	ChunkSize        = 1420
+	chunkedHeaderLen = 12
+	chunkedDataLen   = ChunkSize - chunkedHeaderLen
+)
+
+var (
+	magicChunked = []byte{0x1e, 0x0f}
+	magicZlib    = []byte{0x78}
+	magicGzip    = []byte{0x1f, 0x8b}
+)
+
+// Syslog severity levels
+const (
+	LOG_EMERG   = int32(0)
+	LOG_ALERT   = int32(1)
+	LOG_CRIT    = int32(2)
+	LOG_ERR     = int32(3)
+	LOG_WARNING = int32(4)
+	LOG_NOTICE  = int32(5)
+	LOG_INFO    = int32(6)
+	LOG_DEBUG   = int32(7)
+)
+
+// numChunks returns the number of GELF chunks necessary to transmit
+// the given compressed buffer.
+func numChunks(b []byte) int {
+	lenB := len(b)
+	if lenB <= ChunkSize {
+		return 1
+	}
+	return len(b)/chunkedDataLen + 1
+}
+
+// New returns a new GELF Writer.  This writer can be used to send the
+// output of the standard Go log functions to a central GELF server by
+// passing it to log.SetOutput()
+func NewWriter(addr string) (*Writer, error) {
+	var err error
+	w := new(Writer)
+	w.CompressionLevel = flate.BestSpeed
+
+	if w.conn, err = net.Dial("udp", addr); err != nil {
+		return nil, err
+	}
+	if w.hostname, err = os.Hostname(); err != nil {
+		return nil, err
+	}
+
+	w.Facility = path.Base(os.Args[0])
+
+	return w, nil
+}
+
+// writes the gzip compressed byte array to the connection as a series
+// of GELF chunked messages.  The header format is documented at
+// https://github.com/Graylog2/graylog2-docs/wiki/GELF as:
+//
+//     2-byte magic (0x1e 0x0f), 8 byte id, 1 byte sequence id, 1 byte
+//     total, chunk-data
+func (w *Writer) writeChunked(zBytes []byte) (err error) {
+	b := make([]byte, 0, ChunkSize)
+	buf := bytes.NewBuffer(b)
+	nChunksI := numChunks(zBytes)
+	if nChunksI > 255 {
+		return fmt.Errorf("msg too large, would need %d chunks", nChunksI)
+	}
+	nChunks := uint8(nChunksI)
+	// use urandom to get a unique message id
+	msgId := make([]byte, 8)
+	n, err := io.ReadFull(rand.Reader, msgId)
+	if err != nil || n != 8 {
+		return fmt.Errorf("rand.Reader: %d/%s", n, err)
+	}
+
+	bytesLeft := len(zBytes)
+	for i := uint8(0); i < nChunks; i++ {
+		buf.Reset()
+		// manually write header.  Don't care about
+		// host/network byte order, because the spec only
+		// deals in individual bytes.
+		buf.Write(magicChunked) //magic
+		buf.Write(msgId)
+		buf.WriteByte(i)
+		buf.WriteByte(nChunks)
+		// slice out our chunk from zBytes
+		chunkLen := chunkedDataLen
+		if chunkLen > bytesLeft {
+			chunkLen = bytesLeft
+		}
+		off := int(i) * chunkedDataLen
+		chunk := zBytes[off : off+chunkLen]
+		buf.Write(chunk)
+
+		// write this chunk, and make sure the write was good
+		n, err := w.conn.Write(buf.Bytes())
+		if err != nil {
+			return fmt.Errorf("Write (chunk %d/%d): %s", i,
+				nChunks, err)
+		}
+		if n != len(buf.Bytes()) {
+			return fmt.Errorf("Write len: (chunk %d/%d) (%d/%d)",
+				i, nChunks, n, len(buf.Bytes()))
+		}
+
+		bytesLeft -= chunkLen
+	}
+
+	if bytesLeft != 0 {
+		return fmt.Errorf("error: %d bytes left after sending", bytesLeft)
+	}
+	return nil
+}
+
+// WriteMessage sends the specified message to the GELF server
+// specified in the call to New().  It assumes all the fields are
+// filled out appropriately.  In general, clients will want to use
+// Write, rather than WriteMessage.
+func (w *Writer) WriteMessage(m *Message) (err error) {
+	mBytes, err := json.Marshal(m)
+	if err != nil {
+		return
+	}
+
+	var zBuf bytes.Buffer
+	var zw io.WriteCloser
+	switch w.CompressionType {
+	case CompressGzip:
+		zw, err = gzip.NewWriterLevel(&zBuf, w.CompressionLevel)
+	case CompressZlib:
+		zw, err = zlib.NewWriterLevel(&zBuf, w.CompressionLevel)
+	default:
+		panic(fmt.Sprintf("unknown compression type %d",
+			w.CompressionType))
+	}
+	if err != nil {
+		return
+	}
+	if _, err = zw.Write(mBytes); err != nil {
+		return
+	}
+	zw.Close()
+
+	zBytes := zBuf.Bytes()
+	if numChunks(zBytes) > 1 {
+		return w.writeChunked(zBytes)
+	}
+
+	n, err := w.conn.Write(zBytes)
+	if err != nil {
+		return
+	}
+	if n != len(zBytes) {
+		return fmt.Errorf("bad write (%d/%d)", n, len(zBytes))
+	}
+
+	return nil
+}
+
+// Close connection and interrupt blocked Read or Write operations
+func (w *Writer) Close() error {
+	return w.conn.Close()
+}
+
+/*
+func (w *Writer) Alert(m string) (err error)
+func (w *Writer) Close() error
+func (w *Writer) Crit(m string) (err error)
+func (w *Writer) Debug(m string) (err error)
+func (w *Writer) Emerg(m string) (err error)
+func (w *Writer) Err(m string) (err error)
+func (w *Writer) Info(m string) (err error)
+func (w *Writer) Notice(m string) (err error)
+func (w *Writer) Warning(m string) (err error)
+*/
+
+// getCaller returns the filename and the line info of a function
+// further down in the call stack.  Passing 0 in as callDepth would
+// return info on the function calling getCallerIgnoringLog, 1 the
+// parent function, and so on.  Any suffixes passed to getCaller are
+// path fragments like "/pkg/log/log.go", and functions in the call
+// stack from that file are ignored.
+func getCaller(callDepth int, suffixesToIgnore ...string) (file string, line int) {
+	// bump by 1 to ignore the getCaller (this) stackframe
+	callDepth++
+outer:
+	for {
+		var ok bool
+		_, file, line, ok = runtime.Caller(callDepth)
+		if !ok {
+			file = "???"
+			line = 0
+			break
+		}
+
+		for _, s := range suffixesToIgnore {
+			if strings.HasSuffix(file, s) {
+				callDepth++
+				continue outer
+			}
+		}
+		break
+	}
+	return
+}
+
+func getCallerIgnoringLogMulti(callDepth int) (string, int) {
+	// the +1 is to ignore this (getCallerIgnoringLogMulti) frame
+	return getCaller(callDepth+1, "/pkg/log/log.go", "/pkg/io/multi.go")
+}
+
+// Write encodes the given string in a GELF message and sends it to
+// the server specified in New().
+func (w *Writer) Write(p []byte) (n int, err error) {
+
+	// 1 for the function that called us.
+	file, line := getCallerIgnoringLogMulti(1)
+
+	// remove trailing and leading whitespace
+	p = bytes.TrimSpace(p)
+
+	// If there are newlines in the message, use the first line
+	// for the short message and set the full message to the
+	// original input.  If the input has no newlines, stick the
+	// whole thing in Short.
+	short := p
+	full := []byte("")
+	if i := bytes.IndexRune(p, '\n'); i > 0 {
+		short = p[:i]
+		full = p
+	}
+
+	m := Message{
+		Version:  "1.1",
+		Host:     w.hostname,
+		Short:    string(short),
+		Full:     string(full),
+		TimeUnix: float64(time.Now().Unix()),
+		Level:    6, // info
+		Facility: w.Facility,
+		Extra: map[string]interface{}{
+			"_file": file,
+			"_line": line,
+		},
+	}
+
+	if err = w.WriteMessage(&m); err != nil {
+		return 0, err
+	}
+
+	return len(p), nil
+}
+
+func (m *Message) MarshalJSON() ([]byte, error) {
+	var err error
+	var b, eb []byte
+
+	extra := m.Extra
+	b, err = json.Marshal((*innerMessage)(m))
+	m.Extra = extra
+	if err != nil {
+		return nil, err
+	}
+
+	if len(extra) == 0 {
+		return b, nil
+	}
+
+	if eb, err = json.Marshal(extra); err != nil {
+		return nil, err
+	}
+
+	// merge serialized message + serialized extra map
+	b[len(b)-1] = ','
+	return append(b, eb[1:len(eb)]...), nil
+}
+
+func (m *Message) UnmarshalJSON(data []byte) error {
+	i := make(map[string]interface{}, 16)
+	if err := json.Unmarshal(data, &i); err != nil {
+		return err
+	}
+	for k, v := range i {
+		if k[0] == '_' {
+			if m.Extra == nil {
+				m.Extra = make(map[string]interface{}, 1)
+			}
+			m.Extra[k] = v
+			continue
+		}
+		switch k {
+		case "version":
+			m.Version = v.(string)
+		case "host":
+			m.Host = v.(string)
+		case "short_message":
+			m.Short = v.(string)
+		case "full_message":
+			m.Full = v.(string)
+		case "timestamp":
+			m.TimeUnix = v.(float64)
+		case "level":
+			m.Level = int32(v.(float64))
+		case "facility":
+			m.Facility = v.(string)
+		}
+	}
+	return nil
+}
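With `ChunkSize = 1420` and a 12-byte chunk header, each chunk carries 1408 bytes of compressed payload, so a 4096-byte payload goes out as 3 chunks. A standalone sketch of the same arithmetic as `numChunks` above (illustrative only):

    // Illustrative only: mirrors numChunks from writer.go.
    package main

    import "fmt"

    func main() {
    	const chunkSize = 1420
    	const chunkedHeaderLen = 12
    	const chunkedDataLen = chunkSize - chunkedHeaderLen // 1408 payload bytes per chunk
    	for _, n := range []int{512, 1420, 4096} {
    		chunks := 1
    		if n > chunkSize {
    			chunks = n/chunkedDataLen + 1
    		}
    		fmt.Printf("%d compressed bytes -> %d chunk(s)\n", n, chunks)
    	}
    }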

+ 230 - 0
vendor/src/github.com/Graylog2/go-gelf/gelf/writer_test.go

@@ -0,0 +1,230 @@
+// Copyright 2012 SocialCode. All rights reserved.
+// Use of this source code is governed by the MIT
+// license that can be found in the LICENSE file.
+
+package gelf
+
+import (
+	"crypto/rand"
+	"encoding/base64"
+	"fmt"
+	"strings"
+	"testing"
+	"time"
+)
+
+func TestNewWriter(t *testing.T) {
+	w, err := NewWriter("")
+	if err == nil || w != nil {
+		t.Errorf("New didn't fail")
+		return
+	}
+}
+
+func sendAndRecv(msgData string, compress CompressType) (*Message, error) {
+	r, err := NewReader("127.0.0.1:0")
+	if err != nil {
+		return nil, fmt.Errorf("NewReader: %s", err)
+	}
+
+	w, err := NewWriter(r.Addr())
+	if err != nil {
+		return nil, fmt.Errorf("NewWriter: %s", err)
+	}
+	w.CompressionType = compress
+
+	if _, err = w.Write([]byte(msgData)); err != nil {
+		return nil, fmt.Errorf("w.Write: %s", err)
+	}
+
+	return r.ReadMessage()
+}
+
+func sendAndRecvMsg(msg *Message, compress CompressType) (*Message, error) {
+	r, err := NewReader("127.0.0.1:0")
+	if err != nil {
+		return nil, fmt.Errorf("NewReader: %s", err)
+	}
+
+	w, err := NewWriter(r.Addr())
+	if err != nil {
+		return nil, fmt.Errorf("NewWriter: %s", err)
+	}
+	w.CompressionType = compress
+
+	if err = w.WriteMessage(msg); err != nil {
+		return nil, fmt.Errorf("w.Write: %s", err)
+	}
+
+	return r.ReadMessage()
+}
+
+// tests single-message (non-chunked) messages that are split over
+// multiple lines
+func TestWriteSmallMultiLine(t *testing.T) {
+	for _, i := range []CompressType{CompressGzip, CompressZlib} {
+		msgData := "awesomesauce\nbananas"
+
+		msg, err := sendAndRecv(msgData, i)
+		if err != nil {
+			t.Errorf("sendAndRecv: %s", err)
+			return
+		}
+
+		if msg.Short != "awesomesauce" {
+			t.Errorf("msg.Short: expected %s, got %s", msgData, msg.Full)
+			return
+		}
+
+		if msg.Full != msgData {
+			t.Errorf("msg.Full: expected %s, got %s", msgData, msg.Full)
+			return
+		}
+	}
+}
+
+// tests single-message (non-chunked) messages that are a single line long
+func TestWriteSmallOneLine(t *testing.T) {
+	msgData := "some awesome thing\n"
+	msgDataTrunc := msgData[:len(msgData)-1]
+
+	msg, err := sendAndRecv(msgData, CompressGzip)
+	if err != nil {
+		t.Errorf("sendAndRecv: %s", err)
+		return
+	}
+
+	// we should remove the trailing newline
+	if msg.Short != msgDataTrunc {
+		t.Errorf("msg.Short: expected %s, got %s",
+			msgDataTrunc, msg.Short)
+		return
+	}
+
+	if msg.Full != "" {
+		t.Errorf("msg.Full: expected %s, got %s", msgData, msg.Full)
+		return
+	}
+
+	fileExpected := "/go-gelf/gelf/writer_test.go"
+	if !strings.HasSuffix(msg.Extra["_file"].(string), fileExpected) {
+		t.Errorf("msg.File: expected %s, got %s", fileExpected,
+			msg.Extra["_file"].(string))
+		return
+	}
+
+	if len(msg.Extra) != 2 {
+		t.Errorf("extra fields in %v (expect only file and line)", msg.Extra)
+		return
+	}
+}
+
+func TestGetCaller(t *testing.T) {
+	file, line := getCallerIgnoringLogMulti(1000)
+	if line != 0 || file != "???" {
+		t.Errorf("didn't fail 1 %s %d", file, line)
+		return
+	}
+
+	file, _ = getCaller(0)
+	if !strings.HasSuffix(file, "/gelf/writer_test.go") {
+		t.Errorf("not writer_test.go 1? %s", file)
+	}
+
+	file, _ = getCallerIgnoringLogMulti(0)
+	if !strings.HasSuffix(file, "/gelf/writer_test.go") {
+		t.Errorf("not writer_test.go 2? %s", file)
+	}
+}
+
+// tests single-message (chunked) messages
+func TestWriteBigChunked(t *testing.T) {
+	randData := make([]byte, 4096)
+	if _, err := rand.Read(randData); err != nil {
+		t.Errorf("cannot get random data: %s", err)
+		return
+	}
+	msgData := "awesomesauce\n" + base64.StdEncoding.EncodeToString(randData)
+
+	for _, i := range []CompressType{CompressGzip, CompressZlib} {
+		msg, err := sendAndRecv(msgData, i)
+		if err != nil {
+			t.Errorf("sendAndRecv: %s", err)
+			return
+		}
+
+		if msg.Short != "awesomesauce" {
+			t.Errorf("msg.Short: expected %s, got %s", msgData, msg.Full)
+			return
+		}
+
+		if msg.Full != msgData {
+			t.Errorf("msg.Full: expected %s, got %s", msgData, msg.Full)
+			return
+		}
+	}
+}
+
+// tests messages with extra data
+func TestExtraData(t *testing.T) {
+
+	// time.Now().Unix() seems fine, UnixNano() won't roundtrip
+	// through string -> float64 -> int64
+	extra := map[string]interface{}{
+		"_a":    10 * time.Now().Unix(),
+		"C":     9,
+		"_file": "writer_test.go",
+		"_line": 186,
+	}
+
+	short := "quick"
+	full := short + "\nwith more detail"
+	m := Message{
+		Version:  "1.0",
+		Host:     "fake-host",
+		Short:    string(short),
+		Full:     string(full),
+		TimeUnix: float64(time.Now().Unix()),
+		Level:    6, // info
+		Facility: "writer_test",
+		Extra:    extra,
+	}
+
+	for _, i := range []CompressType{CompressGzip, CompressZlib} {
+		msg, err := sendAndRecvMsg(&m, i)
+		if err != nil {
+			t.Errorf("sendAndRecv: %s", err)
+			return
+		}
+
+		if msg.Short != short {
+			t.Errorf("msg.Short: expected %s, got %s", short, msg.Full)
+			return
+		}
+
+		if msg.Full != full {
+			t.Errorf("msg.Full: expected %s, got %s", full, msg.Full)
+			return
+		}
+
+		if len(msg.Extra) != 3 {
+			t.Errorf("extra extra fields in %v", msg.Extra)
+			return
+		}
+
+		if int64(msg.Extra["_a"].(float64)) != extra["_a"].(int64) {
+			t.Errorf("_a didn't roundtrip (%v != %v)", int64(msg.Extra["_a"].(float64)), extra["_a"].(int64))
+			return
+		}
+
+		if string(msg.Extra["_file"].(string)) != extra["_file"] {
+			t.Errorf("_file didn't roundtrip (%v != %v)", msg.Extra["_file"].(string), extra["_file"].(string))
+			return
+		}
+
+		if int(msg.Extra["_line"].(float64)) != extra["_line"].(int) {
+			t.Errorf("_line didn't roundtrip (%v != %v)", int(msg.Extra["_line"].(float64)), extra["_line"].(int))
+			return
+		}
+	}
+}