jsonfilelog.go

// Package jsonfilelog provides the default Logger implementation for
// Docker logging. This logger logs to files on the host server in the
// JSON format.
package jsonfilelog

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"strconv"
	"sync"
	"time"

	"gopkg.in/fsnotify.v1"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonlog"
	"github.com/docker/docker/pkg/pubsub"
	"github.com/docker/docker/pkg/tailfile"
	"github.com/docker/docker/pkg/timeutils"
	"github.com/docker/docker/pkg/units"
)
const (
	// Name is the name of the json-file logging driver.
	Name               = "json-file"
	maxJSONDecodeRetry = 10
)
// JSONFileLogger is the Logger implementation for default Docker logging.
type JSONFileLogger struct {
	buf          *bytes.Buffer
	f            *os.File   // store for closing
	mu           sync.Mutex // protects buffer
	capacity     int64      // maximum size of each file
	n            int        // maximum number of files
	ctx          logger.Context
	readers      map[*logger.LogWatcher]struct{} // stores the active log followers
	notifyRotate *pubsub.Publisher
}
func init() {
	if err := logger.RegisterLogDriver(Name, New); err != nil {
		logrus.Fatal(err)
	}
	if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil {
		logrus.Fatal(err)
	}
}
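
// Usage sketch (not part of the driver): with the driver and its option
// validator registered above, a container typically selects this logger
// through the Docker CLI. The option values below are illustrative only.
//
//	docker run --log-driver=json-file \
//		--log-opt max-size=10m \
//		--log-opt max-file=3 \
//		busybox echo hello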
// New creates a new JSONFileLogger which writes to the filename specified in
// the given context.
func New(ctx logger.Context) (logger.Logger, error) {
	log, err := os.OpenFile(ctx.LogPath, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
	if err != nil {
		return nil, err
	}

	var capval int64 = -1
	if capacity, ok := ctx.Config["max-size"]; ok {
		var err error
		capval, err = units.FromHumanSize(capacity)
		if err != nil {
			return nil, err
		}
	}

	var maxFiles = 1
	if maxFileString, ok := ctx.Config["max-file"]; ok {
		maxFiles, err = strconv.Atoi(maxFileString)
		if err != nil {
			return nil, err
		}
		if maxFiles < 1 {
			return nil, fmt.Errorf("max-file cannot be less than 1")
		}
	}

	return &JSONFileLogger{
		f:            log,
		buf:          bytes.NewBuffer(nil),
		ctx:          ctx,
		capacity:     capval,
		n:            maxFiles,
		readers:      make(map[*logger.LogWatcher]struct{}),
		notifyRotate: pubsub.NewPublisher(0, 1),
	}, nil
}
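
// A minimal construction sketch (illustrative, not part of the driver): only
// the logger.Context fields referenced in this file (LogPath and Config) are
// assumed, and the path below is hypothetical.
//
//	ctx := logger.Context{
//		LogPath: "/var/lib/docker/containers/abc123/abc123-json.log",
//		Config: map[string]string{
//			"max-size": "10m", // parsed by units.FromHumanSize
//			"max-file": "3",   // parsed by strconv.Atoi; must be >= 1
//		},
//	}
//	l, err := New(ctx)
//	if err != nil {
//		// handle the error
//	}
//	defer l.Close()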
// Log converts logger.Message to jsonlog.JSONLog and serializes it to file.
func (l *JSONFileLogger) Log(msg *logger.Message) error {
	l.mu.Lock()
	defer l.mu.Unlock()

	timestamp, err := timeutils.FastMarshalJSON(msg.Timestamp)
	if err != nil {
		return err
	}
	err = (&jsonlog.JSONLogs{Log: append(msg.Line, '\n'), Stream: msg.Source, Created: timestamp}).MarshalJSONBuf(l.buf)
	if err != nil {
		return err
	}
	l.buf.WriteByte('\n')
	_, err = writeLog(l)
	return err
}
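
// Each call to Log appends one newline-delimited JSON object to the file. For
// a message "hello" on stdout, the resulting line would look roughly like the
// sketch below (field names follow pkg/jsonlog; the timestamp is illustrative):
//
//	{"log":"hello\n","stream":"stdout","time":"2015-01-02T15:04:05.000000000Z"}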
// writeLog flushes the buffered entries to the log file, rotating the file
// first if it has reached the configured capacity.
func writeLog(l *JSONFileLogger) (int64, error) {
	if l.capacity == -1 {
		return writeToBuf(l)
	}
	meta, err := l.f.Stat()
	if err != nil {
		return -1, err
	}
	if meta.Size() >= l.capacity {
		name := l.f.Name()
		if err := l.f.Close(); err != nil {
			return -1, err
		}
		if err := rotate(name, l.n); err != nil {
			return -1, err
		}
		file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666)
		if err != nil {
			return -1, err
		}
		l.f = file
		l.notifyRotate.Publish(struct{}{})
	}
	return writeToBuf(l)
}

// writeToBuf writes the contents of the buffer to the current log file; on a
// write error the buffer is reset so the failed data is not retried.
func writeToBuf(l *JSONFileLogger) (int64, error) {
	i, err := l.buf.WriteTo(l.f)
	if err != nil {
		l.buf = bytes.NewBuffer(nil)
	}
	return i, err
}
// rotate shifts each existing backup up by one (name.(i-1) becomes name.i) and
// then moves the current file to name.1, keeping at most n files in total.
func rotate(name string, n int) error {
	if n < 2 {
		return nil
	}
	for i := n - 1; i > 1; i-- {
		oldFile := name + "." + strconv.Itoa(i)
		replacingFile := name + "." + strconv.Itoa(i-1)
		if err := backup(oldFile, replacingFile); err != nil {
			return err
		}
	}
	if err := backup(name+".1", name); err != nil {
		return err
	}
	return nil
}

// backup renames a file from curr to old, creating an empty file curr if it does not exist.
func backup(old, curr string) error {
	if _, err := os.Stat(old); !os.IsNotExist(err) {
		err := os.Remove(old)
		if err != nil {
			return err
		}
	}
	if _, err := os.Stat(curr); os.IsNotExist(err) {
		f, err := os.Create(curr)
		if err != nil {
			return err
		}
		f.Close()
	}
	return os.Rename(curr, old)
}
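
// Rotation layout sketch (assuming max-file=3 and a hypothetical log path
// "c1-json.log"); after repeated rotations the files on disk would be:
//
//	c1-json.log    // active file, truncated after each rotation
//	c1-json.log.1  // most recently rotated file
//	c1-json.log.2  // oldest kept file, overwritten on the next rotation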
// ValidateLogOpt looks for the json-file-specific log options max-file & max-size.
func ValidateLogOpt(cfg map[string]string) error {
	for key := range cfg {
		switch key {
		case "max-file":
		case "max-size":
		default:
			return fmt.Errorf("unknown log opt '%s' for json-file log driver", key)
		}
	}
	return nil
}
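
// Validation sketch (the option maps below are illustrative, not taken from
// the source):
//
//	ValidateLogOpt(map[string]string{"max-size": "10m", "max-file": "3"}) // returns nil
//	ValidateLogOpt(map[string]string{"foo": "bar"})                       // returns "unknown log opt 'foo' ..."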
// LogPath returns the location the given json logger logs to.
func (l *JSONFileLogger) LogPath() string {
	return l.ctx.LogPath
}

// Close closes the underlying file and signals all readers to stop.
func (l *JSONFileLogger) Close() error {
	l.mu.Lock()
	err := l.f.Close()
	for r := range l.readers {
		r.Close()
		delete(l.readers, r)
	}
	l.mu.Unlock()
	return err
}

// Name returns the name of this logger.
func (l *JSONFileLogger) Name() string {
	return Name
}
// decodeLogLine decodes the next JSON log entry from dec into l and converts
// it to a logger.Message.
func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) {
	l.Reset()
	if err := dec.Decode(l); err != nil {
		return nil, err
	}
	msg := &logger.Message{
		Source:    l.Stream,
		Timestamp: l.Created,
		Line:      []byte(l.Log),
	}
	return msg, nil
}
// ReadLogs implements the logger's LogReader interface for the logs
// created by this driver.
func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
	logWatcher := logger.NewLogWatcher()
	go l.readLogs(logWatcher, config)
	return logWatcher
}
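
// Reader usage sketch (illustrative): only the ReadConfig fields and the
// LogWatcher channels referenced in this file are assumed; jsonLogger is a
// hypothetical *JSONFileLogger obtained from New.
//
//	w := jsonLogger.ReadLogs(logger.ReadConfig{Tail: 100, Follow: true})
//	defer w.Close()
//	for msg := range w.Msg {
//		fmt.Printf("%s %s: %s", msg.Timestamp, msg.Source, msg.Line)
//	}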
// readLogs streams existing entries from the rotated and current log files to
// the watcher, honoring the Tail and Since options, and keeps following the
// live file when Follow is set.
func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
	defer close(logWatcher.Msg)

	pth := l.ctx.LogPath
	var files []io.ReadSeeker
	for i := l.n; i > 1; i-- {
		f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1))
		if err != nil {
			if !os.IsNotExist(err) {
				logWatcher.Err <- err
				break
			}
			continue
		}
		defer f.Close()
		files = append(files, f)
	}

	latestFile, err := os.Open(pth)
	if err != nil {
		logWatcher.Err <- err
		return
	}
	defer latestFile.Close()

	files = append(files, latestFile)
	tailer := ioutils.MultiReadSeeker(files...)
	if config.Tail != 0 {
		tailFile(tailer, logWatcher, config.Tail, config.Since)
	}

	if !config.Follow {
		return
	}

	if config.Tail >= 0 {
		latestFile.Seek(0, os.SEEK_END)
	}

	l.mu.Lock()
	l.readers[logWatcher] = struct{}{}
	l.mu.Unlock()

	notifyRotate := l.notifyRotate.Subscribe()
	followLogs(latestFile, logWatcher, notifyRotate, config.Since)

	l.mu.Lock()
	delete(l.readers, logWatcher)
	l.mu.Unlock()

	l.notifyRotate.Evict(notifyRotate)
}
// tailFile sends the last 'tail' decoded entries of f (or all entries when
// tail is negative) to the watcher, skipping any entry older than 'since'.
func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) {
	var rdr io.Reader = f
	if tail > 0 {
		ls, err := tailfile.TailFile(f, tail)
		if err != nil {
			logWatcher.Err <- err
			return
		}
		rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n")))
	}
	dec := json.NewDecoder(rdr)
	l := &jsonlog.JSONLog{}
	for {
		msg, err := decodeLogLine(dec, l)
		if err != nil {
			if err != io.EOF {
				logWatcher.Err <- err
			}
			return
		}
		if !since.IsZero() && msg.Timestamp.Before(since) {
			continue
		}
		logWatcher.Msg <- msg
	}
}
// followLogs tails the live log file with fsnotify, re-opening it on rotation
// and forwarding decoded entries to the watcher until the watcher is closed.
func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) {
	dec := json.NewDecoder(f)
	l := &jsonlog.JSONLog{}
	fileWatcher, err := fsnotify.NewWatcher()
	if err != nil {
		logWatcher.Err <- err
		return
	}
	defer fileWatcher.Close()
	if err := fileWatcher.Add(f.Name()); err != nil {
		logWatcher.Err <- err
		return
	}

	var retries int
	for {
		msg, err := decodeLogLine(dec, l)
		if err != nil {
			if err != io.EOF {
				// try again because this shouldn't happen
				if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry {
					dec = json.NewDecoder(f)
					retries++
					continue
				}
				logWatcher.Err <- err
				return
			}

			select {
			case <-fileWatcher.Events:
				dec = json.NewDecoder(f)
				continue
			case <-fileWatcher.Errors:
				logWatcher.Err <- err
				return
			case <-logWatcher.WatchClose():
				return
			case <-notifyRotate:
				fileWatcher.Remove(f.Name())

				f, err = os.Open(f.Name())
				if err != nil {
					logWatcher.Err <- err
					return
				}
				if err := fileWatcher.Add(f.Name()); err != nil {
					logWatcher.Err <- err
				}
				dec = json.NewDecoder(f)
				continue
			}
		}

		retries = 0 // reset retries since we've succeeded
		if !since.IsZero() && msg.Timestamp.Before(since) {
			continue
		}
		select {
		case logWatcher.Msg <- msg:
		case <-logWatcher.WatchClose():
			logWatcher.Msg <- msg
			for {
				msg, err := decodeLogLine(dec, l)
				if err != nil {
					return
				}
				if !since.IsZero() && msg.Timestamp.Before(since) {
					continue
				}
				logWatcher.Msg <- msg
			}
		}
	}
}