read.go

package jsonfilelog

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/pkg/filenotify"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonlog"
	"github.com/docker/docker/pkg/tailfile"
	"github.com/fsnotify/fsnotify"
	"golang.org/x/net/context"
)

const maxJSONDecodeRetry = 20000
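
// decodeLogLine decodes the next JSON log entry from dec into l and converts
// it into a logger.Message.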
func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) {
	l.Reset()
	if err := dec.Decode(l); err != nil {
		return nil, err
	}
	msg := &logger.Message{
		Source:    l.Stream,
		Timestamp: l.Created,
		Line:      []byte(l.Log),
		Attrs:     l.Attrs,
	}
	return msg, nil
}

// ReadLogs implements the logger's LogReader interface for the logs
// created by this driver.
func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
	logWatcher := logger.NewLogWatcher()
	go l.readLogs(logWatcher, config)
	return logWatcher
}
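
// readLogs reads entries from the rotated log files and the live log file,
// honoring the Tail, Since, and Follow read options, and sends them to the
// log watcher.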
func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
	defer close(logWatcher.Msg)

	// lock so the read stream doesn't get corrupted due to rotations or other log data written while we read
	// This will block writes!!!
	l.mu.Lock()

	pth := l.writer.LogPath()
	var files []io.ReadSeeker
	for i := l.writer.MaxFiles(); i > 1; i-- {
		f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1))
		if err != nil {
			if !os.IsNotExist(err) {
				logWatcher.Err <- err
				break
			}
			continue
		}
		defer f.Close()

		files = append(files, f)
	}

	latestFile, err := os.Open(pth)
	if err != nil {
		logWatcher.Err <- err
		l.mu.Unlock()
		return
	}
	defer latestFile.Close()

	if config.Tail != 0 {
		tailer := ioutils.MultiReadSeeker(append(files, latestFile)...)
		tailFile(tailer, logWatcher, config.Tail, config.Since)
	}

	// close all the rotated files
	for _, f := range files {
		if err := f.(io.Closer).Close(); err != nil {
			logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err)
		}
	}

	if !config.Follow {
		if err := latestFile.Close(); err != nil {
			logrus.Errorf("Error closing file: %v", err)
		}
		l.mu.Unlock()
		return
	}

	if config.Tail >= 0 {
		latestFile.Seek(0, os.SEEK_END)
	}

	l.readers[logWatcher] = struct{}{}
	l.mu.Unlock()

	notifyRotate := l.writer.NotifyRotate()
	followLogs(latestFile, logWatcher, notifyRotate, config.Since)

	l.mu.Lock()
	delete(l.readers, logWatcher)
	l.mu.Unlock()

	l.writer.NotifyRotateEvict(notifyRotate)
}
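
// tailFile decodes log entries from f, keeping only the last `tail` lines
// when tail is positive, skips entries older than `since`, and forwards the
// rest to the log watcher.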
func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) {
	var rdr io.Reader = f
	if tail > 0 {
		ls, err := tailfile.TailFile(f, tail)
		if err != nil {
			logWatcher.Err <- err
			return
		}
		rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n")))
	}
	dec := json.NewDecoder(rdr)
	l := &jsonlog.JSONLog{}
	for {
		msg, err := decodeLogLine(dec, l)
		if err != nil {
			if err != io.EOF {
				logWatcher.Err <- err
			}
			return
		}
		if !since.IsZero() && msg.Timestamp.Before(since) {
			continue
		}
		logWatcher.Msg <- msg
	}
}
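
// watchFile returns a file watcher for name, falling back to a polling
// watcher when the event-based watcher cannot watch the file.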
func watchFile(name string) (filenotify.FileWatcher, error) {
	fileWatcher, err := filenotify.New()
	if err != nil {
		return nil, err
	}

	if err := fileWatcher.Add(name); err != nil {
		logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err)
		fileWatcher.Close()
		fileWatcher = filenotify.NewPollingWatcher()

		if err := fileWatcher.Add(name); err != nil {
			fileWatcher.Close()
			logrus.Debugf("error watching log file for modifications: %v", err)
			return nil, err
		}
	}

	return fileWatcher, nil
}
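
// followLogs tails the live log file and streams newly written entries to the
// log watcher until the watcher is closed, handling log rotation and
// partially written entries along the way.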
func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) {
	dec := json.NewDecoder(f)
	l := &jsonlog.JSONLog{}

	name := f.Name()
	fileWatcher, err := watchFile(name)
	if err != nil {
		logWatcher.Err <- err
		return
	}
	defer func() {
		f.Close()
		fileWatcher.Remove(name)
		fileWatcher.Close()
	}()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		select {
		case <-logWatcher.WatchClose():
			fileWatcher.Remove(name)
			cancel()
		case <-ctx.Done():
			return
		}
	}()

	var retries int
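
	// handleRotate reopens the log file after it has been rotated away and
	// points the decoder at the new file.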
	handleRotate := func() error {
		f.Close()
		fileWatcher.Remove(name)

		// retry when the file doesn't exist
		for retries := 0; retries <= 5; retries++ {
			f, err = os.Open(name)
			if err == nil || !os.IsNotExist(err) {
				break
			}
		}
		if err != nil {
			return err
		}
		if err := fileWatcher.Add(name); err != nil {
			return err
		}
		dec = json.NewDecoder(f)
		return nil
	}
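
	// waitRead blocks until the file has new data to read, the file has been
	// rotated, or the watcher is shutting down. errRetry and errDone are
	// sentinel results for the caller.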
	errRetry := errors.New("retry")
	errDone := errors.New("done")
	waitRead := func() error {
		select {
		case e := <-fileWatcher.Events():
			switch e.Op {
			case fsnotify.Write:
				dec = json.NewDecoder(f)
				return nil
			case fsnotify.Rename, fsnotify.Remove:
				select {
				case <-notifyRotate:
				case <-ctx.Done():
					return errDone
				}
				if err := handleRotate(); err != nil {
					return err
				}
				return nil
			}
			return errRetry
		case err := <-fileWatcher.Errors():
			logrus.Debugf("logger got error watching file: %v", err)
			// Something happened, let's try and stay alive and create a new watcher
			if retries <= 5 {
				fileWatcher.Close()
				fileWatcher, err = watchFile(name)
				if err != nil {
					return err
				}
				retries++
				return errRetry
			}
			return err
		case <-ctx.Done():
			return errDone
		}
	}
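
	// handleDecodeErr decides whether a decode error is recoverable: on EOF it
	// waits for more data, and on a syntax error or a partially written entry
	// it resets the decoder and retries. Any other error is returned.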
	handleDecodeErr := func(err error) error {
		if err == io.EOF {
			// wait for more data to arrive, retrying the waitRead until it
			// either succeeds or fails with a real error
			for {
				err := waitRead()
				if err == nil {
					break
				}
				if err == errRetry {
					continue
				}
				return err
			}
			return nil
		}
		// try again because this shouldn't happen
		if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry {
			dec = json.NewDecoder(f)
			retries++
			return nil
		}
		// io.ErrUnexpectedEOF is returned from json.Decoder when there is
		// remaining data in the parser's buffer while an io.EOF occurs.
		// If the json logger writes a partial json log entry to the disk
		// while the decoder is reading it, this race occurs; stitch the
		// buffered bytes back together with the file and retry.
		if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry {
			reader := io.MultiReader(dec.Buffered(), f)
			dec = json.NewDecoder(reader)
			retries++
			return nil
		}
		return err
	}

	// main loop
	for {
		msg, err := decodeLogLine(dec, l)
		if err != nil {
			if err := handleDecodeErr(err); err != nil {
				if err == errDone {
					return
				}
				// we got an unrecoverable error, so return
				logWatcher.Err <- err
				return
			}
			// ready to try again
			continue
		}

		retries = 0 // reset retries since we've succeeded
		if !since.IsZero() && msg.Timestamp.Before(since) {
			continue
		}
		select {
		case logWatcher.Msg <- msg:
		case <-ctx.Done():
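			// the watcher is shutting down: deliver the message we already
			// decoded, then drain whatever remains in the file before returning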
			logWatcher.Msg <- msg
			for {
				msg, err := decodeLogLine(dec, l)
				if err != nil {
					return
				}
				if !since.IsZero() && msg.Timestamp.Before(since) {
					continue
				}
				logWatcher.Msg <- msg
			}
		}
	}
}