read.go 7.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323
  1. package jsonfilelog
  2. import (
  3. "bytes"
  4. "encoding/json"
  5. "errors"
  6. "fmt"
  7. "io"
  8. "os"
  9. "time"
  10. "github.com/fsnotify/fsnotify"
  11. "golang.org/x/net/context"
  12. "github.com/Sirupsen/logrus"
  13. "github.com/docker/docker/daemon/logger"
  14. "github.com/docker/docker/pkg/filenotify"
  15. "github.com/docker/docker/pkg/ioutils"
  16. "github.com/docker/docker/pkg/jsonlog"
  17. "github.com/docker/docker/pkg/tailfile"
  18. )
  19. const maxJSONDecodeRetry = 20000
  20. func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) {
  21. l.Reset()
  22. if err := dec.Decode(l); err != nil {
  23. return nil, err
  24. }
  25. msg := &logger.Message{
  26. Source: l.Stream,
  27. Timestamp: l.Created,
  28. Line: []byte(l.Log),
  29. Attrs: l.Attrs,
  30. }
  31. return msg, nil
  32. }
  33. // ReadLogs implements the logger's LogReader interface for the logs
  34. // created by this driver.
  35. func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
  36. logWatcher := logger.NewLogWatcher()
  37. go l.readLogs(logWatcher, config)
  38. return logWatcher
  39. }
// readLogs streams this container's log entries to logWatcher according
// to config (Tail/Since/Follow). It runs on its own goroutine (spawned
// by ReadLogs) and closes logWatcher.Msg when finished so consumers see
// end-of-stream.
func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
	defer close(logWatcher.Msg)

	// lock so the read stream doesn't get corrupted due to rotations or other log data written while we read
	// This will block writes!!!
	l.mu.Lock()

	pth := l.writer.LogPath()
	var files []io.ReadSeeker
	// Open rotated files <path>.(MaxFiles-1) down to <path>.1, i.e.
	// oldest first, so the MultiReadSeeker below yields entries in
	// chronological order.
	for i := l.writer.MaxFiles(); i > 1; i-- {
		f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1))
		if err != nil {
			// A missing rotated file just means fewer rotations have
			// occurred than MaxFiles permits; anything else is reported.
			if !os.IsNotExist(err) {
				logWatcher.Err <- err
				break
			}
			continue
		}
		defer f.Close()
		files = append(files, f)
	}

	latestFile, err := os.Open(pth)
	if err != nil {
		logWatcher.Err <- err
		l.mu.Unlock()
		return
	}
	defer latestFile.Close()

	// Tail != 0 means some historical portion was requested (Tail < 0 is
	// "everything"); replay it from the concatenated rotated + current files.
	if config.Tail != 0 {
		tailer := ioutils.MultiReadSeeker(append(files, latestFile)...)
		tailFile(tailer, logWatcher, config.Tail, config.Since)
	}

	// close all the rotated files
	for _, f := range files {
		if err := f.(io.Closer).Close(); err != nil {
			logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err)
		}
	}

	if !config.Follow {
		if err := latestFile.Close(); err != nil {
			logrus.Errorf("Error closing file: %v", err)
		}
		l.mu.Unlock()
		return
	}

	// The tail phase above already emitted everything up to the current
	// end of file, so start following from the end to avoid duplicates.
	if config.Tail >= 0 {
		latestFile.Seek(0, os.SEEK_END)
	}

	// Register this watcher so writers/closers can find it, then release
	// the lock before blocking in followLogs.
	l.readers[logWatcher] = struct{}{}
	l.mu.Unlock()

	notifyRotate := l.writer.NotifyRotate()
	followLogs(latestFile, logWatcher, notifyRotate, config.Since)

	l.mu.Lock()
	delete(l.readers, logWatcher)
	l.mu.Unlock()

	l.writer.NotifyRotateEvict(notifyRotate)
}
  95. func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) {
  96. var rdr io.Reader
  97. rdr = f
  98. if tail > 0 {
  99. ls, err := tailfile.TailFile(f, tail)
  100. if err != nil {
  101. logWatcher.Err <- err
  102. return
  103. }
  104. rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n")))
  105. }
  106. dec := json.NewDecoder(rdr)
  107. l := &jsonlog.JSONLog{}
  108. for {
  109. msg, err := decodeLogLine(dec, l)
  110. if err != nil {
  111. if err != io.EOF {
  112. logWatcher.Err <- err
  113. }
  114. return
  115. }
  116. if !since.IsZero() && msg.Timestamp.Before(since) {
  117. continue
  118. }
  119. select {
  120. case <-logWatcher.WatchClose():
  121. return
  122. case logWatcher.Msg <- msg:
  123. }
  124. }
  125. }
  126. func watchFile(name string) (filenotify.FileWatcher, error) {
  127. fileWatcher, err := filenotify.New()
  128. if err != nil {
  129. return nil, err
  130. }
  131. if err := fileWatcher.Add(name); err != nil {
  132. logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err)
  133. fileWatcher.Close()
  134. fileWatcher = filenotify.NewPollingWatcher()
  135. if err := fileWatcher.Add(name); err != nil {
  136. fileWatcher.Close()
  137. logrus.Debugf("error watching log file for modifications: %v", err)
  138. return nil, err
  139. }
  140. }
  141. return fileWatcher, nil
  142. }
  143. func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) {
  144. dec := json.NewDecoder(f)
  145. l := &jsonlog.JSONLog{}
  146. name := f.Name()
  147. fileWatcher, err := watchFile(name)
  148. if err != nil {
  149. logWatcher.Err <- err
  150. return
  151. }
  152. defer func() {
  153. f.Close()
  154. fileWatcher.Remove(name)
  155. fileWatcher.Close()
  156. }()
  157. ctx, cancel := context.WithCancel(context.Background())
  158. defer cancel()
  159. go func() {
  160. select {
  161. case <-logWatcher.WatchClose():
  162. fileWatcher.Remove(name)
  163. cancel()
  164. case <-ctx.Done():
  165. return
  166. }
  167. }()
  168. var retries int
  169. handleRotate := func() error {
  170. f.Close()
  171. fileWatcher.Remove(name)
  172. // retry when the file doesn't exist
  173. for retries := 0; retries <= 5; retries++ {
  174. f, err = os.Open(name)
  175. if err == nil || !os.IsNotExist(err) {
  176. break
  177. }
  178. }
  179. if err != nil {
  180. return err
  181. }
  182. if err := fileWatcher.Add(name); err != nil {
  183. return err
  184. }
  185. dec = json.NewDecoder(f)
  186. return nil
  187. }
  188. errRetry := errors.New("retry")
  189. errDone := errors.New("done")
  190. waitRead := func() error {
  191. select {
  192. case e := <-fileWatcher.Events():
  193. switch e.Op {
  194. case fsnotify.Write:
  195. dec = json.NewDecoder(f)
  196. return nil
  197. case fsnotify.Rename, fsnotify.Remove:
  198. select {
  199. case <-notifyRotate:
  200. case <-ctx.Done():
  201. return errDone
  202. }
  203. if err := handleRotate(); err != nil {
  204. return err
  205. }
  206. return nil
  207. }
  208. return errRetry
  209. case err := <-fileWatcher.Errors():
  210. logrus.Debug("logger got error watching file: %v", err)
  211. // Something happened, let's try and stay alive and create a new watcher
  212. if retries <= 5 {
  213. fileWatcher.Close()
  214. fileWatcher, err = watchFile(name)
  215. if err != nil {
  216. return err
  217. }
  218. retries++
  219. return errRetry
  220. }
  221. return err
  222. case <-ctx.Done():
  223. return errDone
  224. }
  225. }
  226. handleDecodeErr := func(err error) error {
  227. if err == io.EOF {
  228. for err := waitRead(); err != nil; {
  229. if err == errRetry {
  230. // retry the waitRead
  231. continue
  232. }
  233. return err
  234. }
  235. return nil
  236. }
  237. // try again because this shouldn't happen
  238. if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry {
  239. dec = json.NewDecoder(f)
  240. retries++
  241. return nil
  242. }
  243. // io.ErrUnexpectedEOF is returned from json.Decoder when there is
  244. // remaining data in the parser's buffer while an io.EOF occurs.
  245. // If the json logger writes a partial json log entry to the disk
  246. // while at the same time the decoder tries to decode it, the race condition happens.
  247. if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry {
  248. reader := io.MultiReader(dec.Buffered(), f)
  249. dec = json.NewDecoder(reader)
  250. retries++
  251. return nil
  252. }
  253. return err
  254. }
  255. // main loop
  256. for {
  257. msg, err := decodeLogLine(dec, l)
  258. if err != nil {
  259. if err := handleDecodeErr(err); err != nil {
  260. if err == errDone {
  261. return
  262. }
  263. // we got an unrecoverable error, so return
  264. logWatcher.Err <- err
  265. return
  266. }
  267. // ready to try again
  268. continue
  269. }
  270. retries = 0 // reset retries since we've succeeded
  271. if !since.IsZero() && msg.Timestamp.Before(since) {
  272. continue
  273. }
  274. select {
  275. case logWatcher.Msg <- msg:
  276. case <-ctx.Done():
  277. logWatcher.Msg <- msg
  278. for {
  279. msg, err := decodeLogLine(dec, l)
  280. if err != nil {
  281. return
  282. }
  283. if !since.IsZero() && msg.Timestamp.Before(since) {
  284. continue
  285. }
  286. logWatcher.Msg <- msg
  287. }
  288. }
  289. }
  290. }