// copier.go
  1. package logger
  2. import (
  3. "bytes"
  4. "io"
  5. "sync"
  6. "time"
  7. "github.com/Sirupsen/logrus"
  8. )
const (
	// bufSize is the per-source buffer capacity; when the buffer fills
	// without a newline, copySrc flushes it as a partial message.
	bufSize = 16 * 1024
	// readSize caps how many bytes a single src.Read may deliver
	// into the buffer per loop iteration.
	readSize = 2 * 1024
)
// Copier can copy logs from specified sources to Logger and attach Timestamp.
// Writes are concurrent, so you need implement some sync in your logger.
type Copier struct {
	// srcs is map of name -> reader pairs, for example "stdout", "stderr"
	srcs map[string]io.Reader
	// dst receives every message read from srcs.
	dst Logger
	// copyJobs counts the goroutines started by Run so Wait can block
	// until they all finish.
	copyJobs sync.WaitGroup
	// closeOnce guards the close of closed, making Close idempotent.
	closeOnce sync.Once
	// closed is closed by Close to signal every copy goroutine to stop.
	closed chan struct{}
}
  23. // NewCopier creates a new Copier
  24. func NewCopier(srcs map[string]io.Reader, dst Logger) *Copier {
  25. return &Copier{
  26. srcs: srcs,
  27. dst: dst,
  28. closed: make(chan struct{}),
  29. }
  30. }
  31. // Run starts logs copying
  32. func (c *Copier) Run() {
  33. for src, w := range c.srcs {
  34. c.copyJobs.Add(1)
  35. go c.copySrc(src, w)
  36. }
  37. }
// copySrc reads from src until EOF (or until the Copier is closed),
// splits the stream on '\n', and sends each line to c.dst as a Message
// tagged with the source name and a UTC timestamp. When the buffer
// fills with no newline, or EOF arrives mid-line, the remainder is sent
// with Partial set. Decrements copyJobs on return.
func (c *Copier) copySrc(name string, src io.Reader) {
	defer c.copyJobs.Done()
	buf := make([]byte, bufSize)
	// n is the number of valid (unlogged) bytes currently in buf.
	n := 0
	eof := false
	for {
		select {
		case <-c.closed:
			return
		default:
			// Work out how much more data we are okay with reading this time.
			upto := n + readSize
			if upto > cap(buf) {
				upto = cap(buf)
			}
			// Try to read that data.
			if upto > n {
				read, err := src.Read(buf[n:upto])
				if err != nil {
					if err != io.EOF {
						logrus.Errorf("Error scanning log stream: %s", err)
						return
					}
					// EOF: drain whatever is buffered, then exit below.
					eof = true
				}
				n += read
			}
			// If we have no data to log, and there's no more coming, we're done.
			if n == 0 && eof {
				return
			}
			// Break up the data that we've buffered up into lines, and log each in turn.
			// p is the start of the next unlogged byte; q is the newline
			// offset RELATIVE to p, so the line is buf[p : p+q].
			p := 0
			for q := bytes.IndexByte(buf[p:n], '\n'); q >= 0; q = bytes.IndexByte(buf[p:n], '\n') {
				select {
				case <-c.closed:
					return
				default:
					msg := NewMessage()
					msg.Source = name
					msg.Timestamp = time.Now().UTC()
					msg.Line = append(msg.Line, buf[p:p+q]...)
					if logErr := c.dst.Log(msg); logErr != nil {
						logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr)
					}
				}
				// Skip past the newline itself.
				p += q + 1
			}
			// If there's no more coming, or the buffer is full but
			// has no newlines, log whatever we haven't logged yet,
			// noting that it's a partial log line.
			if eof || (p == 0 && n == len(buf)) {
				if p < n {
					msg := NewMessage()
					msg.Source = name
					msg.Timestamp = time.Now().UTC()
					msg.Line = append(msg.Line, buf[p:n]...)
					msg.Partial = true
					if logErr := c.dst.Log(msg); logErr != nil {
						logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr)
					}
					// The buffer has been fully flushed; reset it.
					p = 0
					n = 0
				}
				if eof {
					return
				}
			}
			// Move any unlogged data to the front of the buffer in preparation for another read.
			// copy is safe for this forward-overlapping move.
			if p > 0 {
				copy(buf[0:], buf[p:n])
				n -= p
			}
		}
	}
}
// Wait blocks until every copy goroutine started by Run has returned.
func (c *Copier) Wait() {
	c.copyJobs.Wait()
}
  118. // Close closes the copier
  119. func (c *Copier) Close() {
  120. c.closeOnce.Do(func() {
  121. close(c.closed)
  122. })
  123. }