tailfile.go

// Package tailfile provides helper functions to read the last n lines of any
// ReadSeeker.
package tailfile // import "github.com/docker/docker/pkg/tailfile"

import (
	"bufio"
	"bytes"
	"context"
	"errors"
	"io"
	"os"
)

const blockSize = 1024

var eol = []byte("\n")

// ErrNonPositiveLinesNumber is an error returned if the requested number of lines was not positive.
var ErrNonPositiveLinesNumber = errors.New("The number of lines to extract from the file must be positive")

// TailFile returns the last n lines of the passed in file.
func TailFile(f *os.File, n int) ([][]byte, error) {
	size, err := f.Seek(0, io.SeekEnd)
	if err != nil {
		return nil, err
	}

	rAt := io.NewSectionReader(f, 0, size)
	r, nLines, err := NewTailReader(context.Background(), rAt, n)
	if err != nil {
		return nil, err
	}

	buf := make([][]byte, 0, nLines)
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		// Copy each line out of the scanner's internal buffer, which is reused
		// and may be overwritten by subsequent calls to Scan.
		line := make([]byte, len(scanner.Bytes()))
		copy(line, scanner.Bytes())
		buf = append(buf, line)
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return buf, nil
}
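
// A minimal usage sketch for TailFile from a caller's perspective (illustrative
// only, not part of the original file); the log path and the fmt import are
// assumptions of the example:
//
//	f, err := os.Open("/var/log/app.log")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//
//	lines, err := tailfile.TailFile(f, 10)
//	if err != nil {
//		return err
//	}
//	for _, line := range lines {
//		fmt.Printf("%s\n", line)
//	}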

// SizeReaderAt is an interface used to get a ReaderAt as well as the size of the underlying reader.
// Note that the size of the underlying reader should not change when using this interface.
type SizeReaderAt interface {
	io.ReaderAt
	Size() int64
}

// NewTailReader scopes the passed in reader to just the last N lines, using "\n" as the line delimiter.
func NewTailReader(ctx context.Context, r SizeReaderAt, reqLines int) (io.Reader, int, error) {
	return NewTailReaderWithDelimiter(ctx, r, reqLines, eol)
}

// NewTailReaderWithDelimiter scopes the passed in reader to just the last N lines,
// where a "line" is defined by the passed in delimiter.
//
// Delimiter lengths should generally be small, no more than 12 bytes.
func NewTailReaderWithDelimiter(ctx context.Context, r SizeReaderAt, reqLines int, delimiter []byte) (io.Reader, int, error) {
	if reqLines < 1 {
		return nil, 0, ErrNonPositiveLinesNumber
	}
	if len(delimiter) == 0 {
		return nil, 0, errors.New("must provide a delimiter")
	}

	var (
		size      = r.Size()
		tailStart int64
		tailEnd   = size
		found     int
	)

	if int64(len(delimiter)) >= size {
		return bytes.NewReader(nil), 0, nil
	}

	// Walk backwards from the end of the reader, one delimiter at a time,
	// until reqLines delimiters have been found or the data is exhausted.
	scanner := newScanner(r, delimiter)
	for scanner.Scan(ctx) {
		if err := scanner.Err(); err != nil {
			return nil, 0, err
		}

		found++
		if found == 1 {
			tailEnd = scanner.End()
		}
		if found == reqLines {
			break
		}
	}

	tailStart = scanner.Start(ctx)

	if found == 0 {
		return bytes.NewReader(nil), 0, nil
	}

	if found < reqLines && tailStart != 0 {
		tailStart = 0
	}
	return io.NewSectionReader(r, tailStart, tailEnd-tailStart), found, nil
}
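
// A minimal usage sketch for NewTailReaderWithDelimiter from a caller's
// perspective (illustrative only, not part of the original file). It scopes a
// *bytes.Reader, which satisfies SizeReaderAt, to the last two NUL-delimited
// records:
//
//	data := bytes.NewReader([]byte("one\x00two\x00three\x00"))
//	r, n, err := tailfile.NewTailReaderWithDelimiter(context.Background(), data, 2, []byte{0})
//	if err != nil {
//		return err
//	}
//	// n == 2 and r yields "two\x00three\x00".
//	tail, err := io.ReadAll(r)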

func newScanner(r SizeReaderAt, delim []byte) *scanner {
	size := r.Size()
	readSize := blockSize
	if readSize > int(size) {
		readSize = int(size)
	}
	// Silly case: if the delimiter is large relative to the block size, grow the
	// buffer so it can always hold two full delimiters plus a little extra data.
	if len(delim) >= readSize/2 {
		readSize = len(delim)*2 + 2
	}

	return &scanner{
		r:     r,
		pos:   size,
		buf:   make([]byte, readSize),
		delim: delim,
	}
}
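
// scanner searches a SizeReaderAt backwards for occurrences of a delimiter,
// reading the data block by block from the end toward the beginning.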
type scanner struct {
	r     SizeReaderAt // underlying data being scanned backwards
	pos   int64        // offset of the start of the most recently read block; data before pos has not been read yet
	buf   []byte       // scratch buffer holding the most recently read block
	delim []byte       // the "line" delimiter to search for
	err   error        // first error encountered, if any
	idx   int          // index within buf of the most recently found delimiter
}
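
// Start returns the offset at which the "line" ending at the most recently
// found delimiter begins, i.e. the offset just past the preceding delimiter,
// or 0 if no earlier delimiter can be found.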
func (s *scanner) Start(ctx context.Context) int64 {
	if s.idx > 0 {
		idx := bytes.LastIndex(s.buf[:s.idx], s.delim)
		if idx >= 0 {
			return s.pos + int64(idx) + int64(len(s.delim))
		}
	}

	// Slow path: the preceding delimiter is not in the current buffer, so keep
	// scanning backwards with a copy of this scanner's state.
	buf := make([]byte, len(s.buf))
	copy(buf, s.buf)

	readAhead := &scanner{
		r:     s.r,
		pos:   s.pos,
		delim: s.delim,
		idx:   s.idx,
		buf:   buf,
	}

	if !readAhead.Scan(ctx) {
		return 0
	}
	return readAhead.End()
}
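
// End returns the offset just past the most recently found delimiter.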
func (s *scanner) End() int64 {
	return s.pos + int64(s.idx) + int64(len(s.delim))
}
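
// Err returns the first error encountered while scanning, if any.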
func (s *scanner) Err() error {
	return s.err
}
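
// Scan searches backwards from the current position for the previous
// occurrence of the delimiter, reading more data from r as needed. It returns
// true if a delimiter was found, and false on error, on context cancellation,
// or when the beginning of the data is reached without finding one.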
func (s *scanner) Scan(ctx context.Context) bool {
	if s.err != nil {
		return false
	}

	for {
		select {
		case <-ctx.Done():
			s.err = ctx.Err()
			return false
		default:
		}

		idx := s.idx - len(s.delim)
		if idx < 0 {
			readSize := int(s.pos)
			if readSize > len(s.buf) {
				readSize = len(s.buf)
			}

			if readSize < len(s.delim) {
				return false
			}

			offset := s.pos - int64(readSize)
			n, err := s.r.ReadAt(s.buf[:readSize], offset)
			if err != nil && err != io.EOF {
				s.err = err
				return false
			}

			s.pos -= int64(n)
			idx = n
		}

		s.idx = bytes.LastIndex(s.buf[:idx], s.delim)
		if s.idx >= 0 {
			return true
		}

		if len(s.delim) > 1 && s.pos > 0 {
			// In this case there may be a partial delimiter at the front of the buffer, so set the position forward
			// up to the maximum size partial that could be there so it can be read again in the next iteration with
			// any potential remainder.
			// An example where the delimiter is `####`:
			//   [##asdfqwerty]
			//     ^
			// This resets the position to where the arrow is pointing.
			// It could actually check whether a partial delimiter exists at the front, but that is pretty similar to
			// the indexing code above, just a bit more complex since each byte of a potential partial (up to
			// `len(delimiter)-1` bytes) would have to be checked.
			// It's much simpler and cleaner to just re-read `len(delimiter)-1` bytes again.
			s.pos += int64(len(s.delim)) - 1
		}
	}
}