helpers.go

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package content

import (
	"context"
	"io"
	"io/ioutil"
	"math/rand"
	"sync"
	"time"

	"github.com/containerd/containerd/errdefs"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

// bufPool provides reusable 1 MB buffers for copyWithBuffer.
var bufPool = sync.Pool{
	New: func() interface{} {
		buffer := make([]byte, 1<<20)
		return &buffer
	},
}

// NewReader returns an io.Reader from a ReaderAt
func NewReader(ra ReaderAt) io.Reader {
	rd := io.NewSectionReader(ra, 0, ra.Size())
	return rd
}

// ReadBlob retrieves the entire contents of the blob from the provider.
//
// Avoid using this for large blobs, such as layers.
func ReadBlob(ctx context.Context, provider Provider, desc ocispec.Descriptor) ([]byte, error) {
	ra, err := provider.ReaderAt(ctx, desc)
	if err != nil {
		return nil, err
	}
	defer ra.Close()

	p := make([]byte, ra.Size())

	_, err = ra.ReadAt(p, 0)
	return p, err
}

// WriteBlob writes data with the expected digest into the content store. If
// expected already exists, the method returns immediately and the reader will
// not be consumed.
//
// This is useful when the digest and size are known beforehand.
//
// Copy is buffered, so no need to wrap reader in buffered io.
func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc ocispec.Descriptor, opts ...Opt) error {
	cw, err := OpenWriter(ctx, cs, WithRef(ref), WithDescriptor(desc))
	if err != nil {
		if !errdefs.IsAlreadyExists(err) {
			return errors.Wrap(err, "failed to open writer")
		}

		return nil // already present
	}
	defer cw.Close()

	return Copy(ctx, cw, r, desc.Size, desc.Digest, opts...)
}

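// The sketch below is illustrative and not part of the upstream file. It
// shows one way a caller might use WriteBlob when the digest and size are
// already known, e.g. taken from an image manifest. The function name, ref
// naming scheme, and parameters are assumptions.
func writeKnownBlob(ctx context.Context, cs Ingester, mediaType string, dgst digest.Digest, size int64, r io.Reader) error {
	desc := ocispec.Descriptor{
		MediaType: mediaType,
		Digest:    dgst,
		Size:      size,
	}

	// The ref only needs to be unique per in-flight ingest; keying it on the
	// digest lets an interrupted ingest of the same blob be resumed.
	ref := "example-ingest-" + dgst.String()

	// WriteBlob returns nil without consuming r if the content already exists.
	return WriteBlob(ctx, cs, ref, r, desc)
}
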
// OpenWriter opens a new writer for the given reference, retrying while the
// writer is locked until the reference becomes available or an error occurs.
func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, error) {
	var (
		cw    Writer
		err   error
		retry = 16
	)
	for {
		cw, err = cs.Writer(ctx, opts...)
		if err != nil {
			if !errdefs.IsUnavailable(err) {
				return nil, err
			}

			// TODO: Check status to determine if the writer is active,
			// continue waiting while active, otherwise return lock
			// error or abort. Requires asserting for an ingest manager

			select {
			case <-time.After(time.Millisecond * time.Duration(rand.Intn(retry))):
				if retry < 2048 {
					retry = retry << 1
				}

				continue
			case <-ctx.Done():
				// Propagate lock error
				return nil, err
			}
		}
		break
	}

	return cw, err
}

// Copy copies data with the expected digest from the reader into the
// provided content store writer. This copy commits the writer.
//
// This is useful when the digest and size are known beforehand. When
// the size or digest is unknown, these values may be empty.
//
// Copy is buffered, so no need to wrap reader in buffered io.
func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error {
	ws, err := cw.Status()
	if err != nil {
		return errors.Wrap(err, "failed to get status")
	}

	if ws.Offset > 0 {
		r, err = seekReader(r, ws.Offset, size)
		if err != nil {
			return errors.Wrapf(err, "unable to resume write to %v", ws.Ref)
		}
	}

	if _, err := copyWithBuffer(cw, r); err != nil {
		return errors.Wrap(err, "failed to copy")
	}

	if err := cw.Commit(ctx, size, expected, opts...); err != nil {
		if !errdefs.IsAlreadyExists(err) {
			return errors.Wrapf(err, "failed commit on ref %q", ws.Ref)
		}
	}

	return nil
}

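// The sketch below is illustrative and not part of the upstream file. It
// shows Copy with an unknown size and digest: per the doc comment above,
// these values may be left empty, in which case they are not validated on
// Commit. The function name and ref are assumptions.
func copyUnknownSize(ctx context.Context, cs Ingester, ref string, r io.Reader) error {
	cw, err := OpenWriter(ctx, cs, WithRef(ref))
	if err != nil {
		return err
	}
	defer cw.Close()

	// With size 0 and an empty digest, Copy streams everything from r and
	// commits whatever was written.
	return Copy(ctx, cw, r, 0, "")
}
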
// CopyReaderAt copies to a writer from a given ReaderAt for the given
// number of bytes. This copy does not commit the writer.
func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error {
	ws, err := cw.Status()
	if err != nil {
		return err
	}

	_, err = copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n))
	return err
}

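// The sketch below is illustrative and not part of the upstream file. It
// shows CopyReaderAt moving a blob from a Provider into an open, uncommitted
// Writer; the caller remains responsible for committing. The function name
// and surrounding flow are assumptions.
func copyBlobUncommitted(ctx context.Context, from Provider, cw Writer, desc ocispec.Descriptor) error {
	ra, err := from.ReaderAt(ctx, desc)
	if err != nil {
		return err
	}
	defer ra.Close()

	// CopyReaderAt resumes from the writer's current offset and copies at
	// most desc.Size bytes from that offset.
	return CopyReaderAt(cw, ra, desc.Size)
}
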
// seekReader attempts to seek the reader to the given offset, either by
// resolving `io.Seeker`, by detecting `io.ReaderAt`, or by discarding
// up to the given offset.
func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
	// attempt to resolve r as a seeker and set up the offset.
	seeker, ok := r.(io.Seeker)
	if ok {
		nn, err := seeker.Seek(offset, io.SeekStart)
		if nn != offset {
			return nil, errors.Wrapf(err, "failed to seek to offset %v", offset)
		}

		if err != nil {
			return nil, err
		}

		return r, nil
	}

	// ok, let's try io.ReaderAt!
	readerAt, ok := r.(io.ReaderAt)
	if ok && size > offset {
		sr := io.NewSectionReader(readerAt, offset, size)
		return sr, nil
	}

	// well then, let's just discard up to the offset
	n, err := copyWithBuffer(ioutil.Discard, io.LimitReader(r, offset))
	if err != nil {
		return nil, errors.Wrap(err, "failed to discard to offset")
	}
	if n != offset {
		return nil, errors.Errorf("unable to discard to offset")
	}

	return r, nil
}

// copyWithBuffer copies from src to dst using a pooled 1 MB buffer, avoiding
// a large allocation on every copy.
func copyWithBuffer(dst io.Writer, src io.Reader) (written int64, err error) {
	buf := bufPool.Get().(*[]byte)
	written, err = io.CopyBuffer(dst, src, *buf)
	bufPool.Put(buf)
	return
}