fetch_linux.go 9.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288
  1. package plugin
  2. import (
  3. "context"
  4. "io"
  5. "net/http"
  6. "time"
  7. "github.com/containerd/containerd/content"
  8. c8derrdefs "github.com/containerd/containerd/errdefs"
  9. "github.com/containerd/containerd/images"
  10. "github.com/containerd/containerd/remotes"
  11. "github.com/containerd/containerd/remotes/docker"
  12. "github.com/docker/distribution/reference"
  13. "github.com/docker/docker/api/types"
  14. progressutils "github.com/docker/docker/distribution/utils"
  15. "github.com/docker/docker/pkg/chrootarchive"
  16. "github.com/docker/docker/pkg/ioutils"
  17. "github.com/docker/docker/pkg/progress"
  18. "github.com/docker/docker/pkg/stringid"
  19. digest "github.com/opencontainers/go-digest"
  20. specs "github.com/opencontainers/image-spec/specs-go/v1"
  21. "github.com/pkg/errors"
  22. "github.com/sirupsen/logrus"
  23. )
// mediaTypePluginConfig is the media type of the plugin config blob inside a
// plugin manifest. It is docker-specific (not an OCI image config type), which
// is why fetch registers a dedicated ref-key prefix for it.
const mediaTypePluginConfig = "application/vnd.docker.plugin.v1+json"
  25. // setupProgressOutput sets up the passed in writer to stream progress.
  26. //
  27. // The passed in cancel function is used by the progress writer to signal callers that there
  28. // is an issue writing to the stream.
  29. //
  30. // The returned function is used to wait for the progress writer to be finished.
  31. // Call it to make sure the progress writer is done before returning from your function as needed.
  32. func setupProgressOutput(outStream io.Writer, cancel func()) (progress.Output, func()) {
  33. var out progress.Output
  34. f := func() {}
  35. if outStream != nil {
  36. ch := make(chan progress.Progress, 100)
  37. out = progress.ChanOutput(ch)
  38. ctx, retCancel := context.WithCancel(context.Background())
  39. go func() {
  40. progressutils.WriteDistributionProgress(cancel, outStream, ch)
  41. retCancel()
  42. }()
  43. f = func() {
  44. close(ch)
  45. <-ctx.Done()
  46. }
  47. } else {
  48. out = progress.DiscardOutput()
  49. }
  50. return out, f
  51. }
// fetch the content related to the passed in reference into the blob store and appends the provided images.Handlers
// There is no need to use remotes.FetchHandler since it already gets set
func (pm *Manager) fetch(ctx context.Context, ref reference.Named, auth *types.AuthConfig, out progress.Output, metaHeader http.Header, handlers ...images.Handler) (err error) {
	// We need to make sure we have a domain on the reference
	withDomain, err := reference.ParseNormalizedNamed(ref.String())
	if err != nil {
		return errors.Wrap(err, "error parsing plugin image reference")
	}

	// Make sure we can authenticate the request since the auth scope for plugin repos is different than a normal repo.
	ctx = docker.WithScope(ctx, scope(ref, false))

	// Make sure the fetch handler knows how to set a ref key for the plugin media type.
	// Without this the ref key is "unknown" and we see a nasty warning message in the logs
	ctx = remotes.WithMediaTypeKeyPrefix(ctx, mediaTypePluginConfig, "docker-plugin")

	resolver, err := pm.newResolver(ctx, nil, auth, metaHeader, false)
	if err != nil {
		return err
	}
	resolved, desc, err := resolver.Resolve(ctx, withDomain.String())
	if err != nil {
		// This is backwards compatible with older versions of the distribution registry.
		// The containerd client will add it's own accept header as a comma separated list of supported manifests.
		// This is perfectly fine, unless you are talking to an older registry which does not split the comma separated list,
		// so it is never able to match a media type and it falls back to schema1 (yuck) and fails because our manifest the
		// fallback does not support plugin configs...
		logrus.WithError(err).WithField("ref", withDomain).Debug("Error while resolving reference, falling back to backwards compatible accept header format")
		headers := http.Header{}
		headers.Add("Accept", images.MediaTypeDockerSchema2Manifest)
		headers.Add("Accept", images.MediaTypeDockerSchema2ManifestList)
		headers.Add("Accept", specs.MediaTypeImageManifest)
		headers.Add("Accept", specs.MediaTypeImageIndex)
		// The constructor error is deliberately dropped: on failure resolver is
		// nil and err still holds the original resolve error, which is wrapped
		// and returned below. Do not "fix" this by assigning to err.
		resolver, _ = pm.newResolver(ctx, nil, auth, headers, false)
		if resolver != nil {
			resolved, desc, err = resolver.Resolve(ctx, withDomain.String())
			if err != nil {
				logrus.WithError(err).WithField("ref", withDomain).Debug("Failed to resolve reference after falling back to backwards compatible accept header format")
			}
		}
		if err != nil {
			return errors.Wrap(err, "error resolving plugin reference")
		}
	}

	fetcher, err := resolver.Fetcher(ctx, resolved)
	if err != nil {
		return errors.Wrap(err, "error creating plugin image fetcher")
	}

	// Progress reporting and the actual fetch run before any caller-supplied
	// handlers so content is in the blob store by the time they see it.
	fp := withFetchProgress(pm.blobStore, out, ref)
	handlers = append([]images.Handler{fp, remotes.FetchHandler(pm.blobStore, fetcher)}, handlers...)
	if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, desc); err != nil {
		return err
	}
	return nil
}
  104. // applyLayer makes an images.HandlerFunc which applies a fetched image rootfs layer to a directory.
  105. //
  106. // TODO(@cpuguy83) This gets run sequentially after layer pull (makes sense), however
  107. // if there are multiple layers to fetch we may end up extracting layers in the wrong
  108. // order.
  109. func applyLayer(cs content.Store, dir string, out progress.Output) images.HandlerFunc {
  110. return func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) {
  111. switch desc.MediaType {
  112. case
  113. specs.MediaTypeImageLayer,
  114. images.MediaTypeDockerSchema2Layer,
  115. specs.MediaTypeImageLayerGzip,
  116. images.MediaTypeDockerSchema2LayerGzip:
  117. default:
  118. return nil, nil
  119. }
  120. ra, err := cs.ReaderAt(ctx, desc)
  121. if err != nil {
  122. return nil, errors.Wrapf(err, "error getting content from content store for digest %s", desc.Digest)
  123. }
  124. id := stringid.TruncateID(desc.Digest.String())
  125. rc := ioutils.NewReadCloserWrapper(content.NewReader(ra), ra.Close)
  126. pr := progress.NewProgressReader(rc, out, desc.Size, id, "Extracting")
  127. defer pr.Close()
  128. if _, err := chrootarchive.ApplyLayer(dir, pr); err != nil {
  129. return nil, errors.Wrapf(err, "error applying layer for digest %s", desc.Digest)
  130. }
  131. progress.Update(out, id, "Complete")
  132. return nil, nil
  133. }
  134. }
  135. func childrenHandler(cs content.Store) images.HandlerFunc {
  136. ch := images.ChildrenHandler(cs)
  137. return func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) {
  138. switch desc.MediaType {
  139. case mediaTypePluginConfig:
  140. return nil, nil
  141. default:
  142. return ch(ctx, desc)
  143. }
  144. }
  145. }
// fetchMeta collects the digests of the content observed during a plugin
// fetch, bucketed by role.
type fetchMeta struct {
	blobs    []digest.Digest // rootfs layer digests, in dispatch order
	config   digest.Digest   // plugin config blob digest
	manifest digest.Digest   // plugin manifest digest
}
  151. func storeFetchMetadata(m *fetchMeta) images.HandlerFunc {
  152. return func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) {
  153. switch desc.MediaType {
  154. case
  155. images.MediaTypeDockerSchema2LayerForeignGzip,
  156. images.MediaTypeDockerSchema2Layer,
  157. specs.MediaTypeImageLayer,
  158. specs.MediaTypeImageLayerGzip:
  159. m.blobs = append(m.blobs, desc.Digest)
  160. case specs.MediaTypeImageManifest, images.MediaTypeDockerSchema2Manifest:
  161. m.manifest = desc.Digest
  162. case mediaTypePluginConfig:
  163. m.config = desc.Digest
  164. }
  165. return nil, nil
  166. }
  167. }
  168. func validateFetchedMetadata(md fetchMeta) error {
  169. if md.config == "" {
  170. return errors.New("fetched plugin image but plugin config is missing")
  171. }
  172. if md.manifest == "" {
  173. return errors.New("fetched plugin image but manifest is missing")
  174. }
  175. return nil
  176. }
// withFetchProgress is a fetch handler which registers a descriptor with a progress
// output: manifests are announced as a "Pulling from" header, and layer
// descriptors get a background goroutine that polls the content store and
// streams download status until completion.
func withFetchProgress(cs content.Store, out progress.Output, ref reference.Named) images.HandlerFunc {
	return func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) {
		switch desc.MediaType {
		case specs.MediaTypeImageManifest, images.MediaTypeDockerSchema2Manifest:
			tn := reference.TagNameOnly(ref)
			// NOTE(review): this assertion panics for a digest-only ref, since
			// TagNameOnly only adds a tag to name-only refs — presumably callers
			// always pass a taggable name; confirm.
			tagged := tn.(reference.Tagged)
			progress.Messagef(out, tagged.Tag(), "Pulling from %s", reference.FamiliarName(ref))
			progress.Messagef(out, "", "Digest: %s", desc.Digest.String())
			return nil, nil
		case
			images.MediaTypeDockerSchema2LayerGzip,
			images.MediaTypeDockerSchema2Layer,
			specs.MediaTypeImageLayer,
			specs.MediaTypeImageLayerGzip:
			// Layer types fall through to the progress-polling logic below.
		default:
			// Anything else (configs, indexes, ...) gets no progress tracking.
			return nil, nil
		}

		id := stringid.TruncateID(desc.Digest.String())

		// Layer content already present: no download will happen.
		if _, err := cs.Info(ctx, desc.Digest); err == nil {
			out.WriteProgress(progress.Progress{ID: id, Action: "Already exists", LastUpdate: true})
			return nil, nil
		}

		progress.Update(out, id, "Waiting")

		key := remotes.MakeRefKey(ctx, desc)

		// Poll the in-flight download status every 100ms and stream progress
		// updates until the layer completes or an unexpected error occurs.
		go func() {
			// Create the timer stopped/drained so the first Reset in the loop
			// starts from a clean state (standard Stop-and-drain pattern).
			timer := time.NewTimer(100 * time.Millisecond)
			if !timer.Stop() {
				<-timer.C
			}
			defer timer.Stop()

			var pulling bool
			var ctxErr error

			for {
				timer.Reset(100 * time.Millisecond)

				select {
				case <-ctx.Done():
					ctxErr = ctx.Err()
					// make sure we can still fetch from the content store
					// TODO: Might need to add some sort of timeout
					ctx = context.Background()
				case <-timer.C:
				}

				s, err := cs.Status(ctx, key)
				if err != nil {
					if !c8derrdefs.IsNotFound(err) {
						logrus.WithError(err).WithField("layerDigest", desc.Digest.String()).Error("Error looking up status of plugin layer pull")
						progress.Update(out, id, err.Error())
						return
					}

					// No active ingest for this key: either the download has
					// finished (content is now in the store) or it has not
					// started yet.
					if _, err := cs.Info(ctx, desc.Digest); err == nil {
						progress.Update(out, id, "Download complete")
						return
					}

					// Context was cancelled and the content never arrived:
					// report the cancellation and stop polling.
					if ctxErr != nil {
						progress.Update(out, id, ctxErr.Error())
						return
					}

					continue
				}

				if !pulling {
					progress.Update(out, id, "Pulling fs layer")
					pulling = true
				}

				if s.Offset == s.Total {
					out.WriteProgress(progress.Progress{ID: id, Action: "Download complete", Current: s.Offset, LastUpdate: true})
					return
				}

				out.WriteProgress(progress.Progress{ID: id, Action: "Downloading", Current: s.Offset, Total: s.Total})
			}
		}()

		return nil, nil
	}
}