pull_v2.go 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106
  1. package distribution // import "github.com/docker/docker/distribution"
  2. import (
  3. "context"
  4. "encoding/json"
  5. "fmt"
  6. "io"
  7. "os"
  8. "runtime"
  9. "strings"
  10. "time"
  11. "github.com/containerd/containerd/log"
  12. "github.com/containerd/containerd/platforms"
  13. "github.com/docker/distribution"
  14. "github.com/docker/distribution/manifest/manifestlist"
  15. "github.com/docker/distribution/manifest/ocischema"
  16. "github.com/docker/distribution/manifest/schema1"
  17. "github.com/docker/distribution/manifest/schema2"
  18. "github.com/docker/distribution/reference"
  19. "github.com/docker/distribution/registry/client/transport"
  20. "github.com/docker/docker/distribution/metadata"
  21. "github.com/docker/docker/distribution/xfer"
  22. "github.com/docker/docker/image"
  23. v1 "github.com/docker/docker/image/v1"
  24. "github.com/docker/docker/layer"
  25. "github.com/docker/docker/pkg/ioutils"
  26. "github.com/docker/docker/pkg/progress"
  27. "github.com/docker/docker/pkg/stringid"
  28. "github.com/docker/docker/pkg/system"
  29. refstore "github.com/docker/docker/reference"
  30. "github.com/docker/docker/registry"
  31. "github.com/opencontainers/go-digest"
  32. ocispec "github.com/opencontainers/image-spec/specs-go/v1"
  33. "github.com/pkg/errors"
  34. "github.com/sirupsen/logrus"
  35. archvariant "github.com/tonistiigi/go-archvariant"
  36. )
var (
	// errRootFSMismatch is returned when the layers listed in the manifest
	// don't match the DiffIDs recorded in the image configuration.
	errRootFSMismatch = errors.New("layers from manifest don't match image configuration")

	// errRootFSInvalid is returned when the image configuration carries a
	// missing or unusable rootfs section.
	errRootFSInvalid = errors.New("invalid rootfs in image configuration")
)
  41. // imageConfigPullError is an error pulling the image config blob
  42. // (only applies to schema2).
  43. type imageConfigPullError struct {
  44. Err error
  45. }
  46. // Error returns the error string for imageConfigPullError.
  47. func (e imageConfigPullError) Error() string {
  48. return "error pulling image configuration: " + e.Err.Error()
  49. }
  50. // newPuller returns a puller to pull from a v2 registry.
  51. func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, config *ImagePullConfig, local ContentStore) *puller {
  52. return &puller{
  53. metadataService: metadata.NewV2MetadataService(config.MetadataStore),
  54. endpoint: endpoint,
  55. config: config,
  56. repoInfo: repoInfo,
  57. manifestStore: &manifestStore{
  58. local: local,
  59. },
  60. }
  61. }
// puller pulls images from a v2 registry endpoint.
type puller struct {
	// metadataService caches blobsum <-> DiffID mappings across pulls.
	metadataService metadata.V2MetadataService
	endpoint        registry.APIEndpoint
	config          *ImagePullConfig
	repoInfo        *registry.RepositoryInfo
	// repo is established by pull() before any tags are fetched.
	repo          distribution.Repository
	manifestStore *manifestStore
}
  70. func (p *puller) pull(ctx context.Context, ref reference.Named) (err error) {
  71. // TODO(tiborvass): was ReceiveTimeout
  72. p.repo, err = newRepository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
  73. if err != nil {
  74. log.G(ctx).Warnf("Error getting v2 registry: %v", err)
  75. return err
  76. }
  77. p.manifestStore.remote, err = p.repo.Manifests(ctx)
  78. if err != nil {
  79. return err
  80. }
  81. if err = p.pullRepository(ctx, ref); err != nil {
  82. if _, ok := err.(fallbackError); ok {
  83. return err
  84. }
  85. if continueOnError(err, p.endpoint.Mirror) {
  86. return fallbackError{
  87. err: err,
  88. transportOK: true,
  89. }
  90. }
  91. }
  92. return err
  93. }
  94. func (p *puller) pullRepository(ctx context.Context, ref reference.Named) (err error) {
  95. var layersDownloaded bool
  96. if !reference.IsNameOnly(ref) {
  97. layersDownloaded, err = p.pullTag(ctx, ref, p.config.Platform)
  98. if err != nil {
  99. return err
  100. }
  101. } else {
  102. tags, err := p.repo.Tags(ctx).All(ctx)
  103. if err != nil {
  104. return err
  105. }
  106. for _, tag := range tags {
  107. tagRef, err := reference.WithTag(ref, tag)
  108. if err != nil {
  109. return err
  110. }
  111. pulledNew, err := p.pullTag(ctx, tagRef, p.config.Platform)
  112. if err != nil {
  113. // Since this is the pull-all-tags case, don't
  114. // allow an error pulling a particular tag to
  115. // make the whole pull fall back to v1.
  116. if fallbackErr, ok := err.(fallbackError); ok {
  117. return fallbackErr.err
  118. }
  119. return err
  120. }
  121. // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
  122. // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus?
  123. layersDownloaded = layersDownloaded || pulledNew
  124. }
  125. }
  126. p.writeStatus(reference.FamiliarString(ref), layersDownloaded)
  127. return nil
  128. }
  129. // writeStatus writes a status message to out. If layersDownloaded is true, the
  130. // status message indicates that a newer image was downloaded. Otherwise, it
  131. // indicates that the image is up to date. requestedTag is the tag the message
  132. // will refer to.
  133. func (p *puller) writeStatus(requestedTag string, layersDownloaded bool) {
  134. if layersDownloaded {
  135. progress.Message(p.config.ProgressOutput, "", "Status: Downloaded newer image for "+requestedTag)
  136. } else {
  137. progress.Message(p.config.ProgressOutput, "", "Status: Image is up to date for "+requestedTag)
  138. }
  139. }
// layerDescriptor describes one layer blob to fetch from a v2 registry; it is
// handed to the download manager as an xfer.DownloadDescriptor.
type layerDescriptor struct {
	digest digest.Digest
	// diffID may be pre-populated from the image config (see
	// pullSchema2Layers) so side-loaded foreign layers are not re-downloaded.
	diffID          layer.DiffID
	repoInfo        *registry.RepositoryInfo
	repo            distribution.Repository
	metadataService metadata.V2MetadataService
	// tmpFile holds a partial download between retries so it can be resumed.
	tmpFile *os.File
	// verifier accumulates the digest of the bytes written to tmpFile.
	verifier digest.Verifier
	src      distribution.Descriptor
}
  150. func (ld *layerDescriptor) Key() string {
  151. return "v2:" + ld.digest.String()
  152. }
// ID returns a short identifier for the layer, used in progress output.
func (ld *layerDescriptor) ID() string {
	return stringid.TruncateID(ld.digest.String())
}
  156. func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
  157. if ld.diffID != "" {
  158. return ld.diffID, nil
  159. }
  160. return ld.metadataService.GetDiffID(ld.digest)
  161. }
// Download fetches the layer blob into a temporary file, feeding every byte
// through a digest verifier on the way. If a previous attempt left a partial
// temp file behind (ld.tmpFile), it tries to resume from the current end of
// that file. On success it hands the temp file off to the caller as a
// ReadCloser that deletes the file when closed, along with the blob size
// reported by the remote (0 if unknown).
func (ld *layerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	log.G(ctx).Debugf("pulling blob %q", ld.digest)

	var (
		err    error
		offset int64
	)

	if ld.tmpFile == nil {
		ld.tmpFile, err = createDownloadFile()
		if err != nil {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
	} else {
		offset, err = ld.tmpFile.Seek(0, io.SeekEnd)
		if err != nil {
			// The leftover partial file is unusable; discard it and start
			// over with a fresh one.
			log.G(ctx).Debugf("error seeking to end of download file: %v", err)
			offset = 0

			ld.tmpFile.Close()
			if err := os.Remove(ld.tmpFile.Name()); err != nil {
				log.G(ctx).Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
			}
			ld.tmpFile, err = createDownloadFile()
			if err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		} else if offset != 0 {
			log.G(ctx).Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset)
		}
	}

	tmpFile := ld.tmpFile

	layerDownload, err := ld.open(ctx)
	if err != nil {
		log.G(ctx).Errorf("Error initiating layer download: %v", err)
		return nil, 0, retryOnError(err)
	}

	if offset != 0 {
		// Position the remote reader just past the bytes we already have.
		_, err := layerDownload.Seek(offset, io.SeekStart)
		if err != nil {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
	}
	// Determine the total blob size by seeking to the end of the remote
	// stream.
	size, err := layerDownload.Seek(0, io.SeekEnd)
	if err != nil {
		// Seek failed, perhaps because there was no Content-Length
		// header. This shouldn't fail the download, because we can
		// still continue without a progress bar.
		size = 0
	} else {
		if size != 0 && offset > size {
			log.G(ctx).Debug("Partial download is larger than full blob. Starting over")
			offset = 0
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		}

		// Restore the seek offset either at the beginning of the
		// stream, or just after the last byte we have from previous
		// attempts.
		_, err = layerDownload.Seek(offset, io.SeekStart)
		if err != nil {
			return nil, 0, err
		}
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading")
	defer reader.Close()

	if ld.verifier == nil {
		ld.verifier = ld.digest.Verifier()
	}

	// Copy the remote stream to disk while feeding the digest verifier.
	_, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier))
	if err != nil {
		if err == transport.ErrWrongCodeForByteRange {
			// The registry didn't honor our range request; wipe the partial
			// file so the retry starts from scratch.
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
		return nil, 0, retryOnError(err)
	}

	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")

	if !ld.verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
		log.G(ctx).Error(err)

		// Allow a retry if this digest verification error happened
		// after a resumed download.
		if offset != 0 {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	log.G(ctx).Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())

	_, err = tmpFile.Seek(0, io.SeekStart)
	if err != nil {
		// We can't rewind the file for the caller to read; clean up and
		// force a fresh download on the next attempt.
		tmpFile.Close()
		if err := os.Remove(tmpFile.Name()); err != nil {
			log.G(ctx).Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		ld.tmpFile = nil
		ld.verifier = nil
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	// hand off the temporary file to the download manager, so it will only
	// be closed once
	ld.tmpFile = nil

	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
		tmpFile.Close()
		err := os.RemoveAll(tmpFile.Name())
		if err != nil {
			log.G(ctx).Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		return err
	}), size, nil
}
// Close releases the temporary download file, if any, removing it from disk.
func (ld *layerDescriptor) Close() {
	if ld.tmpFile != nil {
		ld.tmpFile.Close()
		if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
			log.G(context.TODO()).Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
		}
	}
}
  288. func (ld *layerDescriptor) truncateDownloadFile() error {
  289. // Need a new hash context since we will be redoing the download
  290. ld.verifier = nil
  291. if _, err := ld.tmpFile.Seek(0, io.SeekStart); err != nil {
  292. log.G(context.TODO()).Errorf("error seeking to beginning of download file: %v", err)
  293. return err
  294. }
  295. if err := ld.tmpFile.Truncate(0); err != nil {
  296. log.G(context.TODO()).Errorf("error truncating download file: %v", err)
  297. return err
  298. }
  299. return nil
  300. }
// Registered records the mapping from the registered layer's DiffID to this
// blob's digest and source repository in the metadata service.
func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	_ = ld.metadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()})
}
// pullTag pulls the manifest for a single tag or digest reference, dispatches
// on the manifest type (schema1, schema2, OCI, or manifest list), and records
// the resulting image in the reference store.
//
// tagUpdated reports whether anything changed locally: new layers were
// downloaded or an existing image was newly tagged.
func (p *puller) pullTag(ctx context.Context, ref reference.Named, platform *ocispec.Platform) (tagUpdated bool, err error) {
	var (
		tagOrDigest string // Used for logging/progress only
		dgst        digest.Digest
		mt          string
		size        int64
		tagged      reference.NamedTagged
		isTagged    bool
	)
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		dgst = digested.Digest()
		tagOrDigest = digested.String()
	} else if tagged, isTagged = ref.(reference.NamedTagged); isTagged {
		// Resolve the tag to a digest via a HEAD-style tag lookup so the
		// manifest itself can be fetched by digest below.
		tagService := p.repo.Tags(ctx)
		desc, err := tagService.Get(ctx, tagged.Tag())
		if err != nil {
			return false, err
		}

		dgst = desc.Digest
		tagOrDigest = tagged.Tag()
		mt = desc.MediaType
		size = desc.Size
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref))
	}

	ctx = log.WithLogger(ctx, log.G(ctx).WithFields(
		logrus.Fields{
			"digest": dgst,
			"remote": ref,
		}))

	desc := ocispec.Descriptor{
		MediaType: mt,
		Digest:    dgst,
		Size:      size,
	}

	manifest, err := p.manifestStore.Get(ctx, desc, ref)
	if err != nil {
		if isTagged && isNotFound(errors.Cause(err)) {
			log.G(ctx).WithField("ref", ref).WithError(err).Debug("Falling back to pull manifest by tag")

			msg := `%s Failed to pull manifest by the resolved digest. This registry does not
appear to conform to the distribution registry specification; falling back to
pull by tag. This fallback is DEPRECATED, and will be removed in a future
release. Please contact admins of %s. %s
`
			warnEmoji := "\U000026A0\U0000FE0F"
			progress.Messagef(p.config.ProgressOutput, "WARNING", msg, warnEmoji, p.endpoint.URL, warnEmoji)

			// Fetch by tag worked, but fetch by digest didn't.
			// This is a broken registry implementation.
			// We'll fallback to the old behavior and get the manifest by tag.
			var ms distribution.ManifestService
			ms, err = p.repo.Manifests(ctx)
			if err != nil {
				return false, err
			}

			manifest, err = ms.Get(ctx, "", distribution.WithTag(tagged.Tag()))
			err = errors.Wrap(err, "error after falling back to get manifest by tag")
		}
		if err != nil {
			return false, err
		}
	}

	if manifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}

	// For schema2 manifests, reject config media types the daemon isn't
	// configured to accept before doing any real work.
	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
		if err := p.validateMediaType(m.Manifest.Config.MediaType); err != nil {
			return false, err
		}
	}

	log.G(ctx).Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref))
	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named()))

	var (
		id             digest.Digest
		manifestDigest digest.Digest
	)

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		// give registries time to upgrade to schema2 and only warn if we know a registry has been upgraded long time ago
		// TODO: condition to be removed
		if reference.Domain(ref) == "docker.io" {
			msg := fmt.Sprintf("Image %s uses outdated schema1 manifest format. Please upgrade to a schema2 image for better future compatibility. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref)
			log.G(ctx).Warn(msg)
			progress.Message(p.config.ProgressOutput, "", msg)
		}
		id, manifestDigest, err = p.pullSchema1(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	case *schema2.DeserializedManifest:
		id, manifestDigest, err = p.pullSchema2(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	case *ocischema.DeserializedManifest:
		id, manifestDigest, err = p.pullOCI(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	case *manifestlist.DeserializedManifestList:
		id, manifestDigest, err = p.pullManifestList(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	default:
		return false, invalidManifestFormatError{}
	}

	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())

	if p.config.ReferenceStore != nil {
		oldTagID, err := p.config.ReferenceStore.Get(ref)
		if err == nil {
			if oldTagID == id {
				// The reference already points at this image; just make sure
				// the digest reference exists, and report "no change".
				return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
			}
		} else if err != refstore.ErrDoesNotExist {
			return false, err
		}

		if canonical, ok := ref.(reference.Canonical); ok {
			if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
				return false, err
			}
		} else {
			if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
				return false, err
			}
			if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
				return false, err
			}
		}
	}
	return true, nil
}
  436. // validateMediaType validates if the given mediaType is accepted by the puller's
  437. // configuration.
  438. func (p *puller) validateMediaType(mediaType string) error {
  439. var allowedMediaTypes []string
  440. if len(p.config.Schema2Types) > 0 {
  441. allowedMediaTypes = p.config.Schema2Types
  442. } else {
  443. allowedMediaTypes = defaultImageTypes
  444. }
  445. for _, t := range allowedMediaTypes {
  446. if mediaType == t {
  447. return nil
  448. }
  449. }
  450. configClass := mediaTypeClasses[mediaType]
  451. if configClass == "" {
  452. configClass = "unknown"
  453. }
  454. return invalidManifestClassError{mediaType, configClass}
  455. }
// pullSchema1 pulls an image described by a (deprecated) schema1 signed
// manifest: it verifies and normalizes the manifest, downloads the layer
// blobs bottom-up, synthesizes a modern image config from the embedded v1
// compatibility blobs, and stores the image. It returns the stored image ID
// and the digest of the canonical manifest bytes.
func (p *puller) pullSchema1(ctx context.Context, ref reference.Reference, unverifiedManifest *schema1.SignedManifest, platform *ocispec.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
	if platform != nil {
		// Early bath if the requested OS doesn't match that of the configuration.
		// This avoids doing the download, only to potentially fail later.
		if !system.IsOSSupported(platform.OS) {
			return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", runtime.GOOS, platform.OS)
		}
	}

	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
	if err != nil {
		return "", "", err
	}

	rootFS := image.NewRootFS()

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return "", "", err
	}

	var descriptors []xfer.DownloadDescriptor

	// Image history converted to the new format
	var history []image.History

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum
		if err = blobSum.Validate(); err != nil {
			return "", "", errors.Wrapf(err, "could not validate layer digest %q", blobSum)
		}

		// "throwaway" entries contribute history but have no layer blob.
		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return "", "", err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return "", "", err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		layerDescriptor := &layerDescriptor{
			digest:          blobSum,
			repoInfo:        p.repoInfo,
			repo:            p.repo,
			metadataService: p.metadataService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
	if err != nil {
		return "", "", err
	}
	defer release()

	// History[0] is the top-most entry in a schema1 manifest; its
	// V1Compatibility blob becomes the basis of the image config.
	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
	if err != nil {
		return "", "", err
	}

	imageID, err := p.config.ImageStore.Put(ctx, config)
	if err != nil {
		return "", "", err
	}

	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)

	return imageID, manifestDigest, nil
}
  523. func checkSupportedMediaType(mediaType string) error {
  524. lowerMt := strings.ToLower(mediaType)
  525. for _, mt := range supportedMediaTypes {
  526. // The should either be an exact match, or have a valid prefix
  527. // we append a "." when matching prefixes to exclude "false positives";
  528. // for example, we don't want to match "application/vnd.oci.images_are_fun_yolo".
  529. if lowerMt == mt || strings.HasPrefix(lowerMt, mt+".") {
  530. return nil
  531. }
  532. }
  533. return unsupportedMediaTypeError{MediaType: mediaType}
  534. }
// pullSchema2Layers pulls the image config and layer blobs referenced by a
// schema2/OCI manifest and stores the resulting image, returning its ID.
// The config blob and the layers are downloaded concurrently; the config is
// then cross-checked against the downloaded layers' DiffIDs before the image
// is committed to the store.
func (p *puller) pullSchema2Layers(ctx context.Context, target distribution.Descriptor, layers []distribution.Descriptor, platform *ocispec.Platform) (id digest.Digest, err error) {
	if _, err := p.config.ImageStore.Get(ctx, target.Digest); err == nil {
		// If the image already exists locally, no need to pull
		// anything.
		return target.Digest, nil
	}

	if err := checkSupportedMediaType(target.MediaType); err != nil {
		return "", err
	}

	var descriptors []xfer.DownloadDescriptor

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for _, d := range layers {
		if err := d.Digest.Validate(); err != nil {
			return "", errors.Wrapf(err, "could not validate layer digest %q", d.Digest)
		}
		if err := checkSupportedMediaType(d.MediaType); err != nil {
			return "", err
		}
		layerDescriptor := &layerDescriptor{
			digest:          d.Digest,
			repo:            p.repo,
			repoInfo:        p.repoInfo,
			metadataService: p.metadataService,
			src:             d,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	// Channels for the concurrent config pull and layer downloads. Each is
	// buffered (or closed) so the producing goroutine never blocks forever.
	configChan := make(chan []byte, 1)
	configErrChan := make(chan error, 1)
	layerErrChan := make(chan error, 1)
	downloadsDone := make(chan struct{})
	var cancel func()
	ctx, cancel = context.WithCancel(ctx)
	defer cancel()

	// Pull the image config
	go func() {
		configJSON, err := p.pullSchema2Config(ctx, target.Digest)
		if err != nil {
			configErrChan <- imageConfigPullError{Err: err}
			// A failed config pull aborts the layer downloads too.
			cancel()
			return
		}
		configChan <- configJSON
	}()

	var (
		configJSON       []byte            // raw serialized image config
		downloadedRootFS *image.RootFS     // rootFS from registered layers
		configRootFS     *image.RootFS     // rootFS from configuration
		release          func()            // release resources from rootFS download
		configPlatform   *ocispec.Platform // for LCOW when registering downloaded layers
	)

	layerStoreOS := runtime.GOOS
	if platform != nil {
		layerStoreOS = platform.OS
	}

	// https://github.com/docker/docker/issues/24766 - Err on the side of caution,
	// explicitly blocking images intended for linux from the Windows daemon. On
	// Windows, we do this before the attempt to download, effectively serialising
	// the download slightly slowing it down. We have to do it this way, as
	// chances are the download of layers itself would fail due to file names
	// which aren't suitable for NTFS. At some point in the future, if a similar
	// check to block Windows images being pulled on Linux is implemented, it
	// may be necessary to perform the same type of serialisation.
	if runtime.GOOS == "windows" {
		configJSON, configRootFS, configPlatform, err = receiveConfig(configChan, configErrChan)
		if err != nil {
			return "", err
		}
		if configRootFS == nil {
			return "", errRootFSInvalid
		}
		if err := checkImageCompatibility(configPlatform.OS, configPlatform.OSVersion); err != nil {
			return "", err
		}

		if len(descriptors) != len(configRootFS.DiffIDs) {
			return "", errRootFSMismatch
		}
		if platform == nil {
			// Early bath if the requested OS doesn't match that of the configuration.
			// This avoids doing the download, only to potentially fail later.
			if !system.IsOSSupported(configPlatform.OS) {
				return "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configPlatform.OS, layerStoreOS)
			}
			layerStoreOS = configPlatform.OS
		}

		// Populate diff ids in descriptors to avoid downloading foreign layers
		// which have been side loaded
		for i := range descriptors {
			descriptors[i].(*layerDescriptor).diffID = configRootFS.DiffIDs[i]
		}
	}

	// Assume that the operating system is the host OS if blank, and validate it
	// to ensure we don't cause a panic by an invalid index into the layerstores.
	if layerStoreOS != "" && !system.IsOSSupported(layerStoreOS) {
		return "", system.ErrNotSupportedOperatingSystem
	}

	if p.config.DownloadManager != nil {
		go func() {
			var (
				err    error
				rootFS image.RootFS
			)
			downloadRootFS := *image.NewRootFS()
			rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
			if err != nil {
				// Intentionally do not cancel the config download here
				// as the error from config download (if there is one)
				// is more interesting than the layer download error
				layerErrChan <- err
				return
			}

			downloadedRootFS = &rootFS
			close(downloadsDone)
		}()
	} else {
		// We have nothing to download
		close(downloadsDone)
	}

	if configJSON == nil {
		// Non-Windows path: the config wasn't received above; wait for it now.
		configJSON, configRootFS, _, err = receiveConfig(configChan, configErrChan)
		if err == nil && configRootFS == nil {
			err = errRootFSInvalid
		}
		if err != nil {
			// Make sure the layer goroutine has finished (one way or the
			// other) before returning, so it doesn't leak.
			cancel()
			select {
			case <-downloadsDone:
			case <-layerErrChan:
			}
			return "", err
		}
	}

	select {
	case <-downloadsDone:
	case err = <-layerErrChan:
		return "", err
	}

	if release != nil {
		defer release()
	}

	if downloadedRootFS != nil {
		// The DiffIDs returned in rootFS MUST match those in the config.
		// Otherwise the image config could be referencing layers that aren't
		// included in the manifest.
		if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) {
			return "", errRootFSMismatch
		}

		for i := range downloadedRootFS.DiffIDs {
			if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] {
				return "", errRootFSMismatch
			}
		}
	}

	imageID, err := p.config.ImageStore.Put(ctx, configJSON)
	if err != nil {
		return "", err
	}

	return imageID, nil
}
  695. func (p *puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest, platform *ocispec.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
  696. manifestDigest, err = schema2ManifestDigest(ref, mfst)
  697. if err != nil {
  698. return "", "", err
  699. }
  700. id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform)
  701. return id, manifestDigest, err
  702. }
  703. func (p *puller) pullOCI(ctx context.Context, ref reference.Named, mfst *ocischema.DeserializedManifest, platform *ocispec.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
  704. manifestDigest, err = schema2ManifestDigest(ref, mfst)
  705. if err != nil {
  706. return "", "", err
  707. }
  708. id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform)
  709. return id, manifestDigest, err
  710. }
  711. func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, *ocispec.Platform, error) {
  712. select {
  713. case configJSON := <-configChan:
  714. rootfs, err := rootFSFromConfig(configJSON)
  715. if err != nil {
  716. return nil, nil, nil, err
  717. }
  718. platform, err := platformFromConfig(configJSON)
  719. if err != nil {
  720. return nil, nil, nil, err
  721. }
  722. return configJSON, rootfs, platform, nil
  723. case err := <-errChan:
  724. return nil, nil, nil, err
  725. // Don't need a case for ctx.Done in the select because cancellation
  726. // will trigger an error in p.pullSchema2ImageConfig.
  727. }
  728. }
// pullManifestList handles "manifest lists" which point to various
// platform-specific manifests.
func (p *puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList, pp *ocispec.Platform) (id digest.Digest, manifestListDigest digest.Digest, err error) {
	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
	if err != nil {
		return "", "", err
	}

	// A nil requested platform means "no explicit preference":
	// filterManifests is handed the zero ocispec.Platform value in that case.
	var platform ocispec.Platform
	if pp != nil {
		platform = *pp
	}
	log.G(ctx).Debugf("%s resolved to a manifestList object with %d entries; looking for a %s match", ref, len(mfstList.Manifests), platforms.Format(platform))

	manifestMatches := filterManifests(mfstList.Manifests, platform)

	// Try each candidate entry in order; the first one that pulls
	// successfully is returned.
	for _, match := range manifestMatches {
		if err := checkImageCompatibility(match.Platform.OS, match.Platform.OSVersion); err != nil {
			return "", "", err
		}

		desc := ocispec.Descriptor{
			Digest:    match.Digest,
			Size:      match.Size,
			MediaType: match.MediaType,
		}
		manifest, err := p.manifestStore.Get(ctx, desc, ref)
		if err != nil {
			return "", "", err
		}

		// Pull the platform-specific manifest by its digest so the content
		// remains pinned to the entry listed in the manifest list.
		manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), match.Digest)
		if err != nil {
			return "", "", err
		}

		// Dispatch on the concrete manifest type of the matched entry.
		switch v := manifest.(type) {
		case *schema1.SignedManifest:
			msg := fmt.Sprintf("[DEPRECATION NOTICE] v2 schema1 manifests in manifest lists are not supported and will break in a future release. Suggest author of %s to upgrade to v2 schema2. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref)
			log.G(ctx).Warn(msg)
			progress.Message(p.config.ProgressOutput, "", msg)

			platform := toOCIPlatform(match.Platform)
			id, _, err = p.pullSchema1(ctx, manifestRef, v, platform)
			if err != nil {
				return "", "", err
			}
		case *schema2.DeserializedManifest:
			platform := toOCIPlatform(match.Platform)
			id, _, err = p.pullSchema2(ctx, manifestRef, v, platform)
			if err != nil {
				return "", "", err
			}
		case *ocischema.DeserializedManifest:
			platform := toOCIPlatform(match.Platform)
			id, _, err = p.pullOCI(ctx, manifestRef, v, platform)
			if err != nil {
				return "", "", err
			}
		case *manifestlist.DeserializedManifestList:
			// Nested manifest list: recurse with the originally requested
			// platform pointer (pp, which may be nil).
			id, _, err = p.pullManifestList(ctx, manifestRef, v, pp)
			if err != nil {
				var noMatches noMatchesErr
				if !errors.As(err, &noMatches) {
					// test the next match
					// NOTE(review): the next candidate is tried only when the
					// nested pull failed with something OTHER than
					// noMatchesErr; a nested noMatchesErr falls through and is
					// returned below. Confirm this inversion is intentional.
					continue
				}
			}
		default:
			// OCI spec requires to skip unknown manifest types
			continue
		}
		// err can still be non-nil here (nested manifest-list case above);
		// it is propagated to the caller alongside the resolved id.
		return id, manifestListDigest, err
	}
	return "", "", noMatchesErr{platform: platform}
}
const (
	// defaultSchemaPullBackoff is the initial delay between attempts of a
	// failed schema/config blob pull; retry doubles it after every failure.
	defaultSchemaPullBackoff = 250 * time.Millisecond
	// defaultMaxSchemaPullAttempts is the total number of attempts made by
	// pullSchema2Config before giving up.
	defaultMaxSchemaPullAttempts = 5
)
  802. func (p *puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
  803. blobs := p.repo.Blobs(ctx)
  804. err = retry(ctx, defaultMaxSchemaPullAttempts, defaultSchemaPullBackoff, func(ctx context.Context) (err error) {
  805. configJSON, err = blobs.Get(ctx, dgst)
  806. return err
  807. })
  808. if err != nil {
  809. return nil, err
  810. }
  811. // Verify image config digest
  812. verifier := dgst.Verifier()
  813. if _, err := verifier.Write(configJSON); err != nil {
  814. return nil, err
  815. }
  816. if !verifier.Verified() {
  817. err := fmt.Errorf("image config verification failed for digest %s", dgst)
  818. log.G(ctx).Error(err)
  819. return nil, err
  820. }
  821. return configJSON, nil
  822. }
// noMatchesErr is returned by pullManifestList when none of the entries in
// a manifest list matched the requested platform.
type noMatchesErr struct {
	// platform is the platform for which no manifest-list entry matched.
	platform ocispec.Platform
}
  826. func (e noMatchesErr) Error() string {
  827. return fmt.Sprintf("no matching manifest for %s in the manifest list entries", formatPlatform(e.platform))
  828. }
  829. func retry(ctx context.Context, maxAttempts int, sleep time.Duration, f func(ctx context.Context) error) (err error) {
  830. attempt := 0
  831. for ; attempt < maxAttempts; attempt++ {
  832. err = retryOnError(f(ctx))
  833. if err == nil {
  834. return nil
  835. }
  836. if xfer.IsDoNotRetryError(err) {
  837. break
  838. }
  839. if attempt+1 < maxAttempts {
  840. timer := time.NewTimer(sleep)
  841. select {
  842. case <-ctx.Done():
  843. timer.Stop()
  844. return ctx.Err()
  845. case <-timer.C:
  846. log.G(ctx).WithError(err).WithField("attempts", attempt+1).Debug("retrying after error")
  847. sleep *= 2
  848. }
  849. }
  850. }
  851. return errors.Wrapf(err, "download failed after attempts=%d", attempt+1)
  852. }
  853. // schema2ManifestDigest computes the manifest digest, and, if pulling by
  854. // digest, ensures that it matches the requested digest.
  855. func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
  856. _, canonical, err := mfst.Payload()
  857. if err != nil {
  858. return "", err
  859. }
  860. // If pull by digest, then verify the manifest digest.
  861. if digested, isDigested := ref.(reference.Canonical); isDigested {
  862. verifier := digested.Digest().Verifier()
  863. if _, err := verifier.Write(canonical); err != nil {
  864. return "", err
  865. }
  866. if !verifier.Verified() {
  867. err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
  868. log.G(context.TODO()).Error(err)
  869. return "", err
  870. }
  871. return digested.Digest(), nil
  872. }
  873. return digest.FromBytes(canonical), nil
  874. }
  875. func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Reference) (m *schema1.Manifest, err error) {
  876. // If pull by digest, then verify the manifest digest. NOTE: It is
  877. // important to do this first, before any other content validation. If the
  878. // digest cannot be verified, don't even bother with those other things.
  879. if digested, isCanonical := ref.(reference.Canonical); isCanonical {
  880. verifier := digested.Digest().Verifier()
  881. if _, err := verifier.Write(signedManifest.Canonical); err != nil {
  882. return nil, err
  883. }
  884. if !verifier.Verified() {
  885. err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
  886. log.G(context.TODO()).Error(err)
  887. return nil, err
  888. }
  889. }
  890. m = &signedManifest.Manifest
  891. if m.SchemaVersion != 1 {
  892. return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref))
  893. }
  894. if len(m.FSLayers) != len(m.History) {
  895. return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref))
  896. }
  897. if len(m.FSLayers) == 0 {
  898. return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref))
  899. }
  900. return m, nil
  901. }
// fixManifestLayers removes repeated layers from the manifest and checks the
// correctness of the parent chain.
func fixManifestLayers(m *schema1.Manifest) error {
	// Decode the V1Compatibility blob of every history entry so layer IDs
	// and parent links can be validated. imgs[i] corresponds to
	// m.FSLayers[i]/m.History[i].
	imgs := make([]*image.V1Image, len(m.FSLayers))
	for i := range m.FSLayers {
		img := &image.V1Image{}
		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
			return err
		}
		imgs[i] = img
		if err := v1.ValidateID(img.ID); err != nil {
			return err
		}
	}

	// The last entry is the base layer; it must not reference a parent.
	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
		// Windows base layer can point to a base layer parent that is not in manifest.
		return errors.New("invalid parent ID in the base layer of the image")
	}

	// check general duplicates to error instead of a deadlock
	idmap := make(map[string]struct{})

	var lastID string
	for _, img := range imgs {
		// skip IDs that appear after each other, we handle those later
		// (only non-adjacent repeats are an error here).
		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
		}
		lastID = img.ID
		idmap[lastID] = struct{}{}
	}

	// backwards loop so that we keep the remaining indexes after removing items
	for i := len(imgs) - 2; i >= 0; i-- {
		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
			m.History = append(m.History[:i], m.History[i+1:]...)
		} else if imgs[i].Parent != imgs[i+1].ID {
			// Each entry's parent must be the next (older) entry's ID.
			return fmt.Errorf("invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
		}
	}

	return nil
}
  942. func createDownloadFile() (*os.File, error) {
  943. return os.CreateTemp("", "GetImageBlob")
  944. }
  945. func toOCIPlatform(p manifestlist.PlatformSpec) *ocispec.Platform {
  946. // distribution pkg does define platform as pointer so this hack for empty struct
  947. // is necessary. This is temporary until correct OCI image-spec package is used.
  948. if p.OS == "" && p.Architecture == "" && p.Variant == "" && p.OSVersion == "" && p.OSFeatures == nil && p.Features == nil {
  949. return nil
  950. }
  951. return &ocispec.Platform{
  952. OS: p.OS,
  953. Architecture: p.Architecture,
  954. Variant: p.Variant,
  955. OSFeatures: p.OSFeatures,
  956. OSVersion: p.OSVersion,
  957. }
  958. }
  959. // maximumSpec returns the distribution platform with maximum compatibility for the current node.
  960. func maximumSpec() ocispec.Platform {
  961. p := platforms.DefaultSpec()
  962. if p.Architecture == "amd64" {
  963. p.Variant = archvariant.AMD64Variant()
  964. }
  965. return p
  966. }