pull_v2.go

package distribution // import "github.com/docker/docker/distribution"

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net/url"
	"os"
	"runtime"
	"strings"

	"github.com/containerd/containerd/platforms"
	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/system"
	refstore "github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	"github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)
var (
	errRootFSMismatch = errors.New("layers from manifest don't match image configuration")
	errRootFSInvalid  = errors.New("invalid rootfs in image configuration")
)

// ImageConfigPullError is an error pulling the image config blob
// (only applies to schema2).
type ImageConfigPullError struct {
	Err error
}

// Error returns the error string for ImageConfigPullError.
func (e ImageConfigPullError) Error() string {
	return "error pulling image configuration: " + e.Err.Error()
}
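
// v2Puller pulls images from a single repository on a v2 registry endpoint.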
type v2Puller struct {
	V2MetadataService metadata.V2MetadataService
	endpoint          registry.APIEndpoint
	config            *ImagePullConfig
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	// confirmedV2 is set to true if we confirm we're talking to a v2
	// registry. This is used to limit fallbacks to the v1 protocol.
	confirmedV2 bool
}
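
// Pull resolves the v2 repository for ref and pulls it. Errors that permit
// falling back to the v1 protocol are wrapped in a fallbackError.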
func (p *v2Puller) Pull(ctx context.Context, ref reference.Named, platform *specs.Platform) (err error) {
	// TODO(tiborvass): was ReceiveTimeout
	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
	if err != nil {
		logrus.Warnf("Error getting v2 registry: %v", err)
		return err
	}

	if err = p.pullV2Repository(ctx, ref, platform); err != nil {
		if _, ok := err.(fallbackError); ok {
			return err
		}
		if continueOnError(err, p.endpoint.Mirror) {
			return fallbackError{
				err:         err,
				confirmedV2: p.confirmedV2,
				transportOK: true,
			}
		}
	}
	return err
}
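
// pullV2Repository pulls the single tag or digest named by ref, or every tag
// in the repository when ref is name-only, and writes the final pull status.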
func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named, platform *specs.Platform) (err error) {
	var layersDownloaded bool
	if !reference.IsNameOnly(ref) {
		layersDownloaded, err = p.pullV2Tag(ctx, ref, platform)
		if err != nil {
			return err
		}
	} else {
		tags, err := p.repo.Tags(ctx).All(ctx)
		if err != nil {
			// If this repository doesn't exist on V2, we should
			// permit a fallback to V1.
			return allowV1Fallback(err)
		}
		// The v2 registry knows about this repository, so we will not
		// allow fallback to the v1 protocol even if we encounter an
		// error later on.
		p.confirmedV2 = true

		for _, tag := range tags {
			tagRef, err := reference.WithTag(ref, tag)
			if err != nil {
				return err
			}
			pulledNew, err := p.pullV2Tag(ctx, tagRef, platform)
			if err != nil {
				// Since this is the pull-all-tags case, don't
				// allow an error pulling a particular tag to
				// make the whole pull fall back to v1.
				if fallbackErr, ok := err.(fallbackError); ok {
					return fallbackErr.err
				}
				return err
			}
			// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
			// TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus?
			layersDownloaded = layersDownloaded || pulledNew
		}
	}

	writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded)

	return nil
}
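
// v2LayerDescriptor describes a layer blob to be downloaded from a v2
// registry; it is consumed by the download manager as an
// xfer.DownloadDescriptor.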
type v2LayerDescriptor struct {
	digest            digest.Digest
	diffID            layer.DiffID
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	V2MetadataService metadata.V2MetadataService
	tmpFile           *os.File
	verifier          digest.Verifier
	src               distribution.Descriptor
}

func (ld *v2LayerDescriptor) Key() string {
	return "v2:" + ld.digest.String()
}

func (ld *v2LayerDescriptor) ID() string {
	return stringid.TruncateID(ld.digest.String())
}

func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
	if ld.diffID != "" {
		return ld.diffID, nil
	}
	return ld.V2MetadataService.GetDiffID(ld.digest)
}
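
// Download fetches the layer blob, resuming a partial temporary file when
// possible, verifies the data against the expected digest, and returns a
// reader over the downloaded content along with its size.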
func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	logrus.Debugf("pulling blob %q", ld.digest)

	var (
		err    error
		offset int64
	)

	if ld.tmpFile == nil {
		ld.tmpFile, err = createDownloadFile()
		if err != nil {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
	} else {
		offset, err = ld.tmpFile.Seek(0, os.SEEK_END)
		if err != nil {
			logrus.Debugf("error seeking to end of download file: %v", err)
			offset = 0

			ld.tmpFile.Close()
			if err := os.Remove(ld.tmpFile.Name()); err != nil {
				logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
			}
			ld.tmpFile, err = createDownloadFile()
			if err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		} else if offset != 0 {
			logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset)
		}
	}

	tmpFile := ld.tmpFile

	layerDownload, err := ld.open(ctx)
	if err != nil {
		logrus.Errorf("Error initiating layer download: %v", err)
		return nil, 0, retryOnError(err)
	}

	if offset != 0 {
		_, err := layerDownload.Seek(offset, os.SEEK_SET)
		if err != nil {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
	}
	size, err := layerDownload.Seek(0, os.SEEK_END)
	if err != nil {
		// Seek failed, perhaps because there was no Content-Length
		// header. This shouldn't fail the download, because we can
		// still continue without a progress bar.
		size = 0
	} else {
		if size != 0 && offset > size {
			logrus.Debug("Partial download is larger than full blob. Starting over")
			offset = 0
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		}

		// Restore the seek offset either at the beginning of the
		// stream, or just after the last byte we have from previous
		// attempts.
		_, err = layerDownload.Seek(offset, os.SEEK_SET)
		if err != nil {
			return nil, 0, err
		}
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading")
	defer reader.Close()

	if ld.verifier == nil {
		ld.verifier = ld.digest.Verifier()
	}

	_, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier))
	if err != nil {
		if err == transport.ErrWrongCodeForByteRange {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
		return nil, 0, retryOnError(err)
	}

	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")

	if !ld.verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
		logrus.Error(err)

		// Allow a retry if this digest verification error happened
		// after a resumed download.
		if offset != 0 {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}

			return nil, 0, err
		}
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())

	_, err = tmpFile.Seek(0, os.SEEK_SET)
	if err != nil {
		tmpFile.Close()
		if err := os.Remove(tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		ld.tmpFile = nil
		ld.verifier = nil
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	// hand off the temporary file to the download manager, so it will only
	// be closed once
	ld.tmpFile = nil

	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
		tmpFile.Close()
		err := os.RemoveAll(tmpFile.Name())
		if err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		return err
	}), size, nil
}
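
// Close releases the temporary download file, if one is still held.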
func (ld *v2LayerDescriptor) Close() {
	if ld.tmpFile != nil {
		ld.tmpFile.Close()
		if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
		}
	}
}
func (ld *v2LayerDescriptor) truncateDownloadFile() error {
	// Need a new hash context since we will be redoing the download
	ld.verifier = nil

	if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil {
		logrus.Errorf("error seeking to beginning of download file: %v", err)
		return err
	}

	if err := ld.tmpFile.Truncate(0); err != nil {
		logrus.Errorf("error truncating download file: %v", err)
		return err
	}

	return nil
}

func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()})
}
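
// pullV2Tag fetches the manifest for ref, dispatches to the schema-specific
// pull path, and records the result in the reference store. The returned
// bool is true when new layers were downloaded or the reference was newly
// tagged.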
func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named, platform *specs.Platform) (tagUpdated bool, err error) {
	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return false, err
	}

	var (
		manifest    distribution.Manifest
		tagOrDigest string // Used for logging/progress only
	)
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		manifest, err = manSvc.Get(ctx, digested.Digest())
		if err != nil {
			return false, err
		}
		tagOrDigest = digested.Digest().String()
	} else if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag()))
		if err != nil {
			return false, allowV1Fallback(err)
		}
		tagOrDigest = tagged.Tag()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref))
	}

	if manifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}

	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
		var allowedMediatype bool
		for _, t := range p.config.Schema2Types {
			if m.Manifest.Config.MediaType == t {
				allowedMediatype = true
				break
			}
		}
		if !allowedMediatype {
			configClass := mediaTypeClasses[m.Manifest.Config.MediaType]
			if configClass == "" {
				configClass = "unknown"
			}
			return false, invalidManifestClassError{m.Manifest.Config.MediaType, configClass}
		}
	}

	// If manSvc.Get succeeded, we can be confident that the registry on
	// the other side speaks the v2 protocol.
	p.confirmedV2 = true

	logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref))
	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named()))

	var (
		id             digest.Digest
		manifestDigest digest.Digest
	)

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		if p.config.RequireSchema2 {
			return false, fmt.Errorf("invalid manifest: not schema2")
		}
		id, manifestDigest, err = p.pullSchema1(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	case *schema2.DeserializedManifest:
		id, manifestDigest, err = p.pullSchema2(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	case *manifestlist.DeserializedManifestList:
		id, manifestDigest, err = p.pullManifestList(ctx, ref, v, platform)
		if err != nil {
			return false, err
		}
	default:
		return false, invalidManifestFormatError{}
	}

	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())

	if p.config.ReferenceStore != nil {
		oldTagID, err := p.config.ReferenceStore.Get(ref)
		if err == nil {
			if oldTagID == id {
				return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
			}
		} else if err != refstore.ErrDoesNotExist {
			return false, err
		}

		if canonical, ok := ref.(reference.Canonical); ok {
			if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
				return false, err
			}
		} else {
			if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
				return false, err
			}
			if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
				return false, err
			}
		}
	}
	return true, nil
}
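
// pullSchema1 pulls an image from a verified schema1 manifest: it downloads
// the layers bottom-up and synthesizes the image configuration from the
// v1-compatibility history.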
func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unverifiedManifest *schema1.SignedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
	if err != nil {
		return "", "", err
	}

	rootFS := image.NewRootFS()

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return "", "", err
	}

	var descriptors []xfer.DownloadDescriptor

	// Image history converted to the new format
	var history []image.History

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return "", "", err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return "", "", err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		layerDescriptor := &v2LayerDescriptor{
			digest:            blobSum,
			repoInfo:          p.repoInfo,
			repo:              p.repo,
			V2MetadataService: p.V2MetadataService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	// The v1 manifest itself doesn't directly contain an OS. However,
	// the history does, but unfortunately that's a string, so search through
	// all the history until hopefully we find one which indicates the OS.
	// supertest2014/nyan is an example of a registry image with schemav1.
	configOS := runtime.GOOS
	if system.LCOWSupported() {
		type config struct {
			Os string `json:"os,omitempty"`
		}
		for _, v := range verifiedManifest.History {
			var c config
			if err := json.Unmarshal([]byte(v.V1Compatibility), &c); err == nil {
				if c.Os != "" {
					configOS = c.Os
					break
				}
			}
		}
	}

	// In the situation that the API call didn't specify an OS explicitly, but
	// we support the operating system, switch to that operating system.
	// eg FROM supertest2014/nyan with no platform specifier, and docker build
	// with no --platform= flag under LCOW.
	requestedOS := ""
	if platform != nil {
		requestedOS = platform.OS
	} else if system.IsOSSupported(configOS) {
		requestedOS = configOS
	}

	// Early bath if the requested OS doesn't match that of the configuration.
	// This avoids doing the download, only to potentially fail later.
	if !strings.EqualFold(configOS, requestedOS) {
		return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configOS, requestedOS)
	}

	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, configOS, descriptors, p.config.ProgressOutput)
	if err != nil {
		return "", "", err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
	if err != nil {
		return "", "", err
	}

	imageID, err := p.config.ImageStore.Put(config)
	if err != nil {
		return "", "", err
	}

	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)

	return imageID, manifestDigest, nil
}
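
// pullSchema2 pulls an image from a schema2 manifest, downloading the image
// configuration and the layers concurrently and checking that the layer
// diff IDs match those recorded in the configuration.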
func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
	manifestDigest, err = schema2ManifestDigest(ref, mfst)
	if err != nil {
		return "", "", err
	}

	target := mfst.Target()
	if _, err := p.config.ImageStore.Get(target.Digest); err == nil {
		// If the image already exists locally, no need to pull
		// anything.
		return target.Digest, manifestDigest, nil
	}

	var descriptors []xfer.DownloadDescriptor

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for _, d := range mfst.Layers {
		layerDescriptor := &v2LayerDescriptor{
			digest:            d.Digest,
			repo:              p.repo,
			repoInfo:          p.repoInfo,
			V2MetadataService: p.V2MetadataService,
			src:               d,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	configChan := make(chan []byte, 1)
	configErrChan := make(chan error, 1)
	layerErrChan := make(chan error, 1)
	downloadsDone := make(chan struct{})
	var cancel func()
	ctx, cancel = context.WithCancel(ctx)
	defer cancel()

	// Pull the image config
	go func() {
		configJSON, err := p.pullSchema2Config(ctx, target.Digest)
		if err != nil {
			configErrChan <- ImageConfigPullError{Err: err}
			cancel()
			return
		}
		configChan <- configJSON
	}()

	var (
		configJSON       []byte          // raw serialized image config
		downloadedRootFS *image.RootFS   // rootFS from registered layers
		configRootFS     *image.RootFS   // rootFS from configuration
		release          func()          // release resources from rootFS download
		configPlatform   *specs.Platform // for LCOW when registering downloaded layers
	)

	layerStoreOS := runtime.GOOS
	if platform != nil {
		layerStoreOS = platform.OS
	}

	// https://github.com/docker/docker/issues/24766 - Err on the side of caution,
	// explicitly blocking images intended for linux from the Windows daemon. On
	// Windows, we do this before the attempt to download, effectively serialising
	// the download slightly slowing it down. We have to do it this way, as
	// chances are the download of layers itself would fail due to file names
	// which aren't suitable for NTFS. At some point in the future, if a similar
	// check to block Windows images being pulled on Linux is implemented, it
	// may be necessary to perform the same type of serialisation.
	if runtime.GOOS == "windows" {
		configJSON, configRootFS, configPlatform, err = receiveConfig(p.config.ImageStore, configChan, configErrChan)
		if err != nil {
			return "", "", err
		}
		if configRootFS == nil {
			return "", "", errRootFSInvalid
		}
		if err := checkImageCompatibility(configPlatform.OS, configPlatform.OSVersion); err != nil {
			return "", "", err
		}

		if len(descriptors) != len(configRootFS.DiffIDs) {
			return "", "", errRootFSMismatch
		}
		if platform == nil {
			// Early bath if the requested OS doesn't match that of the configuration.
			// This avoids doing the download, only to potentially fail later.
			if !system.IsOSSupported(configPlatform.OS) {
				return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configPlatform.OS, layerStoreOS)
			}
			layerStoreOS = configPlatform.OS
		}

		// Populate diff ids in descriptors to avoid downloading foreign layers
		// which have been side loaded
		for i := range descriptors {
			descriptors[i].(*v2LayerDescriptor).diffID = configRootFS.DiffIDs[i]
		}
	}

	if p.config.DownloadManager != nil {
		go func() {
			var (
				err    error
				rootFS image.RootFS
			)
			downloadRootFS := *image.NewRootFS()
			rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, layerStoreOS, descriptors, p.config.ProgressOutput)
			if err != nil {
				// Intentionally do not cancel the config download here
				// as the error from config download (if there is one)
				// is more interesting than the layer download error
				layerErrChan <- err
				return
			}

			downloadedRootFS = &rootFS
			close(downloadsDone)
		}()
	} else {
		// We have nothing to download
		close(downloadsDone)
	}

	if configJSON == nil {
		configJSON, configRootFS, _, err = receiveConfig(p.config.ImageStore, configChan, configErrChan)
		if err == nil && configRootFS == nil {
			err = errRootFSInvalid
		}
		if err != nil {
			cancel()
			select {
			case <-downloadsDone:
			case <-layerErrChan:
			}
			return "", "", err
		}
	}

	select {
	case <-downloadsDone:
	case err = <-layerErrChan:
		return "", "", err
	}

	if release != nil {
		defer release()
	}

	if downloadedRootFS != nil {
		// The DiffIDs returned in rootFS MUST match those in the config.
		// Otherwise the image config could be referencing layers that aren't
		// included in the manifest.
		if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) {
			return "", "", errRootFSMismatch
		}

		for i := range downloadedRootFS.DiffIDs {
			if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] {
				return "", "", errRootFSMismatch
			}
		}
	}

	imageID, err := p.config.ImageStore.Put(configJSON)
	if err != nil {
		return "", "", err
	}

	return imageID, manifestDigest, nil
}
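
// receiveConfig waits for the raw image configuration (or an error) from the
// config pull goroutine and derives its rootfs and platform.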
func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, *specs.Platform, error) {
	select {
	case configJSON := <-configChan:
		rootfs, err := s.RootFSFromConfig(configJSON)
		if err != nil {
			return nil, nil, nil, err
		}
		platform, err := s.PlatformFromConfig(configJSON)
		if err != nil {
			return nil, nil, nil, err
		}
		return configJSON, rootfs, platform, nil
	case err := <-errChan:
		return nil, nil, nil, err
		// Don't need a case for ctx.Done in the select because cancellation
		// will trigger an error in p.pullSchema2ImageConfig.
	}
}
// pullManifestList handles "manifest lists" which point to various
// platform-specific manifests.
func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList, pp *specs.Platform) (id digest.Digest, manifestListDigest digest.Digest, err error) {
	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
	if err != nil {
		return "", "", err
	}

	var platform specs.Platform
	if pp != nil {
		platform = *pp
	}
	logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a %s/%s match", ref, len(mfstList.Manifests), platforms.Format(platform), runtime.GOARCH)

	manifestMatches := filterManifests(mfstList.Manifests, platform)

	if len(manifestMatches) == 0 {
		errMsg := fmt.Sprintf("no matching manifest for %s in the manifest list entries", formatPlatform(platform))
		logrus.Debugf(errMsg)
		return "", "", errors.New(errMsg)
	}

	if len(manifestMatches) > 1 {
		logrus.Debugf("found multiple matches in manifest list, choosing best match %s", manifestMatches[0].Digest.String())
	}
	manifestDigest := manifestMatches[0].Digest

	if err := checkImageCompatibility(manifestMatches[0].Platform.OS, manifestMatches[0].Platform.OSVersion); err != nil {
		return "", "", err
	}

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return "", "", err
	}

	manifest, err := manSvc.Get(ctx, manifestDigest)
	if err != nil {
		return "", "", err
	}

	manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), manifestDigest)
	if err != nil {
		return "", "", err
	}

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		platform := toOCIPlatform(manifestMatches[0].Platform)
		id, _, err = p.pullSchema1(ctx, manifestRef, v, &platform)
		if err != nil {
			return "", "", err
		}
	case *schema2.DeserializedManifest:
		platform := toOCIPlatform(manifestMatches[0].Platform)
		id, _, err = p.pullSchema2(ctx, manifestRef, v, &platform)
		if err != nil {
			return "", "", err
		}
	default:
		return "", "", errors.New("unsupported manifest format")
	}

	return id, manifestListDigest, err
}
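
// pullSchema2Config downloads the image configuration blob for dgst and
// verifies it against that digest.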
func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
	blobs := p.repo.Blobs(ctx)
	configJSON, err = blobs.Get(ctx, dgst)
	if err != nil {
		return nil, err
	}

	// Verify image config digest
	verifier := dgst.Verifier()
	if _, err := verifier.Write(configJSON); err != nil {
		return nil, err
	}
	if !verifier.Verified() {
		err := fmt.Errorf("image config verification failed for digest %s", dgst)
		logrus.Error(err)
		return nil, err
	}

	return configJSON, nil
}
// schema2ManifestDigest computes the manifest digest, and, if pulling by
// digest, ensures that it matches the requested digest.
func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
	_, canonical, err := mfst.Payload()
	if err != nil {
		return "", err
	}

	// If pull by digest, then verify the manifest digest.
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		verifier := digested.Digest().Verifier()
		if _, err := verifier.Write(canonical); err != nil {
			return "", err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return "", err
		}
		return digested.Digest(), nil
	}

	return digest.FromBytes(canonical), nil
}
// allowV1Fallback checks if the error is a possible reason to fallback to v1
// (even if confirmedV2 has been set already), and if so, wraps the error in
// a fallbackError with confirmedV2 set to false. Otherwise, it returns the
// error unmodified.
func allowV1Fallback(err error) error {
	switch v := err.(type) {
	case errcode.Errors:
		if len(v) != 0 {
			if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) {
				return fallbackError{
					err:         err,
					confirmedV2: false,
					transportOK: true,
				}
			}
		}
	case errcode.Error:
		if shouldV2Fallback(v) {
			return fallbackError{
				err:         err,
				confirmedV2: false,
				transportOK: true,
			}
		}
	case *url.Error:
		if v.Err == auth.ErrNoBasicAuthCredentials {
			return fallbackError{err: err, confirmedV2: false}
		}
	}

	return err
}
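
// verifySchema1Manifest verifies the manifest digest when pulling by digest
// and performs basic structural validation of the schema1 manifest.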
func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Reference) (m *schema1.Manifest, err error) {
	// If pull by digest, then verify the manifest digest. NOTE: It is
	// important to do this first, before any other content validation. If the
	// digest cannot be verified, don't even bother with those other things.
	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
		verifier := digested.Digest().Verifier()
		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
			return nil, err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return nil, err
		}
	}
	m = &signedManifest.Manifest

	if m.SchemaVersion != 1 {
		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref))
	}
	if len(m.FSLayers) != len(m.History) {
		return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref))
	}
	if len(m.FSLayers) == 0 {
		return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref))
	}
	return m, nil
}
// fixManifestLayers removes repeated layers from the manifest and checks the
// correctness of the parent chain.
func fixManifestLayers(m *schema1.Manifest) error {
	imgs := make([]*image.V1Image, len(m.FSLayers))
	for i := range m.FSLayers {
		img := &image.V1Image{}

		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
			return err
		}

		imgs[i] = img
		if err := v1.ValidateID(img.ID); err != nil {
			return err
		}
	}

	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
		// Windows base layer can point to a base layer parent that is not in manifest.
		return errors.New("invalid parent ID in the base layer of the image")
	}

	// check general duplicates to error instead of a deadlock
	idmap := make(map[string]struct{})

	var lastID string
	for _, img := range imgs {
		// skip IDs that appear after each other, we handle those later
		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
		}
		lastID = img.ID
		idmap[lastID] = struct{}{}
	}

	// backwards loop so that we keep the remaining indexes after removing items
	for i := len(imgs) - 2; i >= 0; i-- {
		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
			m.History = append(m.History[:i], m.History[i+1:]...)
		} else if imgs[i].Parent != imgs[i+1].ID {
			return fmt.Errorf("invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
		}
	}

	return nil
}
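
// createDownloadFile creates the temporary file that a layer blob is
// downloaded into.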
func createDownloadFile() (*os.File, error) {
	return ioutil.TempFile("", "GetImageBlob")
}
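
// toOCIPlatform converts a manifest list platform entry to an OCI platform
// specification.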
func toOCIPlatform(p manifestlist.PlatformSpec) specs.Platform {
	return specs.Platform{
		OS:           p.OS,
		Architecture: p.Architecture,
		Variant:      p.Variant,
		OSFeatures:   p.OSFeatures,
		OSVersion:    p.OSVersion,
	}
}