@@ -3,6 +3,7 @@ package graph
 import (
 	"errors"
 	"fmt"
+	"io"
 	"net"
 	"net/url"
 	"strings"
@@ -137,31 +138,29 @@ func (p *v1Puller) pullRepository(askedTag string) error {
 			}
 
 			// ensure no two downloads of the same image happen at the same time
-			if c, err := p.poolAdd("pull", "img:"+img.ID); err != nil {
-				if c != nil {
-					out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
-					<-c
-					out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
-				} else {
-					logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
-				}
+			broadcaster, found := p.poolAdd("pull", "img:"+img.ID)
+			if found {
+				broadcaster.Add(out)
+				broadcaster.Wait()
+				out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
 				errors <- nil
 				return
 			}
+			broadcaster.Add(out)
 			defer p.poolRemove("pull", "img:"+img.ID)
 
 			// we need to retain it until tagging
 			p.graph.Retain(sessionID, img.ID)
 			imgIDs = append(imgIDs, img.ID)
 
-			out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, p.repoInfo.CanonicalName), nil))
+			broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, p.repoInfo.CanonicalName), nil))
 			success := false
 			var lastErr, err error
 			var isDownloaded bool
 			for _, ep := range p.repoInfo.Index.Mirrors {
 				ep += "v1/"
-				out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil))
-				if isDownloaded, err = p.pullImage(img.ID, ep, repoData.Tokens); err != nil {
+				broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil))
+				if isDownloaded, err = p.pullImage(broadcaster, img.ID, ep, repoData.Tokens); err != nil {
 					// Don't report errors when pulling from mirrors.
 					logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.CanonicalName, ep, err)
 					continue
@@ -172,12 +171,12 @@ func (p *v1Puller) pullRepository(askedTag string) error {
 			}
 			if !success {
 				for _, ep := range repoData.Endpoints {
-					out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil))
-					if isDownloaded, err = p.pullImage(img.ID, ep, repoData.Tokens); err != nil {
+					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil))
+					if isDownloaded, err = p.pullImage(broadcaster, img.ID, ep, repoData.Tokens); err != nil {
 						// It's not ideal that only the last error is returned, it would be better to concatenate the errors.
 						// As the error is also given to the output stream the user will see the error.
 						lastErr = err
-						out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.CanonicalName, ep, err), nil))
+						broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.CanonicalName, ep, err), nil))
 						continue
 					}
 					layersDownloaded = layersDownloaded || isDownloaded
@@ -187,11 +186,11 @@ func (p *v1Puller) pullRepository(askedTag string) error {
 			}
 			if !success {
 				err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.CanonicalName, lastErr)
-				out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), err.Error(), nil))
+				broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), err.Error(), nil))
 				errors <- err
 				return
 			}
-			out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
+			broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
 
 			errors <- nil
 		}
@@ -226,12 +225,11 @@ func (p *v1Puller) pullRepository(askedTag string) error {
 	return nil
 }
 
-func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, error) {
+func (p *v1Puller) pullImage(out io.Writer, imgID, endpoint string, token []string) (bool, error) {
 	history, err := p.session.GetRemoteHistory(imgID, endpoint)
 	if err != nil {
 		return false, err
 	}
-	out := p.config.OutStream
 	out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Pulling dependent layers", nil))
 	// FIXME: Try to stream the images?
 	// FIXME: Launch the getRemoteImage() in goroutines
@@ -246,14 +244,18 @@ func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, erro
 		id := history[i]
 
 		// ensure no two downloads of the same layer happen at the same time
-		if c, err := p.poolAdd("pull", "layer:"+id); err != nil {
-			logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
-			<-c
+		broadcaster, found := p.poolAdd("pull", "layer:"+id)
+		if found {
+			logrus.Debugf("Image (id: %s) pull is already running, skipping", id)
+			broadcaster.Add(out)
+			broadcaster.Wait()
+		} else {
+			broadcaster.Add(out)
 		}
 		defer p.poolRemove("pull", "layer:"+id)
 
 		if !p.graph.Exists(id) {
-			out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Pulling metadata", nil))
+			broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Pulling metadata", nil))
 			var (
 				imgJSON []byte
 				imgSize int64
@@ -264,7 +266,7 @@ func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, erro
 			for j := 1; j <= retries; j++ {
 				imgJSON, imgSize, err = p.session.GetRemoteImageJSON(id, endpoint)
 				if err != nil && j == retries {
-					out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
+					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
 					return layersDownloaded, err
 				} else if err != nil {
 					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
@@ -273,7 +275,7 @@ func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, erro
 				img, err = image.NewImgJSON(imgJSON)
 				layersDownloaded = true
 				if err != nil && j == retries {
-					out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
+					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
 					return layersDownloaded, fmt.Errorf("Failed to parse json: %s", err)
 				} else if err != nil {
 					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
@@ -289,7 +291,7 @@ func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, erro
 				if j > 1 {
 					status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
 				}
-				out.Write(p.sf.FormatProgress(stringid.TruncateID(id), status, nil))
+				broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), status, nil))
 				layer, err := p.session.GetRemoteImageLayer(img.ID, endpoint, imgSize)
 				if uerr, ok := err.(*url.Error); ok {
 					err = uerr.Err
@@ -298,7 +300,7 @@ func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, erro
 					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
 					continue
 				} else if err != nil {
-					out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
+					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
 					return layersDownloaded, err
 				}
 				layersDownloaded = true
@@ -307,7 +309,7 @@ func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, erro
 				err = p.graph.Register(img,
 					progressreader.New(progressreader.Config{
 						In:        layer,
-						Out:       out,
+						Out:       broadcaster,
 						Formatter: p.sf,
 						Size:      imgSize,
 						NewLines:  false,
@@ -318,14 +320,14 @@ func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, erro
 					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
 					continue
 				} else if err != nil {
-					out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error downloading dependent layers", nil))
+					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error downloading dependent layers", nil))
 					return layersDownloaded, err
 				} else {
 					break
 				}
 			}
 		}
-		out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Download complete", nil))
+		broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Download complete", nil))
 	}
 	return layersDownloaded, nil
 }