Merge pull request #37659 from Microsoft/jjh/37356

LCOW: Mount to short container paths to avoid command-line length limit
John Stephens, 7 years ago
commit 8613b34a7e

+ 4 - 0
builder/remotecontext/detect.go

@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"fmt"
 	"io"
 	"io"
 	"os"
 	"os"
+	"runtime"
 	"strings"
 	"strings"
 
 
 	"github.com/containerd/continuity/driver"
 	"github.com/containerd/continuity/driver"
@@ -173,6 +174,9 @@ func StatAt(remote builder.Source, path string) (os.FileInfo, error) {
 func FullPath(remote builder.Source, path string) (string, error) {
 	fullPath, err := remote.Root().ResolveScopedPath(path, true)
 	if err != nil {
+		if runtime.GOOS == "windows" {
+			return "", fmt.Errorf("failed to resolve scoped path %s (%s): %s. Possible cause is a forbidden path outside the build context", path, fullPath, err)
+		}
 		return "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullPath) // backwards compat with old error
 	}
 	return fullPath, nil
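For context, a minimal sketch of the pattern this hunk introduces: surface the underlying resolver error on Windows while keeping the terse legacy message elsewhere. The resolveScopedPath helper below is a hypothetical stand-in for remote.Root().ResolveScopedPath, not the real builder API.

package main

import (
	"errors"
	"fmt"
	"runtime"
)

// resolveScopedPath is a hypothetical stand-in for remote.Root().ResolveScopedPath,
// failing the way a forbidden path outside the build context would.
func resolveScopedPath(path string) (string, error) {
	return "", errors.New("failed to resolve symlinks")
}

func fullPath(path string) (string, error) {
	fullPath, err := resolveScopedPath(path)
	if err != nil {
		if runtime.GOOS == "windows" {
			// Windows: include the root cause rather than masking it.
			return "", fmt.Errorf("failed to resolve scoped path %s (%s): %s. Possible cause is a forbidden path outside the build context", path, fullPath, err)
		}
		// Elsewhere: keep the old message for backwards compatibility.
		return "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullPath)
	}
	return fullPath, nil
}

func main() {
	_, err := fullPath(`..\outside-context`)
	fmt.Println(err)
}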

+ 55 - 7
daemon/graphdriver/lcow/lcow.go

@@ -212,6 +212,17 @@ func (d *Driver) getVMID(id string) string {
 	return id
 }
 
+// remapLongToShortContainerPath maps the long container path for a SCSI-attached
+// disk to the short container path where it is actually mounted.
+func remapLongToShortContainerPath(longContainerPath string, attachCounter uint64, svmName string) string {
+	shortContainerPath := longContainerPath
+	if shortContainerPath != "" && shortContainerPath != toolsScratchPath {
+		shortContainerPath = fmt.Sprintf("/tmp/d%d", attachCounter)
+		logrus.Debugf("lcowdriver: UVM %s: remapping %s --> %s", svmName, longContainerPath, shortContainerPath)
+	}
+	return shortContainerPath
+}
+
 // startServiceVMIfNotRunning starts a service utility VM if it is not currently running.
 // It can optionally be started with a mapped virtual disk. Returns an opengcs config structure
 // representing the VM.
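A self-contained sketch of the remapping behavior above (the toolsScratchPath constant stands in for the driver's own; the svmName logging parameter is dropped, and the layer path is hypothetical):

package main

import "fmt"

// toolsScratchPath stands in for the driver's constant; the service VM's
// scratch mount is deliberately never remapped.
const toolsScratchPath = "/tmp/scratch"

func remapLongToShortContainerPath(longContainerPath string, attachCounter uint64) string {
	shortContainerPath := longContainerPath
	if shortContainerPath != "" && shortContainerPath != toolsScratchPath {
		shortContainerPath = fmt.Sprintf("/tmp/d%d", attachCounter)
	}
	return shortContainerPath
}

func main() {
	// A long layer path collapses to /tmp/d<attachCounter>.
	fmt.Println(remapLongToShortContainerPath("/tmp/5bc5e05f07dbd0c2a56a1712a22a1eee2aa82c2dc5ac56a8ee83839d6f0b4e86", 3)) // /tmp/d3
	// The scratch path and attach-only disks (empty path) pass through unchanged.
	fmt.Println(remapLongToShortContainerPath(toolsScratchPath, 4)) // /tmp/scratch
	fmt.Println(remapLongToShortContainerPath("", 5))               // ""
}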
@@ -239,6 +250,8 @@ func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd []hcsshim.Mapped
 
 	if exists {
 		// Service VM is already up and running. In this case, just hot add the vhds.
+		// Note that hotAddVHDs will remap long to short container paths, so no need
+		// for us to do that here.
 		logrus.Debugf("%s: service vm already exists. Just hot adding: %+v", title, mvdToAdd)
 		if err := svm.hotAddVHDs(mvdToAdd...); err != nil {
 			logrus.Debugf("%s: failed to hot add vhds on service vm creation: %s", title, err)
@@ -302,10 +315,23 @@ func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd []hcsshim.Mapped
 	logrus.Debugf("%s: releasing cachedScratchMutex", title)
 	logrus.Debugf("%s: releasing cachedScratchMutex", title)
 	d.cachedScratchMutex.Unlock()
 	d.cachedScratchMutex.Unlock()
 
 
-	// If requested to start it with a mapped virtual disk, add it now.
-	svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvdToAdd...)
-	for _, mvd := range svm.config.MappedVirtualDisks {
-		svm.attachedVHDs[mvd.HostPath] = 1
+	// Add mapped virtual disks. First those that are already in the configuration. Generally,
+	// the only one that will be here is the service VMs scratch. The exception is when invoked
+	// via the graphdrivers DiffGetter implementation.
+	for i, mvd := range svm.config.MappedVirtualDisks {
+		svm.attachCounter++
+		svm.attachedVHDs[mvd.HostPath] = &attachedVHD{refCount: 1, attachCounter: svm.attachCounter}
+
+		// No-op for the service VMs scratch disk. Only applicable in the DiffGetter interface invocation.
+		svm.config.MappedVirtualDisks[i].ContainerPath = remapLongToShortContainerPath(mvd.ContainerPath, svm.attachCounter, svm.config.Name)
+	}
+
+	// Then the remaining ones to add, and adding them to the startup configuration.
+	for _, mvd := range mvdToAdd {
+		svm.attachCounter++
+		svm.attachedVHDs[mvd.HostPath] = &attachedVHD{refCount: 1, attachCounter: svm.attachCounter}
+		mvd.ContainerPath = remapLongToShortContainerPath(mvd.ContainerPath, svm.attachCounter, svm.config.Name)
+		svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvd)
 	}
 	}
 
 
 	// Start it.
 	// Start it.
@@ -351,6 +377,7 @@ func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd []hcsshim.Mapped
 			return nil, fmt.Errorf("failed to hot-add %s: %s", scratchTargetFile, err)
 		}
 		svm.scratchAttached = true
+		// Don't need to ref-count here as it will be done via the hotAddVHDsAtStart() call above.
 	}
 
 	logrus.Debugf("%s: (%s) success", title, context)
@@ -787,8 +814,13 @@ func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
 	}
 
 	// Obtain the tar stream for it
-	logrus.Debugf("%s: %s %s, size %d, ReadOnly %t", title, ld.filename, mvd.ContainerPath, ld.size, ld.isSandbox)
-	tarReadCloser, err := svm.config.VhdToTar(mvd.HostPath, mvd.ContainerPath, ld.isSandbox, ld.size)
+	// The actual container path will have been remapped to a short name, so use that.
+	actualContainerPath := svm.getShortContainerPath(&mvd)
+	if actualContainerPath == "" {
+		return nil, fmt.Errorf("failed to get short container path for %+v in SVM %s", mvd, svm.config.Name)
+	}
+	logrus.Debugf("%s: %s %s, size %d, ReadOnly %t", title, ld.filename, actualContainerPath, ld.size, ld.isSandbox)
+	tarReadCloser, err := svm.config.VhdToTar(mvd.HostPath, actualContainerPath, ld.isSandbox, ld.size)
 	if err != nil {
 		svm.hotRemoveVHDs(mvd)
 		d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
@@ -960,6 +992,17 @@ func (d *Driver) getAllMounts(id string) ([]hcsshim.MappedVirtualDisk, error) {
 }
 
 func hostToGuest(hostpath string) string {
+	// This is the "long" container path. At the point at which we are
+	// calculating this, we don't know which service VM we're going to be
+	// using, so we can't translate this to a short path yet, instead
+	// deferring until the point at which it's added to an SVM. We don't
+	// use long container paths in SVMs for SCSI disks, otherwise it can cause
+	// command line operations that we invoke to fail due to being over ~4200
+	// characters when there are ~47 layers involved. An example of this is
+	// the mount call to create the overlay across multiple SCSI-attached disks.
+	// It doesn't affect VPMem attached layers during container creation as
+	// these get mapped by openGCS to /tmp/N/M where N is a container instance
+	// number, and M is a layer number.
 	return fmt.Sprintf("/tmp/%s", filepath.Base(filepath.Dir(hostpath)))
 }
 
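A back-of-envelope check of the limit that comment describes, assuming 64-character layer directory names (an illustrative figure; real cache IDs vary):

package main

import (
	"fmt"
	"strings"
)

func main() {
	const layers = 47
	var long, short []string
	for i := 1; i <= layers; i++ {
		// Long form: /tmp/<layer directory name>, assumed 64 characters here.
		long = append(long, "/tmp/"+strings.Repeat("a", 64))
		// Short form: /tmp/d<attach counter>.
		short = append(short, fmt.Sprintf("/tmp/d%d", i))
	}
	longCmd := "mount -t overlay overlay -olowerdir=" + strings.Join(long, ",") + " /tmp/mnt"
	shortCmd := "mount -t overlay overlay -olowerdir=" + strings.Join(short, ",") + " /tmp/mnt"
	fmt.Println(len(longCmd))  // ~3300 characters, growing with name length
	fmt.Println(len(shortCmd)) // ~460 characters
}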
@@ -1002,7 +1045,12 @@ func (fgc *fileGetCloserFromSVM) Close() error {
 func (fgc *fileGetCloserFromSVM) Get(filename string) (io.ReadCloser, error) {
 	errOut := &bytes.Buffer{}
 	outOut := &bytes.Buffer{}
-	file := path.Join(fgc.mvd.ContainerPath, filename)
+	// Must map to the actual "short" container path where the SCSI disk was mounted
+	actualContainerPath := fgc.svm.getShortContainerPath(fgc.mvd)
+	if actualContainerPath == "" {
+		return nil, fmt.Errorf("inconsistency detected: couldn't get short container path for %+v in utility VM %s", fgc.mvd, fgc.svm.config.Name)
+	}
+	file := path.Join(actualContainerPath, filename)
 	if err := fgc.svm.runProcess(fmt.Sprintf("cat %s", file), nil, outOut, errOut); err != nil {
 		logrus.Debugf("cat %s failed: %s", file, errOut.String())
 		return nil, err
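The effect on the actual command run inside the utility VM, sketched with hypothetical paths:

package main

import (
	"fmt"
	"path"
)

func main() {
	// Long container path recorded on the mapped virtual disk (hypothetical).
	longPath := "/tmp/d9d1ed066bbd5d0b3ebb785f98c5e9f9d9a4d85f1e0f8a27f05b3a9e83b6a1c4"
	// Short path where the SCSI disk was actually mounted (attach counter 4, hypothetical).
	shortPath := "/tmp/d4"

	fmt.Println("cat " + path.Join(longPath, "layer.tar"))  // points at a path that was never mounted
	fmt.Println("cat " + path.Join(shortPath, "layer.tar")) // matches the real mount point
}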

+ 56 - 16
daemon/graphdriver/lcow/lcow_svm.go

@@ -3,6 +3,7 @@
 package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow"
 
 import (
+	"bytes"
 	"errors"
 	"fmt"
 	"io"
@@ -34,6 +35,13 @@ type serviceVMMapItem struct {
 	refCount int        // refcount for VM
 }
 
+// attachedVHD is for reference counting SCSI disks attached to a service VM,
+// and for a counter used to generate a short path name for the container path.
+type attachedVHD struct {
+	refCount      int
+	attachCounter uint64
+}
+
 type serviceVM struct {
 	sync.Mutex                     // Serialises operations being performed in this service VM.
 	scratchAttached bool           // Has a scratch been attached?
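A compact sketch of how this struct supports both ref-counting and stable short names, with the service-VM plumbing stripped away (the attach method and host paths are invented for illustration):

package main

import "fmt"

type attachedVHD struct {
	refCount      int
	attachCounter uint64
}

type svm struct {
	attachCounter uint64
	attachedVHDs  map[string]*attachedVHD
}

// attach records a hot-add: repeat attaches only bump the refcount, so the
// original attachCounter (and thus the /tmp/dN mount point) stays stable.
func (s *svm) attach(hostPath string) string {
	if avhd, ok := s.attachedVHDs[hostPath]; ok {
		avhd.refCount++
		return fmt.Sprintf("/tmp/d%d", avhd.attachCounter)
	}
	s.attachCounter++
	s.attachedVHDs[hostPath] = &attachedVHD{refCount: 1, attachCounter: s.attachCounter}
	return fmt.Sprintf("/tmp/d%d", s.attachCounter)
}

func main() {
	s := &svm{attachedVHDs: make(map[string]*attachedVHD)}
	fmt.Println(s.attach(`C:\layers\1\layer.vhd`)) // /tmp/d1
	fmt.Println(s.attach(`C:\layers\2\layer.vhd`)) // /tmp/d2
	fmt.Println(s.attach(`C:\layers\1\layer.vhd`)) // /tmp/d1 again, refCount now 2
}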
@@ -47,8 +55,9 @@ type serviceVM struct {
 	stopStatus chan interface{}
 	stopError  error
 
-	attachedVHDs map[string]int // Map ref counting all the VHDS we've hot-added/hot-removed.
-	unionMounts  map[string]int // Map ref counting all the union filesystems we mounted.
+	attachCounter uint64                  // Increasing counter for each add
+	attachedVHDs  map[string]*attachedVHD // Map ref counting all the VHDS we've hot-added/hot-removed.
+	unionMounts   map[string]int          // Map ref counting all the union filesystems we mounted.
 }
 
 // add will add an id to the service vm map. There are three cases:
@@ -73,7 +82,7 @@ func (svmMap *serviceVMMap) add(id string) (svm *serviceVM, alreadyExists bool,
 	newSVM := &serviceVM{
 	newSVM := &serviceVM{
 		startStatus:  make(chan interface{}),
 		startStatus:  make(chan interface{}),
 		stopStatus:   make(chan interface{}),
 		stopStatus:   make(chan interface{}),
-		attachedVHDs: make(map[string]int),
+		attachedVHDs: make(map[string]*attachedVHD),
 		unionMounts:  make(map[string]int),
 		unionMounts:  make(map[string]int),
 		config:       &client.Config{},
 		config:       &client.Config{},
 	}
 	}
@@ -203,15 +212,18 @@ func (svm *serviceVM) hotAddVHDsAtStart(mvds ...hcsshim.MappedVirtualDisk) error
 	defer svm.Unlock()
 	for i, mvd := range mvds {
 		if _, ok := svm.attachedVHDs[mvd.HostPath]; ok {
-			svm.attachedVHDs[mvd.HostPath]++
+			svm.attachedVHDs[mvd.HostPath].refCount++
+			logrus.Debugf("lcowdriver: UVM %s: %s already present, refCount now %d", svm.config.Name, mvd.HostPath, svm.attachedVHDs[mvd.HostPath].refCount)
 			continue
 		}
 
-		if err := svm.config.HotAddVhd(mvd.HostPath, mvd.ContainerPath, mvd.ReadOnly, !mvd.AttachOnly); err != nil {
+		svm.attachCounter++
+		shortContainerPath := remapLongToShortContainerPath(mvd.ContainerPath, svm.attachCounter, svm.config.Name)
+		if err := svm.config.HotAddVhd(mvd.HostPath, shortContainerPath, mvd.ReadOnly, !mvd.AttachOnly); err != nil {
 			svm.hotRemoveVHDsNoLock(mvds[:i]...)
 			return err
 		}
-		svm.attachedVHDs[mvd.HostPath] = 1
+		svm.attachedVHDs[mvd.HostPath] = &attachedVHD{refCount: 1, attachCounter: svm.attachCounter}
 	}
 	return nil
 }
@@ -238,15 +250,17 @@ func (svm *serviceVM) hotRemoveVHDsNoLock(mvds ...hcsshim.MappedVirtualDisk) err
 			// defers the VM start to the first operation, it's possible that nothing has been hot-added
 			// when Put() is called. To avoid Put returning an error in that case, we simply continue if we
 			// don't find the vhd attached.
+			logrus.Debugf("lcowdriver: UVM %s: %s is not attached, not doing anything", svm.config.Name, mvd.HostPath)
 			continue
 		}
 
-		if svm.attachedVHDs[mvd.HostPath] > 1 {
-			svm.attachedVHDs[mvd.HostPath]--
+		if svm.attachedVHDs[mvd.HostPath].refCount > 1 {
+			svm.attachedVHDs[mvd.HostPath].refCount--
+			logrus.Debugf("lcowdriver: UVM %s: %s refCount dropped to %d, not removing from UVM", svm.config.Name, mvd.HostPath, svm.attachedVHDs[mvd.HostPath].refCount)
 			continue
 		}
 
-		// last reference to VHD, so remove from VM and map
+		// last reference to the VHD, so remove from the VM and the map
 		if err := svm.config.HotRemoveVhd(mvd.HostPath); err == nil {
 			delete(svm.attachedVHDs, mvd.HostPath)
 		} else {
@@ -270,6 +284,19 @@ func (svm *serviceVM) createExt4VHDX(destFile string, sizeGB uint32, cacheFile s
 	return svm.config.CreateExt4Vhdx(destFile, sizeGB, cacheFile)
 }
 
+// getShortContainerPath looks up where a SCSI disk was actually mounted
+// in a service VM when we remapped a long path name to a short name.
+func (svm *serviceVM) getShortContainerPath(mvd *hcsshim.MappedVirtualDisk) string {
+	if mvd.ContainerPath == "" {
+		return ""
+	}
+	avhd, ok := svm.attachedVHDs[mvd.HostPath]
+	if !ok {
+		return ""
+	}
+	return fmt.Sprintf("/tmp/d%d", avhd.attachCounter)
+}
+
@@ -288,11 +315,11 @@ func (svm *serviceVM) createUnionMount(mountName string, mvds ...hcsshim.MappedV
 
 	var lowerLayers []string
 	if mvds[0].ReadOnly {
-		lowerLayers = append(lowerLayers, mvds[0].ContainerPath)
+		lowerLayers = append(lowerLayers, svm.getShortContainerPath(&mvds[0]))
 	}
 
 	for i := 1; i < len(mvds); i++ {
-		lowerLayers = append(lowerLayers, mvds[i].ContainerPath)
+		lowerLayers = append(lowerLayers, svm.getShortContainerPath(&mvds[i]))
 	}
 
 	logrus.Debugf("Doing the overlay mount with union directory=%s", mountName)
@@ -303,15 +330,15 @@ func (svm *serviceVM) createUnionMount(mountName string, mvds ...hcsshim.MappedV
 	var cmd string
 	if len(mvds) == 1 {
 		// `FROM SCRATCH` case and the only layer. No overlay required.
-		cmd = fmt.Sprintf("mount %s %s", mvds[0].ContainerPath, mountName)
+		cmd = fmt.Sprintf("mount %s %s", svm.getShortContainerPath(&mvds[0]), mountName)
 	} else if mvds[0].ReadOnly {
 		// Readonly overlay
 		cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s %s",
 			strings.Join(lowerLayers, ","),
 			mountName)
 	} else {
-		upper := fmt.Sprintf("%s/upper", mvds[0].ContainerPath)
-		work := fmt.Sprintf("%s/work", mvds[0].ContainerPath)
+		upper := fmt.Sprintf("%s/upper", svm.getShortContainerPath(&mvds[0]))
+		work := fmt.Sprintf("%s/work", svm.getShortContainerPath(&mvds[0]))
 
 		if err = svm.runProcess(fmt.Sprintf("mkdir -p %s %s", upper, work), nil, nil, nil); err != nil {
 			return err
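What the three command shapes reduce to once short paths are substituted, for a hypothetical three-layer set. The writable-overlay mount line is approximated; only the mkdir appears in this hunk.

package main

import (
	"fmt"
	"strings"
)

func main() {
	mountName := "/tmp/mnt/layers" // hypothetical union directory
	short := []string{"/tmp/d1", "/tmp/d2", "/tmp/d3"}

	// Single layer (`FROM scratch`): a plain mount, no overlay needed.
	fmt.Printf("mount %s %s\n", short[0], mountName)

	// Read-only overlay across all layers, exactly as built in the hunk above.
	fmt.Printf("mount -t overlay overlay -olowerdir=%s %s\n",
		strings.Join(short, ","), mountName)

	// Writable case: the first disk carries upper/work directories.
	upper := fmt.Sprintf("%s/upper", short[0])
	work := fmt.Sprintf("%s/work", short[0])
	fmt.Printf("mkdir -p %s %s\n", upper, work)
	fmt.Printf("mount -t overlay overlay -olowerdir=%s,upperdir=%s,workdir=%s %s\n",
		strings.Join(short[1:], ","), upper, work, mountName)
}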
@@ -359,7 +386,15 @@ func (svm *serviceVM) deleteUnionMount(mountName string, disks ...hcsshim.Mapped
 }
 
 func (svm *serviceVM) runProcess(command string, stdin io.Reader, stdout io.Writer, stderr io.Writer) error {
-	process, err := svm.config.RunProcess(command, stdin, stdout, stderr)
+	var process hcsshim.Process
+	var err error
+	errOut := &bytes.Buffer{}
+
+	if stderr != nil {
+		process, err = svm.config.RunProcess(command, stdin, stdout, stderr)
+	} else {
+		process, err = svm.config.RunProcess(command, stdin, stdout, errOut)
+	}
 	if err != nil {
 		return err
 	}
@@ -372,7 +407,12 @@ func (svm *serviceVM) runProcess(command string, stdin io.Reader, stdout io.Writ
 	}
 
 	if exitCode != 0 {
-		return fmt.Errorf("svm.runProcess: command %s failed with exit code %d", command, exitCode)
+		// If the caller isn't explicitly capturing stderr output, then capture it here instead.
+		e := fmt.Sprintf("svm.runProcess: command %s failed with exit code %d", command, exitCode)
+		if stderr == nil {
+			e = fmt.Sprintf("%s. (%s)", e, errOut.String())
+		}
+		return fmt.Errorf("%s", e)
 	}
 	return nil
 }
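The same capture-stderr-on-failure pattern in isolation, with os/exec standing in for the opengcs RunProcess call. This runProcess is a local sketch for a POSIX host, not the driver's method.

package main

import (
	"bytes"
	"fmt"
	"io"
	"os/exec"
)

// runProcess mirrors the pattern above: if the caller doesn't supply a stderr
// writer, capture stderr locally so a failure can include it in the error.
func runProcess(command string, stdout, stderr io.Writer) error {
	errOut := &bytes.Buffer{}
	w := stderr
	if w == nil {
		w = errOut
	}
	cmd := exec.Command("sh", "-c", command)
	cmd.Stdout = stdout
	cmd.Stderr = w
	if err := cmd.Run(); err != nil {
		e := fmt.Sprintf("command %s failed: %s", command, err)
		if stderr == nil {
			e = fmt.Sprintf("%s. (%s)", e, errOut.String())
		}
		return fmt.Errorf("%s", e)
	}
	return nil
}

func main() {
	// Caller passes nil for stderr, so the failure message carries the output.
	fmt.Println(runProcess("ls /nonexistent", nil, nil))
}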