Browse Source

Merge pull request #42451 from thaJeztah/remove_lcow_step1

Remove LCOW code (step 1)
Brian Goff cách đây 4 năm
mục cha
commit
a7ea29a5a6
31 tập tin đã thay đổi với 184 bổ sung và 5539 xóa
  1. 0 1
      .github/CODEOWNERS
  2. 0 4
      api/server/router/image/image_routes.go
  3. 1 5
      builder/builder-next/builder.go
  4. 0 3
      builder/dockerfile/builder.go
  5. 1 7
      builder/dockerfile/dispatchers.go
  6. 0 2
      cmd/dockerd/daemon.go
  7. 1 1
      container/container.go
  8. 0 6
      daemon/create.go
  9. 4 11
      daemon/daemon_windows.go
  10. 0 1174
      daemon/graphdriver/lcow/lcow.go
  11. 0 421
      daemon/graphdriver/lcow/lcow_svm.go
  12. 0 139
      daemon/graphdriver/lcow/remotefs.go
  13. 0 211
      daemon/graphdriver/lcow/remotefs_file.go
  14. 0 123
      daemon/graphdriver/lcow/remotefs_filedriver.go
  15. 0 212
      daemon/graphdriver/lcow/remotefs_pathdriver.go
  16. 0 1
      daemon/graphdriver/register/register_windows.go
  17. 142 239
      hack/ci/windows.ps1
  18. 2 6
      image/tarexport/load.go
  19. 0 48
      pkg/system/lcow.go
  20. 0 13
      pkg/system/lcow_unsupported.go
  21. 5 27
      pkg/system/path.go
  22. 6 0
      pkg/system/path_unix.go
  23. 22 1
      pkg/system/path_windows.go
  24. 0 6
      project/PACKAGERS.md
  25. 0 1325
      vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go
  26. 0 411
      vendor/github.com/Microsoft/hcsshim/ext4/internal/format/format.go
  27. 0 209
      vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/tar2ext4.go
  28. 0 76
      vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/vhdfooter.go
  29. 0 109
      vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/defs.go
  30. 0 578
      vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/remotefs.go
  31. 0 170
      vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/utils.go

+ 0 - 1
.github/CODEOWNERS

@@ -6,7 +6,6 @@
 builder/**                              @tonistiigi
 contrib/mkimage/**                      @tianon
 daemon/graphdriver/devmapper/**         @rhvgoyal 
-daemon/graphdriver/lcow/**              @johnstep
 daemon/graphdriver/overlay/**           @dmcgowan
 daemon/graphdriver/overlay2/**          @dmcgowan
 daemon/graphdriver/windows/**           @johnstep

+ 0 - 4
api/server/router/image/image_routes.go

@@ -16,7 +16,6 @@ import (
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/streamformatter"
-	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/registry"
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
@@ -50,9 +49,6 @@ func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite
 			if err != nil {
 				return err
 			}
-			if err := system.ValidatePlatform(sp); err != nil {
-				return err
-			}
 			platform = &sp
 		}
 	}

+ 1 - 5
builder/builder-next/builder.go

@@ -20,7 +20,6 @@ import (
 	"github.com/docker/docker/libnetwork"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/streamformatter"
-	"github.com/docker/docker/pkg/system"
 	controlapi "github.com/moby/buildkit/api/services/control"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/control"
@@ -299,13 +298,10 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
 	if opt.Options.Platform != "" {
 		// same as in newBuilder in builder/dockerfile.builder.go
 		// TODO: remove once opt.Options.Platform is of type specs.Platform
-		sp, err := platforms.Parse(opt.Options.Platform)
+		_, err := platforms.Parse(opt.Options.Platform)
 		if err != nil {
 			return nil, err
 		}
-		if err := system.ValidatePlatform(sp); err != nil {
-			return nil, err
-		}
 		frontendAttrs["platform"] = opt.Options.Platform
 	}
 

+ 0 - 3
builder/dockerfile/builder.go

@@ -159,9 +159,6 @@ func newBuilder(clientCtx context.Context, options builderOptions) (*Builder, er
 		if err != nil {
 			return nil, err
 		}
-		if err := system.ValidatePlatform(sp); err != nil {
-			return nil, err
-		}
 		b.platform = &sp
 	}
 

+ 1 - 7
builder/dockerfile/dispatchers.go

@@ -172,9 +172,6 @@ func initializeStage(d dispatchRequest, cmd *instructions.Stage) error {
 		if err != nil {
 			return errors.Wrapf(err, "failed to parse platform %s", v)
 		}
-		if err := system.ValidatePlatform(p); err != nil {
-			return err
-		}
 		platform = &p
 	}
 
@@ -264,10 +261,7 @@ func (d *dispatchRequest) getImageOrStage(name string, platform *specs.Platform)
 		// from it.
 		if runtime.GOOS == "windows" {
 			if platform == nil || platform.OS == "linux" {
-				if !system.LCOWSupported() {
-					return nil, errors.New("Linux containers are not supported on this system")
-				}
-				imageImage.OS = "linux"
+				return nil, errors.New("Linux containers are not supported on this system")
 			} else if platform.OS == "windows" {
 				return nil, errors.New("Windows does not support FROM scratch")
 			} else {

+ 0 - 2
cmd/dockerd/daemon.go

@@ -117,8 +117,6 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 		return fmt.Errorf("dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation")
 	}
 
-	system.InitLCOW(cli.Config.Experimental)
-
 	if err := setDefaultUmask(); err != nil {
 		return err
 	}

+ 1 - 1
container/container.go

@@ -746,7 +746,7 @@ func (container *Container) CreateDaemonEnvironment(tty bool, linkedEnv []string
 	}
 
 	env := make([]string, 0, envSize)
-	if runtime.GOOS != "windows" || (runtime.GOOS == "windows" && os == "linux") {
+	if runtime.GOOS != "windows" {
 		env = append(env, "PATH="+system.DefaultPathEnv(os))
 		env = append(env, "HOSTNAME="+container.Config.Hostname)
 		if tty {

+ 0 - 6
daemon/create.go

@@ -69,12 +69,6 @@ func (daemon *Daemon) containerCreate(opts createOpts) (containertypes.Container
 		if err == nil {
 			os = img.OS
 		}
-	} else {
-		// This mean scratch. On Windows, we can safely assume that this is a linux
-		// container. On other platforms, it's the host OS (which it already is)
-		if isWindows && system.LCOWSupported() {
-			os = "linux"
-		}
 	}
 
 	warnings, err := daemon.verifyContainerSettings(os, opts.params.HostConfig, opts.params.Config, false)

+ 4 - 11
daemon/daemon_windows.go

@@ -499,19 +499,12 @@ func (daemon *Daemon) runAsHyperVContainer(hostConfig *containertypes.HostConfig
 // conditionalMountOnStart is a platform specific helper function during the
 // container start to call mount.
 func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
-
-	// Bail out now for Linux containers. We cannot mount the containers filesystem on the
-	// host as it is a non-Windows filesystem.
-	if system.LCOWSupported() && container.OS != "windows" {
+	if daemon.runAsHyperVContainer(container.HostConfig) {
+		// We do not mount if a Hyper-V container as it needs to be mounted inside the
+		// utility VM, not the host.
 		return nil
 	}
-
-	// We do not mount if a Hyper-V container as it needs to be mounted inside the
-	// utility VM, not the host.
-	if !daemon.runAsHyperVContainer(container.HostConfig) {
-		return daemon.Mount(container)
-	}
-	return nil
+	return daemon.Mount(container)
 }
 
 // conditionalUnmountOnCleanup is a platform specific helper function called

+ 0 - 1174
daemon/graphdriver/lcow/lcow.go

@@ -1,1174 +0,0 @@
-// +build windows
-
-// Locale:      en-gb
-// About:       Graph-driver for Linux Containers On Windows (LCOW)
-//
-// This graphdriver runs in two modes. Yet to be determined which one will
-// be the shipping mode. The global mode is where a single utility VM
-// is used for all service VM tool operations. This isn't safe security-wise
-// as it's attaching a sandbox of multiple containers to it, containing
-// untrusted data. This may be fine for client devops scenarios. In
-// safe mode, a unique utility VM is instantiated for all service VM tool
-// operations. The downside of safe-mode is that operations are slower as
-// a new service utility VM has to be started and torn-down when needed.
-//
-// Options:
-//
-// The following options are read by the graphdriver itself:
-//
-//   * lcow.globalmode - Enables global service VM Mode
-//        -- Possible values:     true/false
-//        -- Default if omitted:  false
-//
-//   * lcow.sandboxsize - Specifies a custom sandbox size in GB for starting a container
-//        -- Possible values:      >= default sandbox size (opengcs defined, currently 20)
-//        -- Default if omitted:  20
-//
-// The following options are read by opengcs:
-//
-//   * lcow.kirdpath - Specifies a custom path to a kernel/initrd pair
-//        -- Possible values:      Any local path that is not a mapped drive
-//        -- Default if omitted:  %ProgramFiles%\Linux Containers
-//
-//   * lcow.bootparameters - Specifies additional boot parameters for booting in kernel+initrd mode
-//        -- Possible values:      Any valid linux kernel boot options
-//        -- Default if omitted:  <nil>
-//
-//   * lcow.timeout - Specifies a timeout for utility VM operations in seconds
-//        -- Possible values:      >=0
-//        -- Default if omitted:  300
-
-// TODO: Grab logs from SVM at terminate or errors
-
-package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow"
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path"
-	"path/filepath"
-	"strconv"
-	"strings"
-	"sync"
-	"syscall"
-	"time"
-
-	"github.com/Microsoft/go-winio/pkg/security"
-	"github.com/Microsoft/hcsshim"
-	"github.com/Microsoft/hcsshim/ext4/tar2ext4"
-	"github.com/Microsoft/opengcs/client"
-	"github.com/docker/docker/daemon/graphdriver"
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/containerfs"
-	"github.com/docker/docker/pkg/idtools"
-	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/reexec"
-	"github.com/sirupsen/logrus"
-)
-
-// noreexec controls reexec functionality. Off by default, on for debugging purposes.
-var noreexec = false
-
-// init registers this driver to the register. It gets initialised by the
-// function passed in the second parameter, implemented in this file.
-func init() {
-	graphdriver.Register("lcow", InitDriver)
-	// DOCKER_LCOW_NOREEXEC allows for inline processing which makes
-	// debugging issues in the re-exec codepath significantly easier.
-	if os.Getenv("DOCKER_LCOW_NOREEXEC") != "" {
-		logrus.Warnf("LCOW Graphdriver is set to not re-exec. This is intended for debugging purposes only.")
-		noreexec = true
-	} else {
-		reexec.Register("docker-lcow-tar2ext4", tar2ext4Reexec)
-	}
-}
-
-const (
-	// sandboxFilename is the name of the file containing a layer's sandbox (read-write layer).
-	sandboxFilename = "sandbox.vhdx"
-
-	// scratchFilename is the name of the scratch-space used by an SVM to avoid running out of memory.
-	scratchFilename = "scratch.vhdx"
-
-	// layerFilename is the name of the file containing a layer's read-only contents.
-	// Note this really is VHD format, not VHDX.
-	layerFilename = "layer.vhd"
-
-	// toolsScratchPath is a location in a service utility VM that the tools can use as a
-	// scratch space to avoid running out of memory.
-	toolsScratchPath = "/tmp/scratch"
-
-	// svmGlobalID is the ID used in the serviceVMs map for the global service VM when running in "global" mode.
-	svmGlobalID = "_lcow_global_svm_"
-
-	// cacheDirectory is the sub-folder under the driver's data-root used to cache blank sandbox and scratch VHDs.
-	cacheDirectory = "cache"
-
-	// scratchDirectory is the sub-folder under the driver's data-root used for scratch VHDs in service VMs
-	scratchDirectory = "scratch"
-
-	// errOperationPending is the HRESULT returned by the HCS when the VM termination operation is still pending.
-	errOperationPending syscall.Errno = 0xc0370103
-)
-
-// Driver represents an LCOW graph driver.
-type Driver struct {
-	dataRoot           string     // Root path on the host where we are storing everything.
-	cachedSandboxFile  string     // Location of the local default-sized cached sandbox.
-	cachedSandboxMutex sync.Mutex // Protects race conditions from multiple threads creating the cached sandbox.
-	cachedScratchFile  string     // Location of the local cached empty scratch space.
-	cachedScratchMutex sync.Mutex // Protects race conditions from multiple threads creating the cached scratch.
-	options            []string   // Graphdriver options we are initialised with.
-	globalMode         bool       // Indicates if running in an unsafe/global service VM mode.
-	defaultSandboxSize uint64     // The default sandbox size to use if one is not specified
-
-	// NOTE: It is OK to use a cache here because Windows does not support
-	// restoring containers when the daemon dies.
-	serviceVms *serviceVMMap // Map of the configs representing the service VM(s) we are running.
-}
-
-// layerDetails is the structure returned by a helper function `getLayerDetails`
-// for getting information about a layer folder
-type layerDetails struct {
-	filename  string // \path\to\sandbox.vhdx or \path\to\layer.vhd
-	size      int64  // size of the above file
-	isSandbox bool   // true if sandbox.vhdx
-}
-
-// deletefiles is a helper function for initialisation where we delete any
-// left-over scratch files in case we were previously forcibly terminated.
-func deletefiles(path string, f os.FileInfo, err error) error {
-	if strings.HasSuffix(f.Name(), ".vhdx") {
-		logrus.Warnf("lcowdriver: init: deleting stale scratch file %s", path)
-		return os.Remove(path)
-	}
-	return nil
-}
-
-// InitDriver returns a new LCOW storage driver.
-func InitDriver(dataRoot string, options []string, _, _ []idtools.IDMap) (graphdriver.Driver, error) {
-	title := "lcowdriver: init:"
-
-	cd := filepath.Join(dataRoot, cacheDirectory)
-	sd := filepath.Join(dataRoot, scratchDirectory)
-
-	d := &Driver{
-		dataRoot:          dataRoot,
-		options:           options,
-		cachedSandboxFile: filepath.Join(cd, sandboxFilename),
-		cachedScratchFile: filepath.Join(cd, scratchFilename),
-		serviceVms: &serviceVMMap{
-			svms: make(map[string]*serviceVMMapItem),
-		},
-		globalMode:         false,
-		defaultSandboxSize: client.DefaultVhdxSizeGB,
-	}
-
-	// Looks for relevant options
-	for _, v := range options {
-		opt := strings.SplitN(v, "=", 2)
-		if len(opt) == 2 {
-			switch strings.ToLower(opt[0]) {
-			case "lcow.globalmode":
-				var err error
-				d.globalMode, err = strconv.ParseBool(opt[1])
-				if err != nil {
-					return nil, fmt.Errorf("%s failed to parse value for 'lcow.globalmode' - must be 'true' or 'false'", title)
-				}
-				break
-			case "lcow.sandboxsize":
-				var err error
-				d.defaultSandboxSize, err = strconv.ParseUint(opt[1], 10, 32)
-				if err != nil {
-					return nil, fmt.Errorf("%s failed to parse value '%s' for 'lcow.sandboxsize'", title, v)
-				}
-				if d.defaultSandboxSize < client.DefaultVhdxSizeGB {
-					return nil, fmt.Errorf("%s 'lcow.sandboxsize' option cannot be less than %d", title, client.DefaultVhdxSizeGB)
-				}
-				break
-			}
-		}
-	}
-
-	// Make sure the dataRoot directory is created
-	if err := idtools.MkdirAllAndChown(dataRoot, 0700, idtools.Identity{UID: 0, GID: 0}); err != nil {
-		return nil, fmt.Errorf("%s failed to create '%s': %v", title, dataRoot, err)
-	}
-
-	// Make sure the cache directory is created under dataRoot
-	if err := idtools.MkdirAllAndChown(cd, 0700, idtools.Identity{UID: 0, GID: 0}); err != nil {
-		return nil, fmt.Errorf("%s failed to create '%s': %v", title, cd, err)
-	}
-
-	// Make sure the scratch directory is created under dataRoot
-	if err := idtools.MkdirAllAndChown(sd, 0700, idtools.Identity{UID: 0, GID: 0}); err != nil {
-		return nil, fmt.Errorf("%s failed to create '%s': %v", title, sd, err)
-	}
-
-	// Delete any items in the scratch directory
-	filepath.Walk(sd, deletefiles)
-
-	logrus.Infof("%s dataRoot: %s globalMode: %t", title, dataRoot, d.globalMode)
-
-	return d, nil
-}
-
-func (d *Driver) getVMID(id string) string {
-	if d.globalMode {
-		return svmGlobalID
-	}
-	return id
-}
-
-// remapLongToShortContainerPath does the mapping of a long container path for a
-// SCSI attached disk, to a short container path where it's actually mounted.
-func remapLongToShortContainerPath(longContainerPath string, attachCounter uint64, svmName string) string {
-	shortContainerPath := longContainerPath
-	if shortContainerPath != "" && shortContainerPath != toolsScratchPath {
-		shortContainerPath = fmt.Sprintf("/tmp/d%d", attachCounter)
-		logrus.Debugf("lcowdriver: UVM %s: remapping %s --> %s", svmName, longContainerPath, shortContainerPath)
-	}
-	return shortContainerPath
-}
-
-// startServiceVMIfNotRunning starts a service utility VM if it is not currently running.
-// It can optionally be started with a mapped virtual disk. Returns a opengcs config structure
-// representing the VM.
-func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd []hcsshim.MappedVirtualDisk, context string) (_ *serviceVM, err error) {
-	// Use the global ID if in global mode
-	id = d.getVMID(id)
-
-	title := "lcowdriver: startServiceVMIfNotRunning " + id
-
-	// Attempt to add ID to the service vm map
-	logrus.Debugf("%s: adding entry to service vm map", title)
-	svm, exists, err := d.serviceVms.add(id)
-	if err != nil && err == errVMisTerminating {
-		// VM is in the process of terminating. Wait until it's done and then try again
-		logrus.Debugf("%s: VM with current ID still in the process of terminating", title)
-		if err := svm.getStopError(); err != nil {
-			logrus.Debugf("%s: VM did not stop successfully: %s", title, err)
-			return nil, err
-		}
-		return d.startServiceVMIfNotRunning(id, mvdToAdd, context)
-	} else if err != nil {
-		logrus.Debugf("%s: failed to add service vm to map: %s", title, err)
-		return nil, fmt.Errorf("%s: failed to add to service vm map: %s", title, err)
-	}
-
-	if exists {
-		// Service VM is already up and running. In this case, just hot add the vhds.
-		// Note that hotAddVHDs will remap long to short container paths, so no need
-		// for us to that here.
-		logrus.Debugf("%s: service vm already exists. Just hot adding: %+v", title, mvdToAdd)
-		if err := svm.hotAddVHDs(mvdToAdd...); err != nil {
-			logrus.Debugf("%s: failed to hot add vhds on service vm creation: %s", title, err)
-			return nil, fmt.Errorf("%s: failed to hot add vhds on service vm: %s", title, err)
-		}
-		return svm, nil
-	}
-
-	// We are the first service for this id, so we need to start it
-	logrus.Debugf("%s: service vm doesn't exist. Now starting it up", title)
-
-	defer func() {
-		// Signal that start has finished, passing in the error if any.
-		svm.signalStartFinished(err)
-		if err != nil {
-			// We added a ref to the VM, since we failed, we should delete the ref.
-			d.terminateServiceVM(id, "error path on startServiceVMIfNotRunning", false)
-		}
-	}()
-
-	// Generate a default configuration
-	if err := svm.config.GenerateDefault(d.options); err != nil {
-		return nil, fmt.Errorf("%s: failed to generate default gogcs configuration for global svm (%s): %s", title, context, err)
-	}
-
-	// For the name, we deliberately suffix if safe-mode to ensure that it doesn't
-	// clash with another utility VM which may be running for the container itself.
-	// This also makes it easier to correlate through Get-ComputeProcess.
-	if id == svmGlobalID {
-		svm.config.Name = svmGlobalID
-	} else {
-		svm.config.Name = fmt.Sprintf("%s_svm", id)
-	}
-
-	// Ensure we take the cached scratch mutex around the check to ensure the file is complete
-	// and not in the process of being created by another thread.
-	scratchTargetFile := filepath.Join(d.dataRoot, scratchDirectory, fmt.Sprintf("%s.vhdx", id))
-
-	logrus.Debugf("%s: locking cachedScratchMutex", title)
-	d.cachedScratchMutex.Lock()
-	if _, err := os.Stat(d.cachedScratchFile); err == nil {
-		// Make a copy of cached scratch to the scratch directory
-		logrus.Debugf("%s: (%s) cloning cached scratch for mvd", title, context)
-		if err := client.CopyFile(d.cachedScratchFile, scratchTargetFile, true); err != nil {
-			logrus.Debugf("%s: releasing cachedScratchMutex on err: %s", title, err)
-			d.cachedScratchMutex.Unlock()
-			return nil, err
-		}
-
-		// Add the cached clone as a mapped virtual disk
-		logrus.Debugf("%s: (%s) adding cloned scratch as mvd", title, context)
-		mvd := hcsshim.MappedVirtualDisk{
-			HostPath:          scratchTargetFile,
-			ContainerPath:     toolsScratchPath,
-			CreateInUtilityVM: true,
-		}
-		svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvd)
-		svm.scratchAttached = true
-	}
-
-	logrus.Debugf("%s: releasing cachedScratchMutex", title)
-	d.cachedScratchMutex.Unlock()
-
-	// Add mapped virtual disks. First those that are already in the configuration. Generally,
-	// the only one that will be here is the service VMs scratch. The exception is when invoked
-	// via the graphdrivers DiffGetter implementation.
-	for i, mvd := range svm.config.MappedVirtualDisks {
-		svm.attachCounter++
-		svm.attachedVHDs[mvd.HostPath] = &attachedVHD{refCount: 1, attachCounter: svm.attachCounter}
-
-		// No-op for the service VMs scratch disk. Only applicable in the DiffGetter interface invocation.
-		svm.config.MappedVirtualDisks[i].ContainerPath = remapLongToShortContainerPath(mvd.ContainerPath, svm.attachCounter, svm.config.Name)
-	}
-
-	// Then the remaining ones to add, and adding them to the startup configuration.
-	for _, mvd := range mvdToAdd {
-		svm.attachCounter++
-		svm.attachedVHDs[mvd.HostPath] = &attachedVHD{refCount: 1, attachCounter: svm.attachCounter}
-		mvd.ContainerPath = remapLongToShortContainerPath(mvd.ContainerPath, svm.attachCounter, svm.config.Name)
-		svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvd)
-	}
-
-	// Start it.
-	logrus.Debugf("%s: (%s) starting %s", title, context, svm.config.Name)
-	if err := svm.config.StartUtilityVM(); err != nil {
-		return nil, fmt.Errorf("failed to start service utility VM (%s): %s", context, err)
-	}
-
-	// defer function to terminate the VM if the next steps fail
-	defer func() {
-		if err != nil {
-			waitTerminate(svm, fmt.Sprintf("%s: (%s)", title, context))
-		}
-	}()
-
-	// Now we have a running service VM, we can create the cached scratch file if it doesn't exist.
-	logrus.Debugf("%s: locking cachedScratchMutex", title)
-	d.cachedScratchMutex.Lock()
-	if _, err := os.Stat(d.cachedScratchFile); err != nil {
-		logrus.Debugf("%s: (%s) creating an SVM scratch", title, context)
-
-		// Don't use svm.CreateExt4Vhdx since that only works when the service vm is setup,
-		// but we're still in that process right now.
-		if err := svm.config.CreateExt4Vhdx(scratchTargetFile, client.DefaultVhdxSizeGB, d.cachedScratchFile); err != nil {
-			logrus.Debugf("%s: (%s) releasing cachedScratchMutex on error path", title, context)
-			d.cachedScratchMutex.Unlock()
-			logrus.Debugf("%s: failed to create vm scratch %s: %s", title, scratchTargetFile, err)
-			return nil, fmt.Errorf("failed to create SVM scratch VHDX (%s): %s", context, err)
-		}
-	}
-	logrus.Debugf("%s: (%s) releasing cachedScratchMutex", title, context)
-	d.cachedScratchMutex.Unlock()
-
-	// Hot-add the scratch-space if not already attached
-	if !svm.scratchAttached {
-		logrus.Debugf("%s: (%s) hot-adding scratch %s", title, context, scratchTargetFile)
-		if err := svm.hotAddVHDsAtStart(hcsshim.MappedVirtualDisk{
-			HostPath:          scratchTargetFile,
-			ContainerPath:     toolsScratchPath,
-			CreateInUtilityVM: true,
-		}); err != nil {
-			logrus.Debugf("%s: failed to hot-add scratch %s: %s", title, scratchTargetFile, err)
-			return nil, fmt.Errorf("failed to hot-add %s failed: %s", scratchTargetFile, err)
-		}
-		svm.scratchAttached = true
-		// Don't need to ref-count here as it will be done via hotAddVHDsAtStart() call above.
-	}
-
-	logrus.Debugf("%s: (%s) success", title, context)
-	return svm, nil
-}
-
-// terminateServiceVM terminates a service utility VM if its running if it's,
-// not being used by any goroutine, but does nothing when in global mode as it's
-// lifetime is limited to that of the daemon. If the force flag is set, then
-// the VM will be killed regardless of the ref count or if it's global.
-func (d *Driver) terminateServiceVM(id, context string, force bool) (err error) {
-	// We don't do anything in safe mode unless the force flag has been passed, which
-	// is only the case for cleanup at driver termination.
-	if d.globalMode && !force {
-		logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - doing nothing as in global mode", id, context)
-		return nil
-	}
-
-	id = d.getVMID(id)
-
-	var svm *serviceVM
-	var lastRef bool
-	if !force {
-		// In the not force case, we ref count
-		svm, lastRef, err = d.serviceVms.decrementRefCount(id)
-	} else {
-		// In the force case, we ignore the ref count and just set it to 0
-		svm, err = d.serviceVms.setRefCountZero(id)
-		lastRef = true
-	}
-
-	if err == errVMUnknown {
-		return nil
-	} else if err == errVMisTerminating {
-		return svm.getStopError()
-	} else if !lastRef {
-		return nil
-	}
-
-	// We run the deletion of the scratch as a deferred function to at least attempt
-	// clean-up in case of errors.
-	defer func() {
-		if svm.scratchAttached {
-			scratchTargetFile := filepath.Join(d.dataRoot, scratchDirectory, fmt.Sprintf("%s.vhdx", id))
-			logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - deleting scratch %s", id, context, scratchTargetFile)
-			if errRemove := os.Remove(scratchTargetFile); errRemove != nil {
-				logrus.Warnf("failed to remove scratch file %s (%s): %s", scratchTargetFile, context, errRemove)
-				err = errRemove
-			}
-		}
-
-		// This function shouldn't actually return error unless there is a bug
-		if errDelete := d.serviceVms.deleteID(id); errDelete != nil {
-			logrus.Warnf("failed to service vm from svm map %s (%s): %s", id, context, errDelete)
-		}
-
-		// Signal that this VM has stopped
-		svm.signalStopFinished(err)
-	}()
-
-	// Now it's possible that the service VM failed to start and now we are trying to terminate it.
-	// In this case, we will relay the error to the goroutines waiting for this vm to stop.
-	if err := svm.getStartError(); err != nil {
-		logrus.Debugf("lcowdriver: terminateservicevm: %s had failed to start up: %s", id, err)
-		return err
-	}
-
-	if err := waitTerminate(svm, fmt.Sprintf("terminateservicevm: %s (%s)", id, context)); err != nil {
-		return err
-	}
-
-	logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - success", id, context)
-	return nil
-}
-
-func waitTerminate(svm *serviceVM, context string) error {
-	if svm.config == nil {
-		return fmt.Errorf("lcowdriver: waitTermiante: Nil utility VM. %s", context)
-	}
-
-	logrus.Debugf("lcowdriver: waitTerminate: Calling terminate: %s", context)
-	if err := svm.config.Uvm.Terminate(); err != nil {
-		// We might get operation still pending from the HCS. In that case, we shouldn't return
-		// an error since we call wait right after.
-		underlyingError := err
-		if conterr, ok := err.(*hcsshim.ContainerError); ok {
-			underlyingError = conterr.Err
-		}
-
-		if syscallErr, ok := underlyingError.(syscall.Errno); ok {
-			underlyingError = syscallErr
-		}
-
-		if underlyingError != errOperationPending {
-			return fmt.Errorf("failed to terminate utility VM (%s): %s", context, err)
-		}
-		logrus.Debugf("lcowdriver: waitTerminate: uvm.Terminate() returned operation pending (%s)", context)
-	}
-
-	logrus.Debugf("lcowdriver: waitTerminate: (%s) - waiting for utility VM to terminate", context)
-	if err := svm.config.Uvm.WaitTimeout(time.Duration(svm.config.UvmTimeoutSeconds) * time.Second); err != nil {
-		return fmt.Errorf("failed waiting for utility VM to terminate (%s): %s", context, err)
-	}
-	return nil
-}
-
-// String returns the string representation of a driver. This should match
-// the name the graph driver has been registered with.
-func (d *Driver) String() string {
-	return "lcow"
-}
-
-// Status returns the status of the driver.
-func (d *Driver) Status() [][2]string {
-	return [][2]string{
-		{"LCOW", ""},
-		// TODO: Add some more info here - mode, home, ....
-	}
-}
-
-// Exists returns true if the given id is registered with this driver.
-func (d *Driver) Exists(id string) bool {
-	_, err := os.Lstat(d.dir(id))
-	logrus.Debugf("lcowdriver: exists: id %s %t", id, err == nil)
-	return err == nil
-}
-
-// CreateReadWrite creates a layer that is writable for use as a container
-// file system. That equates to creating a sandbox.
-func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
-	title := fmt.Sprintf("lcowdriver: createreadwrite: id %s", id)
-	logrus.Debugf(title)
-
-	// First we need to create the folder
-	if err := d.Create(id, parent, opts); err != nil {
-		return err
-	}
-
-	// Look for an explicit sandbox size option.
-	sandboxSize := d.defaultSandboxSize
-	for k, v := range opts.StorageOpt {
-		switch strings.ToLower(k) {
-		case "lcow.sandboxsize":
-			var err error
-			sandboxSize, err = strconv.ParseUint(v, 10, 32)
-			if err != nil {
-				return fmt.Errorf("%s failed to parse value '%s' for 'lcow.sandboxsize'", title, v)
-			}
-			if sandboxSize < client.DefaultVhdxSizeGB {
-				return fmt.Errorf("%s 'lcow.sandboxsize' option cannot be less than %d", title, client.DefaultVhdxSizeGB)
-			}
-			break
-		}
-	}
-
-	// Massive perf optimisation here. If we know that the RW layer is the default size,
-	// and that the cached sandbox already exists, and we are running in safe mode, we
-	// can just do a simple copy into the layers sandbox file without needing to start a
-	// unique service VM. For a global service VM, it doesn't really matter. Of course,
-	// this is only the case where the sandbox is the default size.
-	//
-	// Make sure we have the sandbox mutex taken while we are examining it.
-	if sandboxSize == client.DefaultVhdxSizeGB {
-		logrus.Debugf("%s: locking cachedSandboxMutex", title)
-		d.cachedSandboxMutex.Lock()
-		_, err := os.Stat(d.cachedSandboxFile)
-		logrus.Debugf("%s: releasing cachedSandboxMutex", title)
-		d.cachedSandboxMutex.Unlock()
-		if err == nil {
-			logrus.Debugf("%s: using cached sandbox to populate", title)
-			if err := client.CopyFile(d.cachedSandboxFile, filepath.Join(d.dir(id), sandboxFilename), true); err != nil {
-				return err
-			}
-			return nil
-		}
-	}
-
-	logrus.Debugf("%s: creating SVM to create sandbox", title)
-	svm, err := d.startServiceVMIfNotRunning(id, nil, "createreadwrite")
-	if err != nil {
-		return err
-	}
-	defer d.terminateServiceVM(id, "createreadwrite", false)
-
-	// So the sandbox needs creating. If default size ensure we are the only thread populating the cache.
-	// Non-default size we don't store, just create them one-off so no need to lock the cachedSandboxMutex.
-	if sandboxSize == client.DefaultVhdxSizeGB {
-		logrus.Debugf("%s: locking cachedSandboxMutex for creation", title)
-		d.cachedSandboxMutex.Lock()
-		defer func() {
-			logrus.Debugf("%s: releasing cachedSandboxMutex for creation", title)
-			d.cachedSandboxMutex.Unlock()
-		}()
-	}
-
-	// Make sure we don't write to our local cached copy if this is for a non-default size request.
-	targetCacheFile := d.cachedSandboxFile
-	if sandboxSize != client.DefaultVhdxSizeGB {
-		targetCacheFile = ""
-	}
-
-	// Create the ext4 vhdx
-	logrus.Debugf("%s: creating sandbox ext4 vhdx", title)
-	if err := svm.createExt4VHDX(filepath.Join(d.dir(id), sandboxFilename), uint32(sandboxSize), targetCacheFile); err != nil {
-		logrus.Debugf("%s: failed to create sandbox vhdx for %s: %s", title, id, err)
-		return err
-	}
-	return nil
-}
-
-// Create creates the folder for the layer with the given id, and
-// adds it to the layer chain.
-func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
-	logrus.Debugf("lcowdriver: create: id %s parent: %s", id, parent)
-
-	parentChain, err := d.getLayerChain(parent)
-	if err != nil {
-		return err
-	}
-
-	var layerChain []string
-	if parent != "" {
-		if !d.Exists(parent) {
-			return fmt.Errorf("lcowdriver: cannot create layer folder with missing parent %s", parent)
-		}
-		layerChain = []string{d.dir(parent)}
-	}
-	layerChain = append(layerChain, parentChain...)
-
-	layerPath := d.dir(id)
-	logrus.Debugf("lcowdriver: create: id %s: creating %s", id, layerPath)
-	// Standard mkdir here, not with SDDL as the dataroot was created with
-	// inheritance to just local system and administrators.
-	if err := os.MkdirAll(layerPath, 0700); err != nil {
-		return err
-	}
-
-	if err := d.setLayerChain(id, layerChain); err != nil {
-		if err2 := os.RemoveAll(layerPath); err2 != nil {
-			logrus.Warnf("failed to remove layer %s: %s", layerPath, err2)
-		}
-		return err
-	}
-	logrus.Debugf("lcowdriver: create: id %s: success", id)
-
-	return nil
-}
-
-// Remove unmounts and removes the dir information.
-func (d *Driver) Remove(id string) error {
-	logrus.Debugf("lcowdriver: remove: id %s", id)
-	tmpID := fmt.Sprintf("%s-removing", id)
-	tmpLayerPath := d.dir(tmpID)
-	layerPath := d.dir(id)
-
-	logrus.Debugf("lcowdriver: remove: id %s: layerPath %s", id, layerPath)
-
-	// Unmount all the layers
-	err := d.Put(id)
-	if err != nil {
-		logrus.Debugf("lcowdriver: remove id %s: failed to unmount: %s", id, err)
-		return err
-	}
-
-	// for non-global case just kill the vm
-	if !d.globalMode {
-		if err := d.terminateServiceVM(id, fmt.Sprintf("Remove %s", id), true); err != nil {
-			return err
-		}
-	}
-
-	if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) {
-		return err
-	}
-
-	if err := os.RemoveAll(tmpLayerPath); err != nil {
-		return err
-	}
-
-	logrus.Debugf("lcowdriver: remove: id %s: layerPath %s succeeded", id, layerPath)
-	return nil
-}
-
-// Get returns the rootfs path for the id. It is reference counted and
-// effectively can be thought of as a "mount the layer into the utility
-// vm if it isn't already". The contract from the caller of this is that
-// all Gets and Puts are matched. It -should- be the case that on cleanup,
-// nothing is mounted.
-//
-// For optimisation, we don't actually mount the filesystem (which in our
-// case means [hot-]adding it to a service VM. But we track that and defer
-// the actual adding to the point we need to access it.
-func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
-	title := fmt.Sprintf("lcowdriver: get: %s", id)
-	logrus.Debugf(title)
-
-	// Generate the mounts needed for the deferred operation.
-	disks, err := d.getAllMounts(id)
-	if err != nil {
-		logrus.Debugf("%s failed to get all layer details for %s: %s", title, d.dir(id), err)
-		return nil, fmt.Errorf("%s failed to get layer details for %s: %s", title, d.dir(id), err)
-	}
-
-	logrus.Debugf("%s: got layer mounts: %+v", title, disks)
-	return &lcowfs{
-		root:        unionMountName(disks),
-		d:           d,
-		mappedDisks: disks,
-		vmID:        d.getVMID(id),
-	}, nil
-}
-
-// Put does the reverse of get. If there are no more references to
-// the layer, it unmounts it from the utility VM.
-func (d *Driver) Put(id string) error {
-	title := fmt.Sprintf("lcowdriver: put: %s", id)
-
-	// Get the service VM that we need to remove from
-	svm, err := d.serviceVms.get(d.getVMID(id))
-	if err == errVMUnknown {
-		return nil
-	} else if err == errVMisTerminating {
-		return svm.getStopError()
-	}
-
-	// Generate the mounts that Get() might have mounted
-	disks, err := d.getAllMounts(id)
-	if err != nil {
-		logrus.Debugf("%s failed to get all layer details for %s: %s", title, d.dir(id), err)
-		return fmt.Errorf("%s failed to get layer details for %s: %s", title, d.dir(id), err)
-	}
-
-	// Now, we want to perform the unmounts, hot-remove and stop the service vm.
-	// We want to go though all the steps even if we have an error to clean up properly
-	err = svm.deleteUnionMount(unionMountName(disks), disks...)
-	if err != nil {
-		logrus.Debugf("%s failed to delete union mount %s: %s", title, id, err)
-	}
-
-	err1 := svm.hotRemoveVHDs(disks...)
-	if err1 != nil {
-		logrus.Debugf("%s failed to hot remove vhds %s: %s", title, id, err)
-		if err == nil {
-			err = err1
-		}
-	}
-
-	err1 = d.terminateServiceVM(id, fmt.Sprintf("Put %s", id), false)
-	if err1 != nil {
-		logrus.Debugf("%s failed to terminate service vm %s: %s", title, id, err1)
-		if err == nil {
-			err = err1
-		}
-	}
-	logrus.Debugf("Put succeeded on id %s", id)
-	return err
-}
-
-// Cleanup ensures the information the driver stores is properly removed.
-// We use this opportunity to cleanup any -removing folders which may be
-// still left if the daemon was killed while it was removing a layer.
-func (d *Driver) Cleanup() error {
-	title := "lcowdriver: cleanup"
-
-	items, err := ioutil.ReadDir(d.dataRoot)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return nil
-		}
-		return err
-	}
-
-	// Note we don't return an error below - it's possible the files
-	// are locked. However, next time around after the daemon exits,
-	// we likely will be able to cleanup successfully. Instead we log
-	// warnings if there are errors.
-	for _, item := range items {
-		if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") {
-			if err := os.RemoveAll(filepath.Join(d.dataRoot, item.Name())); err != nil {
-				logrus.Warnf("%s failed to cleanup %s: %s", title, item.Name(), err)
-			} else {
-				logrus.Infof("%s cleaned up %s", title, item.Name())
-			}
-		}
-	}
-
-	// Cleanup any service VMs we have running, along with their scratch spaces.
-	// We don't take the lock for this as it's taken in terminateServiceVm.
-	for k, v := range d.serviceVms.svms {
-		logrus.Debugf("%s svm entry: %s: %+v", title, k, v)
-		d.terminateServiceVM(k, "cleanup", true)
-	}
-
-	return nil
-}
-
-// Diff takes a layer (and it's parent layer which may be null, but
-// is ignored by this implementation below) and returns a reader for
-// a tarstream representing the layers contents. The id could be
-// a read-only "layer.vhd" or a read-write "sandbox.vhdx". The semantics
-// of this function dictate that the layer is already mounted.
-// However, as we do lazy mounting as a performance optimisation,
-// this will likely not be the case.
-func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
-	title := fmt.Sprintf("lcowdriver: diff: %s", id)
-
-	// Get VHDX info
-	ld, err := getLayerDetails(d.dir(id))
-	if err != nil {
-		logrus.Debugf("%s: failed to get vhdx information of %s: %s", title, d.dir(id), err)
-		return nil, err
-	}
-
-	// Start the SVM with a mapped virtual disk. Note that if the SVM is
-	// already running and we are in global mode, this will be
-	// hot-added.
-	mvd := hcsshim.MappedVirtualDisk{
-		HostPath:          ld.filename,
-		ContainerPath:     hostToGuest(ld.filename),
-		CreateInUtilityVM: true,
-		ReadOnly:          true,
-	}
-
-	logrus.Debugf("%s: starting service VM", title)
-	svm, err := d.startServiceVMIfNotRunning(id, []hcsshim.MappedVirtualDisk{mvd}, fmt.Sprintf("diff %s", id))
-	if err != nil {
-		return nil, err
-	}
-
-	logrus.Debugf("lcowdriver: diff: waiting for svm to finish booting")
-	err = svm.getStartError()
-	if err != nil {
-		d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
-		return nil, fmt.Errorf("lcowdriver: diff: svm failed to boot: %s", err)
-	}
-
-	// Obtain the tar stream for it
-	// The actual container path will have be remapped to a short name, so use that.
-	actualContainerPath := svm.getShortContainerPath(&mvd)
-	if actualContainerPath == "" {
-		return nil, fmt.Errorf("failed to get short container path for %+v in SVM %s", mvd, svm.config.Name)
-	}
-	logrus.Debugf("%s: %s %s, size %d, ReadOnly %t", title, ld.filename, actualContainerPath, ld.size, ld.isSandbox)
-	tarReadCloser, err := svm.config.VhdToTar(mvd.HostPath, actualContainerPath, ld.isSandbox, ld.size)
-	if err != nil {
-		svm.hotRemoveVHDs(mvd)
-		d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
-		return nil, fmt.Errorf("%s failed to export layer to tar stream for id: %s, parent: %s : %s", title, id, parent, err)
-	}
-
-	logrus.Debugf("%s id %s parent %s completed successfully", title, id, parent)
-
-	// In safe/non-global mode, we can't tear down the service VM until things have been read.
-	return ioutils.NewReadCloserWrapper(tarReadCloser, func() error {
-		tarReadCloser.Close()
-		svm.hotRemoveVHDs(mvd)
-		d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
-		return nil
-	}), nil
-}
-
-// ApplyDiff extracts the changeset from the given diff into the
-// layer with the specified id and parent, returning the size of the
-// new layer in bytes. The layer should not be mounted when calling
-// this function. Another way of describing this is that ApplyDiff writes
-// to a new layer (a VHD in LCOW) the contents of a tarstream it's given.
-func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
-	logrus.Debugf("lcowdriver: applydiff: id %s", id)
-
-	// Log failures here as it's undiagnosable sometimes, due to a possible panic.
-	// See https://github.com/moby/moby/issues/37955 for more information.
-
-	dest := filepath.Join(d.dataRoot, id, layerFilename)
-	if !noreexec {
-		cmd := reexec.Command([]string{"docker-lcow-tar2ext4", dest}...)
-		stdout := bytes.NewBuffer(nil)
-		stderr := bytes.NewBuffer(nil)
-		cmd.Stdin = diff
-		cmd.Stdout = stdout
-		cmd.Stderr = stderr
-
-		if err := cmd.Start(); err != nil {
-			logrus.Warnf("lcowdriver: applydiff: id %s failed to start re-exec: %s", id, err)
-			return 0, err
-		}
-
-		if err := cmd.Wait(); err != nil {
-			logrus.Warnf("lcowdriver: applydiff: id %s failed %s", id, err)
-			return 0, fmt.Errorf("re-exec error: %v: stderr: %s", err, stderr)
-		}
-
-		size, err := strconv.ParseInt(stdout.String(), 10, 64)
-		if err != nil {
-			logrus.Warnf("lcowdriver: applydiff: id %s failed to parse output %s", id, err)
-			return 0, fmt.Errorf("re-exec error: %v: stdout: %s", err, stdout)
-		}
-		return applySID(id, size, dest)
-
-	}
-	// The inline case
-	size, err := tar2ext4Actual(dest, diff)
-	if err != nil {
-		logrus.Warnf("lcowdriver: applydiff: id %s failed %s", id, err)
-	}
-	return applySID(id, size, dest)
-}
-
-// applySID adds the VM Group SID read-only access.
-func applySID(id string, size int64, dest string) (int64, error) {
-	if err := security.GrantVmGroupAccess(dest); err != nil {
-		logrus.Warnf("lcowdriver: applySIDs: id %s failed %s", id, err)
-		return 0, err
-	}
-	return size, nil
-}
-
-// tar2ext4Reexec is the re-exec entry point for writing a layer from a tar file
-func tar2ext4Reexec() {
-	size, err := tar2ext4Actual(os.Args[1], os.Stdin)
-	if err != nil {
-		fmt.Fprint(os.Stderr, err)
-		os.Exit(1)
-	}
-	fmt.Fprint(os.Stdout, size)
-}
-
-// tar2ext4Actual is the implementation of tar2ext to write a layer from a tar file.
-// It can be called through re-exec (default), or inline for debugging.
-func tar2ext4Actual(dest string, diff io.Reader) (int64, error) {
-	// maxDiskSize is not relating to the sandbox size - this is the
-	// maximum possible size a layer VHD generated can be from an EXT4
-	// layout perspective.
-	const maxDiskSize = 128 * 1024 * 1024 * 1024 // 128GB
-	out, err := os.Create(dest)
-	if err != nil {
-		return 0, err
-	}
-	defer out.Close()
-	if err := tar2ext4.Convert(
-		diff,
-		out,
-		tar2ext4.AppendVhdFooter,
-		tar2ext4.ConvertWhiteout,
-		tar2ext4.MaximumDiskSize(maxDiskSize)); err != nil {
-		return 0, err
-	}
-	fi, err := os.Stat(dest)
-	if err != nil {
-		return 0, err
-	}
-	return fi.Size(), nil
-}
-
-// Changes produces a list of changes between the specified layer
-// and its parent layer. If parent is "", then all changes will be ADD changes.
-// The layer should not be mounted when calling this function.
-func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
-	logrus.Debugf("lcowdriver: changes: id %s parent %s", id, parent)
-	// TODO @gupta-ak. Needs implementation with assistance from service VM
-	return nil, nil
-}
-
-// DiffSize calculates the changes between the specified layer
-// and its parent and returns the size in bytes of the changes
-// relative to its base filesystem directory.
-func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
-	logrus.Debugf("lcowdriver: diffsize: id %s", id)
-	// TODO @gupta-ak. Needs implementation with assistance from service VM
-	return 0, nil
-}
-
-// GetMetadata returns custom driver information.
-func (d *Driver) GetMetadata(id string) (map[string]string, error) {
-	logrus.Debugf("lcowdriver: getmetadata: id %s", id)
-	m := make(map[string]string)
-	m["dir"] = d.dir(id)
-	return m, nil
-}
-
-// GetLayerPath gets the layer path on host (path to VHD/VHDX)
-func (d *Driver) GetLayerPath(id string) (string, error) {
-	return d.dir(id), nil
-}
-
-// dir returns the absolute path to the layer.
-func (d *Driver) dir(id string) string {
-	return filepath.Join(d.dataRoot, filepath.Base(id))
-}
-
-// getLayerChain returns the layer chain information.
-func (d *Driver) getLayerChain(id string) ([]string, error) {
-	jPath := filepath.Join(d.dir(id), "layerchain.json")
-	logrus.Debugf("lcowdriver: getlayerchain: id %s json %s", id, jPath)
-	content, err := ioutil.ReadFile(jPath)
-	if os.IsNotExist(err) {
-		return nil, nil
-	} else if err != nil {
-		return nil, fmt.Errorf("lcowdriver: getlayerchain: %s unable to read layerchain file %s: %s", id, jPath, err)
-	}
-
-	var layerChain []string
-	err = json.Unmarshal(content, &layerChain)
-	if err != nil {
-		return nil, fmt.Errorf("lcowdriver: getlayerchain: %s failed to unmarshall layerchain file %s: %s", id, jPath, err)
-	}
-	return layerChain, nil
-}
-
-// setLayerChain stores the layer chain information on disk.
-func (d *Driver) setLayerChain(id string, chain []string) error {
-	content, err := json.Marshal(&chain)
-	if err != nil {
-		return fmt.Errorf("lcowdriver: setlayerchain: %s failed to marshall layerchain json: %s", id, err)
-	}
-
-	jPath := filepath.Join(d.dir(id), "layerchain.json")
-	logrus.Debugf("lcowdriver: setlayerchain: id %s json %s", id, jPath)
-	err = ioutil.WriteFile(jPath, content, 0600)
-	if err != nil {
-		return fmt.Errorf("lcowdriver: setlayerchain: %s failed to write layerchain file: %s", id, err)
-	}
-	return nil
-}
-
-// getLayerDetails is a utility for getting a file name, size and indication of
-// sandbox for a VHD(x) in a folder. A read-only layer will be layer.vhd. A
-// read-write layer will be sandbox.vhdx.
-func getLayerDetails(folder string) (*layerDetails, error) {
-	var fileInfo os.FileInfo
-	ld := &layerDetails{
-		isSandbox: false,
-		filename:  filepath.Join(folder, layerFilename),
-	}
-
-	fileInfo, err := os.Stat(ld.filename)
-	if err != nil {
-		ld.filename = filepath.Join(folder, sandboxFilename)
-		if fileInfo, err = os.Stat(ld.filename); err != nil {
-			return nil, fmt.Errorf("failed to locate layer or sandbox in %s", folder)
-		}
-		ld.isSandbox = true
-	}
-	ld.size = fileInfo.Size()
-
-	return ld, nil
-}
-
-func (d *Driver) getAllMounts(id string) ([]hcsshim.MappedVirtualDisk, error) {
-	layerChain, err := d.getLayerChain(id)
-	if err != nil {
-		return nil, err
-	}
-	layerChain = append([]string{d.dir(id)}, layerChain...)
-
-	logrus.Debugf("getting all  layers: %v", layerChain)
-	disks := make([]hcsshim.MappedVirtualDisk, len(layerChain), len(layerChain))
-	for i := range layerChain {
-		ld, err := getLayerDetails(layerChain[i])
-		if err != nil {
-			logrus.Debugf("Failed to get LayerVhdDetails from %s: %s", layerChain[i], err)
-			return nil, err
-		}
-		disks[i].HostPath = ld.filename
-		disks[i].ContainerPath = hostToGuest(ld.filename)
-		disks[i].CreateInUtilityVM = true
-		disks[i].ReadOnly = !ld.isSandbox
-	}
-	return disks, nil
-}
-
-func hostToGuest(hostpath string) string {
-	// This is the "long" container path. At the point of which we are
-	// calculating this, we don't know which service VM we're going to be
-	// using, so we can't translate this to a short path yet, instead
-	// deferring until the point of which it's added to an SVM. We don't
-	// use long container paths in SVMs for SCSI disks, otherwise it can cause
-	// command line operations that we invoke to fail due to being over ~4200
-	// characters when there are ~47 layers involved. An example of this is
-	// the mount call to create the overlay across multiple SCSI-attached disks.
-	// It doesn't affect VPMem attached layers during container creation as
-	// these get mapped by openGCS to /tmp/N/M where N is a container instance
-	// number, and M is a layer number.
-	return fmt.Sprintf("/tmp/%s", filepath.Base(filepath.Dir(hostpath)))
-}
-
-func unionMountName(disks []hcsshim.MappedVirtualDisk) string {
-	return fmt.Sprintf("%s-mount", disks[0].ContainerPath)
-}
-
-type nopCloser struct {
-	io.Reader
-}
-
-func (nopCloser) Close() error {
-	return nil
-}
-
-type fileGetCloserFromSVM struct {
-	id  string
-	svm *serviceVM
-	mvd *hcsshim.MappedVirtualDisk
-	d   *Driver
-}
-
-func (fgc *fileGetCloserFromSVM) Close() error {
-	if fgc.svm != nil {
-		if fgc.mvd != nil {
-			if err := fgc.svm.hotRemoveVHDs(*fgc.mvd); err != nil {
-				// We just log this as we're going to tear down the SVM imminently unless in global mode
-				logrus.Errorf("failed to remove mvd %s: %s", fgc.mvd.ContainerPath, err)
-			}
-		}
-	}
-	if fgc.d != nil && fgc.svm != nil && fgc.id != "" {
-		if err := fgc.d.terminateServiceVM(fgc.id, fmt.Sprintf("diffgetter %s", fgc.id), false); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (fgc *fileGetCloserFromSVM) Get(filename string) (io.ReadCloser, error) {
-	errOut := &bytes.Buffer{}
-	outOut := &bytes.Buffer{}
-	// Must map to the actual "short" container path where the SCSI disk was mounted
-	actualContainerPath := fgc.svm.getShortContainerPath(fgc.mvd)
-	if actualContainerPath == "" {
-		return nil, fmt.Errorf("inconsistency detected: couldn't get short container path for %+v in utility VM %s", fgc.mvd, fgc.svm.config.Name)
-	}
-	file := path.Join(actualContainerPath, filename)
-
-	// Ugly fix for MSFT internal bug VSO#19696554
-	// If a file name contains a space, pushing an image fails.
-	// Using solution from https://groups.google.com/forum/#!topic/Golang-Nuts/DpldsmrhPio to escape for shell execution
-	file = "'" + strings.Join(strings.Split(file, "'"), `'"'"'`) + "'"
-	if err := fgc.svm.runProcess(fmt.Sprintf("cat %s", file), nil, outOut, errOut); err != nil {
-		logrus.Debugf("cat %s failed: %s", file, errOut.String())
-		return nil, err
-	}
-	return nopCloser{bytes.NewReader(outOut.Bytes())}, nil
-}
-
-// DiffGetter returns a FileGetCloser that can read files from the directory that
-// contains files for the layer differences. Used for direct access for tar-split.
-func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
-	title := fmt.Sprintf("lcowdriver: diffgetter: %s", id)
-	logrus.Debugf(title)
-
-	ld, err := getLayerDetails(d.dir(id))
-	if err != nil {
-		logrus.Debugf("%s: failed to get vhdx information of %s: %s", title, d.dir(id), err)
-		return nil, err
-	}
-
-	// Start the SVM with a mapped virtual disk. Note that if the SVM is
-	// already running and we are in global mode, this will be hot-added.
-	mvd := hcsshim.MappedVirtualDisk{
-		HostPath:          ld.filename,
-		ContainerPath:     hostToGuest(ld.filename),
-		CreateInUtilityVM: true,
-		ReadOnly:          true,
-	}
-
-	logrus.Debugf("%s: starting service VM", title)
-	svm, err := d.startServiceVMIfNotRunning(id, []hcsshim.MappedVirtualDisk{mvd}, fmt.Sprintf("diffgetter %s", id))
-	if err != nil {
-		return nil, err
-	}
-
-	logrus.Debugf("%s: waiting for svm to finish booting", title)
-	err = svm.getStartError()
-	if err != nil {
-		d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
-		return nil, fmt.Errorf("%s: svm failed to boot: %s", title, err)
-	}
-
-	return &fileGetCloserFromSVM{
-		id:  id,
-		svm: svm,
-		mvd: &mvd,
-		d:   d}, nil
-}

+ 0 - 421
daemon/graphdriver/lcow/lcow_svm.go

@@ -1,421 +0,0 @@
-// +build windows
-
-package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow"
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/Microsoft/hcsshim"
-	"github.com/Microsoft/opengcs/client"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-)
-
-// Code for all the service VM management for the LCOW graphdriver
-
-var errVMisTerminating = errors.New("service VM is shutting down")
-var errVMUnknown = errors.New("service vm id is unknown")
-var errVMStillHasReference = errors.New("Attemping to delete a VM that is still being used")
-
-// serviceVMMap is the struct representing the id -> service VM mapping.
-type serviceVMMap struct {
-	sync.Mutex
-	svms map[string]*serviceVMMapItem
-}
-
-// serviceVMMapItem is our internal structure representing an item in our
-// map of service VMs we are maintaining.
-type serviceVMMapItem struct {
-	svm      *serviceVM // actual service vm object
-	refCount int        // refcount for VM
-}
-
-// attachedVHD is for reference counting SCSI disks attached to a service VM,
-// and for a counter used to generate a short path name for the container path.
-type attachedVHD struct {
-	refCount      int
-	attachCounter uint64
-}
-
-type serviceVM struct {
-	sync.Mutex                     // Serialises operations being performed in this service VM.
-	scratchAttached bool           // Has a scratch been attached?
-	config          *client.Config // Represents the service VM item.
-
-	// Indicates that the vm is started
-	startStatus chan interface{}
-	startError  error
-
-	// Indicates that the vm is stopped
-	stopStatus chan interface{}
-	stopError  error
-
-	attachCounter uint64                  // Increasing counter for each add
-	attachedVHDs  map[string]*attachedVHD // Map ref counting all the VHDS we've hot-added/hot-removed.
-	unionMounts   map[string]int          // Map ref counting all the union filesystems we mounted.
-}
-
-// add will add an id to the service vm map. There are three cases:
-// 	- entry doesn't exist:
-// 		- add id to map and return a new vm that the caller can manually configure+start
-//	- entry does exist
-//  	- return vm in map and increment ref count
-//  - entry does exist but the ref count is 0
-//		- return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
-func (svmMap *serviceVMMap) add(id string) (svm *serviceVM, alreadyExists bool, err error) {
-	svmMap.Lock()
-	defer svmMap.Unlock()
-	if svm, ok := svmMap.svms[id]; ok {
-		if svm.refCount == 0 {
-			return svm.svm, true, errVMisTerminating
-		}
-		svm.refCount++
-		return svm.svm, true, nil
-	}
-
-	// Doesn't exist, so create an empty svm to put into map and return
-	newSVM := &serviceVM{
-		startStatus:  make(chan interface{}),
-		stopStatus:   make(chan interface{}),
-		attachedVHDs: make(map[string]*attachedVHD),
-		unionMounts:  make(map[string]int),
-		config:       &client.Config{},
-	}
-	svmMap.svms[id] = &serviceVMMapItem{
-		svm:      newSVM,
-		refCount: 1,
-	}
-	return newSVM, false, nil
-}
-
-// get will get the service vm from the map. There are three cases:
-// 	- entry doesn't exist:
-// 		- return errVMUnknown
-//	- entry does exist
-//  	- return vm with no error
-//  - entry does exist but the ref count is 0
-//		- return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
-func (svmMap *serviceVMMap) get(id string) (*serviceVM, error) {
-	svmMap.Lock()
-	defer svmMap.Unlock()
-	svm, ok := svmMap.svms[id]
-	if !ok {
-		return nil, errVMUnknown
-	}
-	if svm.refCount == 0 {
-		return svm.svm, errVMisTerminating
-	}
-	return svm.svm, nil
-}
-
-// decrementRefCount decrements the ref count of the given ID from the map. There are four cases:
-// 	- entry doesn't exist:
-// 		- return errVMUnknown
-//  - entry does exist but the ref count is 0
-//		- return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
-//	- entry does exist but ref count is 1
-//  	- return vm and set lastRef to true. The caller can then stop the vm, delete the id from this map
-//      - and execute svm.signalStopFinished to signal the threads that the svm has been terminated.
-//	- entry does exist and ref count > 1
-//		- just reduce ref count and return svm
-func (svmMap *serviceVMMap) decrementRefCount(id string) (_ *serviceVM, lastRef bool, _ error) {
-	svmMap.Lock()
-	defer svmMap.Unlock()
-
-	svm, ok := svmMap.svms[id]
-	if !ok {
-		return nil, false, errVMUnknown
-	}
-	if svm.refCount == 0 {
-		return svm.svm, false, errVMisTerminating
-	}
-	svm.refCount--
-	return svm.svm, svm.refCount == 0, nil
-}
-
-// setRefCountZero works the same way as decrementRefCount, but sets ref count to 0 instead of decrementing it.
-func (svmMap *serviceVMMap) setRefCountZero(id string) (*serviceVM, error) {
-	svmMap.Lock()
-	defer svmMap.Unlock()
-
-	svm, ok := svmMap.svms[id]
-	if !ok {
-		return nil, errVMUnknown
-	}
-	if svm.refCount == 0 {
-		return svm.svm, errVMisTerminating
-	}
-	svm.refCount = 0
-	return svm.svm, nil
-}
-
-// deleteID deletes the given ID from the map. If the refcount is not 0 or the
-// VM does not exist, then this function returns an error.
-func (svmMap *serviceVMMap) deleteID(id string) error {
-	svmMap.Lock()
-	defer svmMap.Unlock()
-	svm, ok := svmMap.svms[id]
-	if !ok {
-		return errVMUnknown
-	}
-	if svm.refCount != 0 {
-		return errVMStillHasReference
-	}
-	delete(svmMap.svms, id)
-	return nil
-}
-
-func (svm *serviceVM) signalStartFinished(err error) {
-	svm.Lock()
-	svm.startError = err
-	svm.Unlock()
-	close(svm.startStatus)
-}
-
-func (svm *serviceVM) getStartError() error {
-	<-svm.startStatus
-	svm.Lock()
-	defer svm.Unlock()
-	return svm.startError
-}
-
-func (svm *serviceVM) signalStopFinished(err error) {
-	svm.Lock()
-	svm.stopError = err
-	svm.Unlock()
-	close(svm.stopStatus)
-}
-
-func (svm *serviceVM) getStopError() error {
-	<-svm.stopStatus
-	svm.Lock()
-	defer svm.Unlock()
-	return svm.stopError
-}
-
-// hotAddVHDs waits for the service vm to start and then attaches the vhds.
-func (svm *serviceVM) hotAddVHDs(mvds ...hcsshim.MappedVirtualDisk) error {
-	if err := svm.getStartError(); err != nil {
-		return err
-	}
-	return svm.hotAddVHDsAtStart(mvds...)
-}
-
-// hotAddVHDsAtStart works the same way as hotAddVHDs but does not wait for the VM to start.
-func (svm *serviceVM) hotAddVHDsAtStart(mvds ...hcsshim.MappedVirtualDisk) error {
-	svm.Lock()
-	defer svm.Unlock()
-	for i, mvd := range mvds {
-		if _, ok := svm.attachedVHDs[mvd.HostPath]; ok {
-			svm.attachedVHDs[mvd.HostPath].refCount++
-			logrus.Debugf("lcowdriver: UVM %s: %s already present, refCount now %d", svm.config.Name, mvd.HostPath, svm.attachedVHDs[mvd.HostPath].refCount)
-			continue
-		}
-
-		svm.attachCounter++
-		shortContainerPath := remapLongToShortContainerPath(mvd.ContainerPath, svm.attachCounter, svm.config.Name)
-		if err := svm.config.HotAddVhd(mvd.HostPath, shortContainerPath, mvd.ReadOnly, !mvd.AttachOnly); err != nil {
-			svm.hotRemoveVHDsNoLock(mvds[:i]...)
-			return err
-		}
-		svm.attachedVHDs[mvd.HostPath] = &attachedVHD{refCount: 1, attachCounter: svm.attachCounter}
-	}
-	return nil
-}
-
-// hotRemoveVHDs waits for the service vm to start and then removes the vhds.
-// The service VM must not be locked when calling this function.
-func (svm *serviceVM) hotRemoveVHDs(mvds ...hcsshim.MappedVirtualDisk) error {
-	if err := svm.getStartError(); err != nil {
-		return err
-	}
-	svm.Lock()
-	defer svm.Unlock()
-	return svm.hotRemoveVHDsNoLock(mvds...)
-}
-
-// hotRemoveVHDsNoLock removes VHDs from a service VM. When calling this function,
-// the contract is the service VM lock must be held.
-func (svm *serviceVM) hotRemoveVHDsNoLock(mvds ...hcsshim.MappedVirtualDisk) error {
-	var retErr error
-	for _, mvd := range mvds {
-		if _, ok := svm.attachedVHDs[mvd.HostPath]; !ok {
-			// We continue instead of returning an error if we try to hot remove a non-existent VHD.
-			// This is because one of the callers of the function is graphdriver.Put(). Since graphdriver.Get()
-			// defers the VM start to the first operation, it's possible that nothing have been hot-added
-			// when Put() is called. To avoid Put returning an error in that case, we simply continue if we
-			// don't find the vhd attached.
-			logrus.Debugf("lcowdriver: UVM %s: %s is not attached, not doing anything", svm.config.Name, mvd.HostPath)
-			continue
-		}
-
-		if svm.attachedVHDs[mvd.HostPath].refCount > 1 {
-			svm.attachedVHDs[mvd.HostPath].refCount--
-			logrus.Debugf("lcowdriver: UVM %s: %s refCount dropped to %d. not removing from UVM", svm.config.Name, mvd.HostPath, svm.attachedVHDs[mvd.HostPath].refCount)
-			continue
-		}
-
-		// last reference to VHD, so remove from VM and map
-		if err := svm.config.HotRemoveVhd(mvd.HostPath); err == nil {
-			delete(svm.attachedVHDs, mvd.HostPath)
-		} else {
-			// Take note of the error, but still continue to remove the other VHDs
-			logrus.Warnf("Failed to hot remove %s: %s", mvd.HostPath, err)
-			if retErr == nil {
-				retErr = err
-			}
-		}
-	}
-	return retErr
-}
-
-func (svm *serviceVM) createExt4VHDX(destFile string, sizeGB uint32, cacheFile string) error {
-	if err := svm.getStartError(); err != nil {
-		return err
-	}
-
-	svm.Lock()
-	defer svm.Unlock()
-	return svm.config.CreateExt4Vhdx(destFile, sizeGB, cacheFile)
-}
-
-// getShortContainerPath looks up where a SCSI disk was actually mounted
-// in a service VM when we remapped a long path name to a short name.
-func (svm *serviceVM) getShortContainerPath(mvd *hcsshim.MappedVirtualDisk) string {
-	if mvd.ContainerPath == "" {
-		return ""
-	}
-	avhd, ok := svm.attachedVHDs[mvd.HostPath]
-	if !ok {
-		return ""
-	}
-	return fmt.Sprintf("/tmp/d%d", avhd.attachCounter)
-}
-
-func (svm *serviceVM) createUnionMount(mountName string, mvds ...hcsshim.MappedVirtualDisk) (err error) {
-	if len(mvds) == 0 {
-		return fmt.Errorf("createUnionMount: error must have at least 1 layer")
-	}
-
-	if err = svm.getStartError(); err != nil {
-		return err
-	}
-
-	svm.Lock()
-	defer svm.Unlock()
-	if _, ok := svm.unionMounts[mountName]; ok {
-		svm.unionMounts[mountName]++
-		return nil
-	}
-
-	var lowerLayers []string
-	if mvds[0].ReadOnly {
-		lowerLayers = append(lowerLayers, svm.getShortContainerPath(&mvds[0]))
-	}
-
-	for i := 1; i < len(mvds); i++ {
-		lowerLayers = append(lowerLayers, svm.getShortContainerPath(&mvds[i]))
-	}
-
-	logrus.Debugf("Doing the overlay mount with union directory=%s", mountName)
-	errOut := &bytes.Buffer{}
-	if err = svm.runProcess(fmt.Sprintf("mkdir -p %s", mountName), nil, nil, errOut); err != nil {
-		return errors.Wrapf(err, "mkdir -p %s failed (%s)", mountName, errOut.String())
-	}
-
-	var cmd string
-	if len(mvds) == 1 {
-		// `FROM SCRATCH` case and the only layer. No overlay required.
-		cmd = fmt.Sprintf("mount %s %s", svm.getShortContainerPath(&mvds[0]), mountName)
-	} else if mvds[0].ReadOnly {
-		// Readonly overlay
-		cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s %s",
-			strings.Join(lowerLayers, ","),
-			mountName)
-	} else {
-		upper := fmt.Sprintf("%s/upper", svm.getShortContainerPath(&mvds[0]))
-		work := fmt.Sprintf("%s/work", svm.getShortContainerPath(&mvds[0]))
-
-		errOut := &bytes.Buffer{}
-		if err = svm.runProcess(fmt.Sprintf("mkdir -p %s %s", upper, work), nil, nil, errOut); err != nil {
-			return errors.Wrapf(err, "mkdir -p %s failed (%s)", mountName, errOut.String())
-		}
-
-		cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s,upperdir=%s,workdir=%s %s",
-			strings.Join(lowerLayers, ":"),
-			upper,
-			work,
-			mountName)
-	}
-
-	logrus.Debugf("createUnionMount: Executing mount=%s", cmd)
-	errOut = &bytes.Buffer{}
-	if err = svm.runProcess(cmd, nil, nil, errOut); err != nil {
-		return errors.Wrapf(err, "%s failed (%s)", cmd, errOut.String())
-	}
-
-	svm.unionMounts[mountName] = 1
-	return nil
-}
-
-func (svm *serviceVM) deleteUnionMount(mountName string, disks ...hcsshim.MappedVirtualDisk) error {
-	if err := svm.getStartError(); err != nil {
-		return err
-	}
-
-	svm.Lock()
-	defer svm.Unlock()
-	if _, ok := svm.unionMounts[mountName]; !ok {
-		return nil
-	}
-
-	if svm.unionMounts[mountName] > 1 {
-		svm.unionMounts[mountName]--
-		return nil
-	}
-
-	logrus.Debugf("Removing union mount %s", mountName)
-	if err := svm.runProcess(fmt.Sprintf("umount %s", mountName), nil, nil, nil); err != nil {
-		return err
-	}
-
-	delete(svm.unionMounts, mountName)
-	return nil
-}
-
-func (svm *serviceVM) runProcess(command string, stdin io.Reader, stdout io.Writer, stderr io.Writer) error {
-	var process hcsshim.Process
-	var err error
-	errOut := &bytes.Buffer{}
-
-	if stderr != nil {
-		process, err = svm.config.RunProcess(command, stdin, stdout, stderr)
-	} else {
-		process, err = svm.config.RunProcess(command, stdin, stdout, errOut)
-	}
-	if err != nil {
-		return err
-	}
-	defer process.Close()
-
-	process.WaitTimeout(time.Duration(int(time.Second) * svm.config.UvmTimeoutSeconds))
-	exitCode, err := process.ExitCode()
-	if err != nil {
-		return err
-	}
-
-	if exitCode != 0 {
-		// If the caller isn't explicitly capturing stderr output, then capture it here instead.
-		e := fmt.Sprintf("svm.runProcess: command %s failed with exit code %d", command, exitCode)
-		if stderr == nil {
-			e = fmt.Sprintf("%s. (%s)", e, errOut.String())
-		}
-		return fmt.Errorf(e)
-	}
-	return nil
-}

+ 0 - 139
daemon/graphdriver/lcow/remotefs.go

@@ -1,139 +0,0 @@
-// +build windows
-
-package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow"
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"runtime"
-	"strings"
-	"sync"
-
-	"github.com/Microsoft/hcsshim"
-	"github.com/Microsoft/opengcs/service/gcsutils/remotefs"
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/containerfs"
-	"github.com/sirupsen/logrus"
-)
-
-type lcowfs struct {
-	root        string
-	d           *Driver
-	mappedDisks []hcsshim.MappedVirtualDisk
-	vmID        string
-	currentSVM  *serviceVM
-	sync.Mutex
-}
-
-var _ containerfs.ContainerFS = &lcowfs{}
-
-// ErrNotSupported is an error for unsupported operations in the remotefs
-var ErrNotSupported = fmt.Errorf("not supported")
-
-// Functions to implement the ContainerFS interface
-func (l *lcowfs) Path() string {
-	return l.root
-}
-
-func (l *lcowfs) ResolveScopedPath(path string, rawPath bool) (string, error) {
-	logrus.Debugf("remotefs.resolvescopedpath inputs: %s %s ", path, l.root)
-
-	arg1 := l.Join(l.root, path)
-	if !rawPath {
-		// The l.Join("/", path) will make path an absolute path and then clean it
-		// so if path = ../../X, it will become /X.
-		arg1 = l.Join(l.root, l.Join("/", path))
-	}
-	arg2 := l.root
-
-	output := &bytes.Buffer{}
-	if err := l.runRemoteFSProcess(nil, output, remotefs.ResolvePathCmd, arg1, arg2); err != nil {
-		return "", err
-	}
-
-	logrus.Debugf("remotefs.resolvescopedpath success. Output: %s\n", output.String())
-	return output.String(), nil
-}
-
-func (l *lcowfs) OS() string {
-	return "linux"
-}
-
-func (l *lcowfs) Architecture() string {
-	return runtime.GOARCH
-}
-
-// Other functions that are used by docker like the daemon Archiver/Extractor
-func (l *lcowfs) ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error {
-	logrus.Debugf("remotefs.ExtractArchve inputs: %s %+v", dst, opts)
-
-	tarBuf := &bytes.Buffer{}
-	if err := remotefs.WriteTarOptions(tarBuf, opts); err != nil {
-		return fmt.Errorf("failed to marshall tar opts: %s", err)
-	}
-
-	input := io.MultiReader(tarBuf, src)
-	if err := l.runRemoteFSProcess(input, nil, remotefs.ExtractArchiveCmd, dst); err != nil {
-		return fmt.Errorf("failed to extract archive to %s: %s", dst, err)
-	}
-	return nil
-}
-
-func (l *lcowfs) ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error) {
-	logrus.Debugf("remotefs.ArchivePath: %s %+v", src, opts)
-
-	tarBuf := &bytes.Buffer{}
-	if err := remotefs.WriteTarOptions(tarBuf, opts); err != nil {
-		return nil, fmt.Errorf("failed to marshall tar opts: %s", err)
-	}
-
-	r, w := io.Pipe()
-	go func() {
-		defer w.Close()
-		if err := l.runRemoteFSProcess(tarBuf, w, remotefs.ArchivePathCmd, src); err != nil {
-			logrus.Debugf("REMOTEFS: Failed to extract archive: %s %+v %s", src, opts, err)
-		}
-	}()
-	return r, nil
-}
-
-// Helper functions
-func (l *lcowfs) startVM() error {
-	l.Lock()
-	defer l.Unlock()
-	if l.currentSVM != nil {
-		return nil
-	}
-
-	svm, err := l.d.startServiceVMIfNotRunning(l.vmID, l.mappedDisks, fmt.Sprintf("lcowfs.startVM"))
-	if err != nil {
-		return err
-	}
-
-	if err = svm.createUnionMount(l.root, l.mappedDisks...); err != nil {
-		return err
-	}
-	l.currentSVM = svm
-	return nil
-}
-
-func (l *lcowfs) runRemoteFSProcess(stdin io.Reader, stdout io.Writer, args ...string) error {
-	if err := l.startVM(); err != nil {
-		return err
-	}
-
-	// Append remotefs prefix and setup as a command line string
-	cmd := fmt.Sprintf("%s %s", remotefs.RemotefsCmd, strings.Join(args, " "))
-	stderr := &bytes.Buffer{}
-	if err := l.currentSVM.runProcess(cmd, stdin, stdout, stderr); err != nil {
-		return err
-	}
-
-	eerr, err := remotefs.ReadError(stderr)
-	if eerr != nil {
-		// Process returned an error so return that.
-		return remotefs.ExportedToError(eerr)
-	}
-	return err
-}

+ 0 - 211
daemon/graphdriver/lcow/remotefs_file.go

@@ -1,211 +0,0 @@
-// +build windows
-
-package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow"
-
-import (
-	"bytes"
-	"encoding/binary"
-	"encoding/json"
-	"fmt"
-	"io"
-	"os"
-	"strconv"
-
-	"github.com/Microsoft/hcsshim"
-	"github.com/Microsoft/opengcs/service/gcsutils/remotefs"
-	"github.com/containerd/continuity/driver"
-)
-
-type lcowfile struct {
-	process   hcsshim.Process
-	stdin     io.WriteCloser
-	stdout    io.ReadCloser
-	stderr    io.ReadCloser
-	fs        *lcowfs
-	guestPath string
-}
-
-func (l *lcowfs) Open(path string) (driver.File, error) {
-	return l.OpenFile(path, os.O_RDONLY, 0)
-}
-
-func (l *lcowfs) OpenFile(path string, flag int, perm os.FileMode) (_ driver.File, err error) {
-	flagStr := strconv.FormatInt(int64(flag), 10)
-	permStr := strconv.FormatUint(uint64(perm), 8)
-
-	commandLine := fmt.Sprintf("%s %s %s %s %s", remotefs.RemotefsCmd, remotefs.OpenFileCmd, path, flagStr, permStr)
-	env := make(map[string]string)
-	env["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:"
-	processConfig := &hcsshim.ProcessConfig{
-		EmulateConsole:    false,
-		CreateStdInPipe:   true,
-		CreateStdOutPipe:  true,
-		CreateStdErrPipe:  true,
-		CreateInUtilityVm: true,
-		WorkingDirectory:  "/bin",
-		Environment:       env,
-		CommandLine:       commandLine,
-	}
-
-	process, err := l.currentSVM.config.Uvm.CreateProcess(processConfig)
-	if err != nil {
-		return nil, fmt.Errorf("failed to open file %s: %s", path, err)
-	}
-
-	stdin, stdout, stderr, err := process.Stdio()
-	if err != nil {
-		process.Kill()
-		process.Close()
-		return nil, fmt.Errorf("failed to open file pipes %s: %s", path, err)
-	}
-
-	lf := &lcowfile{
-		process:   process,
-		stdin:     stdin,
-		stdout:    stdout,
-		stderr:    stderr,
-		fs:        l,
-		guestPath: path,
-	}
-
-	if _, err := lf.getResponse(); err != nil {
-		return nil, fmt.Errorf("failed to open file %s: %s", path, err)
-	}
-	return lf, nil
-}
-
-func (l *lcowfile) Read(b []byte) (int, error) {
-	hdr := &remotefs.FileHeader{
-		Cmd:  remotefs.Read,
-		Size: uint64(len(b)),
-	}
-
-	if err := remotefs.WriteFileHeader(l.stdin, hdr, nil); err != nil {
-		return 0, err
-	}
-
-	buf, err := l.getResponse()
-	if err != nil {
-		return 0, err
-	}
-
-	n := copy(b, buf)
-	return n, nil
-}
-
-func (l *lcowfile) Write(b []byte) (int, error) {
-	hdr := &remotefs.FileHeader{
-		Cmd:  remotefs.Write,
-		Size: uint64(len(b)),
-	}
-
-	if err := remotefs.WriteFileHeader(l.stdin, hdr, b); err != nil {
-		return 0, err
-	}
-
-	_, err := l.getResponse()
-	if err != nil {
-		return 0, err
-	}
-
-	return len(b), nil
-}
-
-func (l *lcowfile) Seek(offset int64, whence int) (int64, error) {
-	seekHdr := &remotefs.SeekHeader{
-		Offset: offset,
-		Whence: int32(whence),
-	}
-
-	buf := &bytes.Buffer{}
-	if err := binary.Write(buf, binary.BigEndian, seekHdr); err != nil {
-		return 0, err
-	}
-
-	hdr := &remotefs.FileHeader{
-		Cmd:  remotefs.Write,
-		Size: uint64(buf.Len()),
-	}
-	if err := remotefs.WriteFileHeader(l.stdin, hdr, buf.Bytes()); err != nil {
-		return 0, err
-	}
-
-	resBuf, err := l.getResponse()
-	if err != nil {
-		return 0, err
-	}
-
-	var res int64
-	if err := binary.Read(bytes.NewBuffer(resBuf), binary.BigEndian, &res); err != nil {
-		return 0, err
-	}
-	return res, nil
-}
-
-func (l *lcowfile) Close() error {
-	hdr := &remotefs.FileHeader{
-		Cmd:  remotefs.Close,
-		Size: 0,
-	}
-
-	if err := remotefs.WriteFileHeader(l.stdin, hdr, nil); err != nil {
-		return err
-	}
-
-	_, err := l.getResponse()
-	return err
-}
-
-func (l *lcowfile) Readdir(n int) ([]os.FileInfo, error) {
-	nStr := strconv.FormatInt(int64(n), 10)
-
-	// Unlike the other File functions, this one can just be run without maintaining state,
-	// so just do the normal runRemoteFSProcess way.
-	buf := &bytes.Buffer{}
-	if err := l.fs.runRemoteFSProcess(nil, buf, remotefs.ReadDirCmd, l.guestPath, nStr); err != nil {
-		return nil, err
-	}
-
-	var info []remotefs.FileInfo
-	if err := json.Unmarshal(buf.Bytes(), &info); err != nil {
-		return nil, err
-	}
-
-	osInfo := make([]os.FileInfo, len(info))
-	for i := range info {
-		osInfo[i] = &info[i]
-	}
-	return osInfo, nil
-}
-
-func (l *lcowfile) getResponse() ([]byte, error) {
-	hdr, err := remotefs.ReadFileHeader(l.stdout)
-	if err != nil {
-		return nil, err
-	}
-
-	if hdr.Cmd != remotefs.CmdOK {
-		// Something went wrong during the openfile in the server.
-		// Parse stderr and return that as an error
-		eerr, err := remotefs.ReadError(l.stderr)
-		if eerr != nil {
-			return nil, remotefs.ExportedToError(eerr)
-		}
-
-		// Maybe the parsing went wrong?
-		if err != nil {
-			return nil, err
-		}
-
-		// At this point, we know something went wrong in the remotefs program, but
-		// we we don't know why.
-		return nil, fmt.Errorf("unknown error")
-	}
-
-	// Successful command, we might have some data to read (for Read + Seek)
-	buf := make([]byte, hdr.Size, hdr.Size)
-	if _, err := io.ReadFull(l.stdout, buf); err != nil {
-		return nil, err
-	}
-	return buf, nil
-}

+ 0 - 123
daemon/graphdriver/lcow/remotefs_filedriver.go

@@ -1,123 +0,0 @@
-// +build windows
-
-package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow"
-
-import (
-	"bytes"
-	"encoding/json"
-	"os"
-	"strconv"
-
-	"github.com/Microsoft/opengcs/service/gcsutils/remotefs"
-
-	"github.com/containerd/continuity/driver"
-	"github.com/sirupsen/logrus"
-)
-
-var _ driver.Driver = &lcowfs{}
-
-func (l *lcowfs) Readlink(p string) (string, error) {
-	logrus.Debugf("removefs.readlink args: %s", p)
-
-	result := &bytes.Buffer{}
-	if err := l.runRemoteFSProcess(nil, result, remotefs.ReadlinkCmd, p); err != nil {
-		return "", err
-	}
-	return result.String(), nil
-}
-
-func (l *lcowfs) Mkdir(path string, mode os.FileMode) error {
-	return l.mkdir(path, mode, remotefs.MkdirCmd)
-}
-
-func (l *lcowfs) MkdirAll(path string, mode os.FileMode) error {
-	return l.mkdir(path, mode, remotefs.MkdirAllCmd)
-}
-
-func (l *lcowfs) mkdir(path string, mode os.FileMode, cmd string) error {
-	modeStr := strconv.FormatUint(uint64(mode), 8)
-	logrus.Debugf("remotefs.%s args: %s %s", cmd, path, modeStr)
-	return l.runRemoteFSProcess(nil, nil, cmd, path, modeStr)
-}
-
-func (l *lcowfs) Remove(path string) error {
-	return l.remove(path, remotefs.RemoveCmd)
-}
-
-func (l *lcowfs) RemoveAll(path string) error {
-	return l.remove(path, remotefs.RemoveAllCmd)
-}
-
-func (l *lcowfs) remove(path string, cmd string) error {
-	logrus.Debugf("remotefs.%s args: %s", cmd, path)
-	return l.runRemoteFSProcess(nil, nil, cmd, path)
-}
-
-func (l *lcowfs) Link(oldname, newname string) error {
-	return l.link(oldname, newname, remotefs.LinkCmd)
-}
-
-func (l *lcowfs) Symlink(oldname, newname string) error {
-	return l.link(oldname, newname, remotefs.SymlinkCmd)
-}
-
-func (l *lcowfs) link(oldname, newname, cmd string) error {
-	logrus.Debugf("remotefs.%s args: %s %s", cmd, oldname, newname)
-	return l.runRemoteFSProcess(nil, nil, cmd, oldname, newname)
-}
-
-func (l *lcowfs) Lchown(name string, uid, gid int64) error {
-	uidStr := strconv.FormatInt(uid, 10)
-	gidStr := strconv.FormatInt(gid, 10)
-
-	logrus.Debugf("remotefs.lchown args: %s %s %s", name, uidStr, gidStr)
-	return l.runRemoteFSProcess(nil, nil, remotefs.LchownCmd, name, uidStr, gidStr)
-}
-
-// Lchmod changes the mode of an file not following symlinks.
-func (l *lcowfs) Lchmod(path string, mode os.FileMode) error {
-	modeStr := strconv.FormatUint(uint64(mode), 8)
-	logrus.Debugf("remotefs.lchmod args: %s %s", path, modeStr)
-	return l.runRemoteFSProcess(nil, nil, remotefs.LchmodCmd, path, modeStr)
-}
-
-func (l *lcowfs) Mknod(path string, mode os.FileMode, major, minor int) error {
-	modeStr := strconv.FormatUint(uint64(mode), 8)
-	majorStr := strconv.FormatUint(uint64(major), 10)
-	minorStr := strconv.FormatUint(uint64(minor), 10)
-
-	logrus.Debugf("remotefs.mknod args: %s %s %s %s", path, modeStr, majorStr, minorStr)
-	return l.runRemoteFSProcess(nil, nil, remotefs.MknodCmd, path, modeStr, majorStr, minorStr)
-}
-
-func (l *lcowfs) Mkfifo(path string, mode os.FileMode) error {
-	modeStr := strconv.FormatUint(uint64(mode), 8)
-	logrus.Debugf("remotefs.mkfifo args: %s %s", path, modeStr)
-	return l.runRemoteFSProcess(nil, nil, remotefs.MkfifoCmd, path, modeStr)
-}
-
-func (l *lcowfs) Stat(p string) (os.FileInfo, error) {
-	return l.stat(p, remotefs.StatCmd)
-}
-
-func (l *lcowfs) Lstat(p string) (os.FileInfo, error) {
-	return l.stat(p, remotefs.LstatCmd)
-}
-
-func (l *lcowfs) stat(path string, cmd string) (os.FileInfo, error) {
-	logrus.Debugf("remotefs.stat inputs: %s %s", cmd, path)
-
-	output := &bytes.Buffer{}
-	err := l.runRemoteFSProcess(nil, output, cmd, path)
-	if err != nil {
-		return nil, err
-	}
-
-	var fi remotefs.FileInfo
-	if err := json.Unmarshal(output.Bytes(), &fi); err != nil {
-		return nil, err
-	}
-
-	logrus.Debugf("remotefs.stat success. got: %v\n", fi)
-	return &fi, nil
-}

+ 0 - 212
daemon/graphdriver/lcow/remotefs_pathdriver.go

@@ -1,212 +0,0 @@
-// +build windows
-
-package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow"
-
-import (
-	"errors"
-	"os"
-	pathpkg "path"
-	"path/filepath"
-	"sort"
-	"strings"
-
-	"github.com/containerd/continuity/pathdriver"
-)
-
-var _ pathdriver.PathDriver = &lcowfs{}
-
-// Continuity Path functions can be done locally
-func (l *lcowfs) Join(path ...string) string {
-	return pathpkg.Join(path...)
-}
-
-func (l *lcowfs) IsAbs(path string) bool {
-	return pathpkg.IsAbs(path)
-}
-
-func sameWord(a, b string) bool {
-	return a == b
-}
-
-// Implementation taken from the Go standard library
-func (l *lcowfs) Rel(basepath, targpath string) (string, error) {
-	baseVol := ""
-	targVol := ""
-	base := l.Clean(basepath)
-	targ := l.Clean(targpath)
-	if sameWord(targ, base) {
-		return ".", nil
-	}
-	base = base[len(baseVol):]
-	targ = targ[len(targVol):]
-	if base == "." {
-		base = ""
-	}
-	// Can't use IsAbs - `\a` and `a` are both relative in Windows.
-	baseSlashed := len(base) > 0 && base[0] == l.Separator()
-	targSlashed := len(targ) > 0 && targ[0] == l.Separator()
-	if baseSlashed != targSlashed || !sameWord(baseVol, targVol) {
-		return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath)
-	}
-	// Position base[b0:bi] and targ[t0:ti] at the first differing elements.
-	bl := len(base)
-	tl := len(targ)
-	var b0, bi, t0, ti int
-	for {
-		for bi < bl && base[bi] != l.Separator() {
-			bi++
-		}
-		for ti < tl && targ[ti] != l.Separator() {
-			ti++
-		}
-		if !sameWord(targ[t0:ti], base[b0:bi]) {
-			break
-		}
-		if bi < bl {
-			bi++
-		}
-		if ti < tl {
-			ti++
-		}
-		b0 = bi
-		t0 = ti
-	}
-	if base[b0:bi] == ".." {
-		return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath)
-	}
-	if b0 != bl {
-		// Base elements left. Must go up before going down.
-		seps := strings.Count(base[b0:bl], string(l.Separator()))
-		size := 2 + seps*3
-		if tl != t0 {
-			size += 1 + tl - t0
-		}
-		buf := make([]byte, size)
-		n := copy(buf, "..")
-		for i := 0; i < seps; i++ {
-			buf[n] = l.Separator()
-			copy(buf[n+1:], "..")
-			n += 3
-		}
-		if t0 != tl {
-			buf[n] = l.Separator()
-			copy(buf[n+1:], targ[t0:])
-		}
-		return string(buf), nil
-	}
-	return targ[t0:], nil
-}
-
-func (l *lcowfs) Base(path string) string {
-	return pathpkg.Base(path)
-}
-
-func (l *lcowfs) Dir(path string) string {
-	return pathpkg.Dir(path)
-}
-
-func (l *lcowfs) Clean(path string) string {
-	return pathpkg.Clean(path)
-}
-
-func (l *lcowfs) Split(path string) (dir, file string) {
-	return pathpkg.Split(path)
-}
-
-func (l *lcowfs) Separator() byte {
-	return '/'
-}
-
-func (l *lcowfs) Abs(path string) (string, error) {
-	// Abs is supposed to add the current working directory, which is meaningless in lcow.
-	// So, return an error.
-	return "", ErrNotSupported
-}
-
-// Implementation taken from the Go standard library
-func (l *lcowfs) Walk(root string, walkFn filepath.WalkFunc) error {
-	info, err := l.Lstat(root)
-	if err != nil {
-		err = walkFn(root, nil, err)
-	} else {
-		err = l.walk(root, info, walkFn)
-	}
-	if err == filepath.SkipDir {
-		return nil
-	}
-	return err
-}
-
-// walk recursively descends path, calling w.
-func (l *lcowfs) walk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
-	err := walkFn(path, info, nil)
-	if err != nil {
-		if info.IsDir() && err == filepath.SkipDir {
-			return nil
-		}
-		return err
-	}
-
-	if !info.IsDir() {
-		return nil
-	}
-
-	names, err := l.readDirNames(path)
-	if err != nil {
-		return walkFn(path, info, err)
-	}
-
-	for _, name := range names {
-		filename := l.Join(path, name)
-		fileInfo, err := l.Lstat(filename)
-		if err != nil {
-			if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
-				return err
-			}
-		} else {
-			err = l.walk(filename, fileInfo, walkFn)
-			if err != nil {
-				if !fileInfo.IsDir() || err != filepath.SkipDir {
-					return err
-				}
-			}
-		}
-	}
-	return nil
-}
-
-// readDirNames reads the directory named by dirname and returns
-// a sorted list of directory entries.
-func (l *lcowfs) readDirNames(dirname string) ([]string, error) {
-	f, err := l.Open(dirname)
-	if err != nil {
-		return nil, err
-	}
-	files, err := f.Readdir(-1)
-	f.Close()
-	if err != nil {
-		return nil, err
-	}
-
-	names := make([]string, len(files), len(files))
-	for i := range files {
-		names[i] = files[i].Name()
-	}
-
-	sort.Strings(names)
-	return names, nil
-}
-
-// Note that Go's filepath.FromSlash/ToSlash convert between OS paths and '/'. Since the path separator
-// for LCOW (and Unix) is '/', they are no-ops.
-func (l *lcowfs) FromSlash(path string) string {
-	return path
-}
-
-func (l *lcowfs) ToSlash(path string) string {
-	return path
-}
-
-func (l *lcowfs) Match(pattern, name string) (matched bool, err error) {
-	return pathpkg.Match(pattern, name)
-}

+ 0 - 1
daemon/graphdriver/register/register_windows.go

@@ -2,6 +2,5 @@ package register // import "github.com/docker/docker/daemon/graphdriver/register
 
 import (
 	// register the windows graph drivers
-	_ "github.com/docker/docker/daemon/graphdriver/lcow"
 	_ "github.com/docker/docker/daemon/graphdriver/windows"
 )

+ 142 - 239
hack/ci/windows.ps1

@@ -9,12 +9,6 @@
 $ErrorActionPreference = 'Stop'
 $StartTime=Get-Date
 
-# Put up top to be blindingly obvious. The production jenkins.dockerproject.org Linux-container 
-# CI job is "Docker-PRs-LoW-RS3". Force into LCOW mode for this run, or not.
-if ($env:BUILD_TAG -match "-LoW") { $env:LCOW_MODE=1 }
-if ($env:BUILD_TAG -match "-WoW") { $env:LCOW_MODE="" }
-
-
 Write-Host -ForegroundColor Red "DEBUG: print all environment variables to check how Jenkins runs this script"
 $allArgs = [Environment]::GetCommandLineArgs()
 Write-Host -ForegroundColor Red $allArgs
@@ -100,11 +94,6 @@ Write-Host -ForegroundColor Red "-----------------------------------------------
 #    WINDOWS_BASE_IMAGE_TAG   if defined, uses that as the tag name for the base image.
 #                             if no set, defaults to latest
 #
-#    LCOW_BASIC_MODE          if defined, does very basic LCOW verification. Ultimately we 
-#                             want to run the entire CI suite from docker, but that's a way off.
-#                            
-#    LCOW_MODE                if defined, runs the entire CI suite
-#                            
 # -------------------------------------------------------------------------------------------
 #
 # Jenkins Integration. Add a Windows Powershell build step as follows:
@@ -628,11 +617,6 @@ Try {
     Write-Host -ForegroundColor Green "INFO: Args: $dutArgs"
     New-Item -ItemType Directory $env:TEMP\daemon -ErrorAction SilentlyContinue  | Out-Null
 
-    # In LCOW mode, for now we need to set an environment variable before starting the daemon under test
-    if (($null -ne $env:LCOW_MODE) -or ($null -ne $env:LCOW_BASIC_MODE)) {
-        $env:LCOW_SUPPORTED=1
-    }
-
     # Cannot fathom why, but always writes to stderr....
     Start-Process "$env:TEMP\binary\dockerd-$COMMITHASH" `
                   -ArgumentList $dutArgs `
@@ -641,12 +625,6 @@ Try {
     Write-Host -ForegroundColor Green "INFO: Process started successfully."
     $daemonStarted=1
 
-    # In LCOW mode, turn off that variable
-    if (($null -ne $env:LCOW_MODE) -or ($null -ne $env:LCOW_BASIC_MODE)) {
-        $env:LCOW_SUPPORTED=""
-    }
-
-
     # Start tailing the daemon under test if the command is installed
     if ($null -ne (Get-Command "tail" -ErrorAction SilentlyContinue)) {
         Write-Host -ForegroundColor green "INFO: Start tailing logs of the daemon under tests"
@@ -706,64 +684,60 @@ Try {
     }
     Write-Host
 
-    # Don't need Windows images when in LCOW mode.
-    if (($null -eq $env:LCOW_MODE) -and ($null -eq $env:LCOW_BASIC_MODE)) {
-
-        # Default to windowsservercore for the base image used for the tests. The "docker" image
-        # and the control daemon use microsoft/windowsservercore regardless. This is *JUST* for the tests.
-        if ($null -eq $env:WINDOWS_BASE_IMAGE) {
-            $env:WINDOWS_BASE_IMAGE="microsoft/windowsservercore"
-        }
-        if ($null -eq $env:WINDOWS_BASE_IMAGE_TAG) {
-            $env:WINDOWS_BASE_IMAGE_TAG="latest"
-        }
+    # Default to windowsservercore for the base image used for the tests. The "docker" image
+    # and the control daemon use microsoft/windowsservercore regardless. This is *JUST* for the tests.
+    if ($null -eq $env:WINDOWS_BASE_IMAGE) {
+        $env:WINDOWS_BASE_IMAGE="microsoft/windowsservercore"
+    }
+    if ($null -eq $env:WINDOWS_BASE_IMAGE_TAG) {
+        $env:WINDOWS_BASE_IMAGE_TAG="latest"
+    }
 
-        # Lowercase and make sure it has a microsoft/ prefix
-        $env:WINDOWS_BASE_IMAGE = $env:WINDOWS_BASE_IMAGE.ToLower()
-        if (! $($env:WINDOWS_BASE_IMAGE -Split "/")[0] -match "microsoft") {
-            Throw "ERROR: WINDOWS_BASE_IMAGE should start microsoft/ or mcr.microsoft.com/"
-        }
+    # Lowercase and make sure it has a microsoft/ prefix
+    $env:WINDOWS_BASE_IMAGE = $env:WINDOWS_BASE_IMAGE.ToLower()
+    if (! $($env:WINDOWS_BASE_IMAGE -Split "/")[0] -match "microsoft") {
+        Throw "ERROR: WINDOWS_BASE_IMAGE should start microsoft/ or mcr.microsoft.com/"
+    }
 
-        Write-Host -ForegroundColor Green "INFO: Base image for tests is $env:WINDOWS_BASE_IMAGE"
+    Write-Host -ForegroundColor Green "INFO: Base image for tests is $env:WINDOWS_BASE_IMAGE"
 
-        $ErrorActionPreference = "SilentlyContinue"
-        if ($((& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" images --format "{{.Repository}}:{{.Tag}}" | Select-String "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" | Measure-Object -Line).Lines) -eq 0) {
-            # Try the internal azure CI image version or Microsoft internal corpnet where the base image is already pre-prepared on the disk,
-            # either through Invoke-DockerCI or, in the case of Azure CI servers, baked into the VHD at the same location.
-            if (Test-Path $("c:\baseimages\"+$($env:WINDOWS_BASE_IMAGE -Split "/")[1]+".tar")) {
-                Write-Host  -ForegroundColor Green "INFO: Loading"$($env:WINDOWS_BASE_IMAGE -Split "/")[1]".tar from disk into the daemon under test. This may take some time..."
-                $ErrorActionPreference = "SilentlyContinue"
-                & "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" load -i $("$readBaseFrom`:\baseimages\"+$($env:WINDOWS_BASE_IMAGE -Split "/")[1]+".tar")
-                $ErrorActionPreference = "Stop"
-                if (-not $LastExitCode -eq 0) {
-                    Throw $("ERROR: Failed to load $readBaseFrom`:\baseimages\"+$($env:WINDOWS_BASE_IMAGE -Split "/")[1]+".tar into daemon under test")
-                }
-                Write-Host -ForegroundColor Green "INFO: docker load of"$($env:WINDOWS_BASE_IMAGE -Split "/")[1]" into daemon under test completed successfully"
-            } else {
-                # We need to docker pull it instead. It will come in directly as microsoft/imagename:tagname
-                Write-Host -ForegroundColor Green $("INFO: Pulling "+$env:WINDOWS_BASE_IMAGE+":"+$env:WINDOWS_BASE_IMAGE_TAG+" from docker hub into daemon under test. This may take some time...")
-                $ErrorActionPreference = "SilentlyContinue"
-                & "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" pull "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG"
-                $ErrorActionPreference = "Stop"
-                if (-not $LastExitCode -eq 0) {
-                    Throw $("ERROR: Failed to docker pull $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG into daemon under test.")
-                }
-                Write-Host -ForegroundColor Green $("INFO: docker pull of $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG into daemon under test completed successfully")
-                Write-Host -ForegroundColor Green $("INFO: Tagging $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG as microsoft/$ControlDaemonBaseImage in daemon under test")
-                & "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" tag "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" microsoft/$ControlDaemonBaseImage
+    $ErrorActionPreference = "SilentlyContinue"
+    if ($((& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" images --format "{{.Repository}}:{{.Tag}}" | Select-String "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" | Measure-Object -Line).Lines) -eq 0) {
+        # Try the internal azure CI image version or Microsoft internal corpnet where the base image is already pre-prepared on the disk,
+        # either through Invoke-DockerCI or, in the case of Azure CI servers, baked into the VHD at the same location.
+        if (Test-Path $("c:\baseimages\"+$($env:WINDOWS_BASE_IMAGE -Split "/")[1]+".tar")) {
+            Write-Host  -ForegroundColor Green "INFO: Loading"$($env:WINDOWS_BASE_IMAGE -Split "/")[1]".tar from disk into the daemon under test. This may take some time..."
+            $ErrorActionPreference = "SilentlyContinue"
+            & "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" load -i $("$readBaseFrom`:\baseimages\"+$($env:WINDOWS_BASE_IMAGE -Split "/")[1]+".tar")
+            $ErrorActionPreference = "Stop"
+            if (-not $LastExitCode -eq 0) {
+                Throw $("ERROR: Failed to load $readBaseFrom`:\baseimages\"+$($env:WINDOWS_BASE_IMAGE -Split "/")[1]+".tar into daemon under test")
             }
+            Write-Host -ForegroundColor Green "INFO: docker load of"$($env:WINDOWS_BASE_IMAGE -Split "/")[1]" into daemon under test completed successfully"
         } else {
-            Write-Host -ForegroundColor Green "INFO: Image $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG is already loaded in the daemon under test"
+            # We need to docker pull it instead. It will come in directly as microsoft/imagename:tagname
+            Write-Host -ForegroundColor Green $("INFO: Pulling "+$env:WINDOWS_BASE_IMAGE+":"+$env:WINDOWS_BASE_IMAGE_TAG+" from docker hub into daemon under test. This may take some time...")
+            $ErrorActionPreference = "SilentlyContinue"
+            & "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" pull "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG"
+            $ErrorActionPreference = "Stop"
+            if (-not $LastExitCode -eq 0) {
+                Throw $("ERROR: Failed to docker pull $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG into daemon under test.")
+            }
+            Write-Host -ForegroundColor Green $("INFO: docker pull of $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG into daemon under test completed successfully")
+            Write-Host -ForegroundColor Green $("INFO: Tagging $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG as microsoft/$ControlDaemonBaseImage in daemon under test")
+            & "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" tag "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" microsoft/$ControlDaemonBaseImage
         }
-    
-    
-        # Inspect the pulled or loaded image to get the version directly
-        $ErrorActionPreference = "SilentlyContinue"
-        $dutimgVersion = $(&"$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" inspect "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" --format "{{.OsVersion}}")
-        $ErrorActionPreference = "Stop"
-        Write-Host -ForegroundColor Green $("INFO: Version of $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG is '"+$dutimgVersion+"'")
+    } else {
+        Write-Host -ForegroundColor Green "INFO: Image $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG is already loaded in the daemon under test"
     }
 
+
+    # Inspect the pulled or loaded image to get the version directly
+    $ErrorActionPreference = "SilentlyContinue"
+    $dutimgVersion = $(&"$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" inspect "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" --format "{{.OsVersion}}")
+    $ErrorActionPreference = "Stop"
+    Write-Host -ForegroundColor Green $("INFO: Version of $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG is '"+$dutimgVersion+"'")
+
     # Run the validation tests unless SKIP_VALIDATION_TESTS is defined.
     if ($null -eq $env:SKIP_VALIDATION_TESTS) {
         Write-Host -ForegroundColor Cyan "INFO: Running validation tests at $(Get-Date)..."
@@ -778,193 +752,122 @@ Try {
         Write-Host -ForegroundColor Magenta "WARN: Skipping validation tests"
     }
 
-    # Note the unit tests won't work in LCOW mode as I turned off loading the base images above.
     # Run the unit tests inside a container unless SKIP_UNIT_TESTS is defined
-    if (($null -eq $env:LCOW_MODE) -and ($null -eq $env:LCOW_BASIC_MODE)) {
-        if ($null -eq $env:SKIP_UNIT_TESTS) {
-            $ContainerNameForUnitTests = $COMMITHASH + "_UnitTests"
-            Write-Host -ForegroundColor Cyan "INFO: Running unit tests at $(Get-Date)..."
-            $ErrorActionPreference = "SilentlyContinue"
-            $Duration=$(Measure-Command {docker run --name $ContainerNameForUnitTests -e DOCKER_GITCOMMIT=$COMMITHASH$CommitUnsupported docker hack\make.ps1 -TestUnit | Out-Host })
-            $TestRunExitCode = $LastExitCode
-            $ErrorActionPreference = "Stop"
-
-            # Saving where jenkins will take a look at.....
-            New-Item -Force -ItemType Directory bundles | Out-Null
-            $unitTestsContPath="$ContainerNameForUnitTests`:c`:\gopath\src\github.com\docker\docker\bundles"
-            $JunitExpectedContFilePath = "$unitTestsContPath\junit-report-unit-tests.xml"
-            docker cp $JunitExpectedContFilePath "bundles"
-            if (-not($LastExitCode -eq 0)) {
-                Throw "ERROR: Failed to docker cp the unit tests report ($JunitExpectedContFilePath) to bundles"
-            }
+    if ($null -eq $env:SKIP_UNIT_TESTS) {
+        $ContainerNameForUnitTests = $COMMITHASH + "_UnitTests"
+        Write-Host -ForegroundColor Cyan "INFO: Running unit tests at $(Get-Date)..."
+        $ErrorActionPreference = "SilentlyContinue"
+        $Duration=$(Measure-Command {docker run --name $ContainerNameForUnitTests -e DOCKER_GITCOMMIT=$COMMITHASH$CommitUnsupported docker hack\make.ps1 -TestUnit | Out-Host })
+        $TestRunExitCode = $LastExitCode
+        $ErrorActionPreference = "Stop"
 
-            if (Test-Path "bundles\junit-report-unit-tests.xml") {
-                Write-Host -ForegroundColor Magenta "INFO: Unit tests results(bundles\junit-report-unit-tests.xml) exist. pwd=$pwd"
-            } else {
-                Write-Host -ForegroundColor Magenta "ERROR: Unit tests results(bundles\junit-report-unit-tests.xml) do not exist. pwd=$pwd"
-            }
+        # Saving where jenkins will take a look at.....
+        New-Item -Force -ItemType Directory bundles | Out-Null
+        $unitTestsContPath="$ContainerNameForUnitTests`:c`:\gopath\src\github.com\docker\docker\bundles"
+        $JunitExpectedContFilePath = "$unitTestsContPath\junit-report-unit-tests.xml"
+        docker cp $JunitExpectedContFilePath "bundles"
+        if (-not($LastExitCode -eq 0)) {
+            Throw "ERROR: Failed to docker cp the unit tests report ($JunitExpectedContFilePath) to bundles"
+        }
 
-            if (-not($TestRunExitCode -eq 0)) {
-                Throw "ERROR: Unit tests failed"
-            }
-            Write-Host  -ForegroundColor Green "INFO: Unit tests ended at $(Get-Date). Duration`:$Duration"
+        if (Test-Path "bundles\junit-report-unit-tests.xml") {
+            Write-Host -ForegroundColor Magenta "INFO: Unit tests results(bundles\junit-report-unit-tests.xml) exist. pwd=$pwd"
         } else {
-            Write-Host -ForegroundColor Magenta "WARN: Skipping unit tests"
+            Write-Host -ForegroundColor Magenta "ERROR: Unit tests results(bundles\junit-report-unit-tests.xml) do not exist. pwd=$pwd"
         }
+
+        if (-not($TestRunExitCode -eq 0)) {
+            Throw "ERROR: Unit tests failed"
+        }
+        Write-Host  -ForegroundColor Green "INFO: Unit tests ended at $(Get-Date). Duration`:$Duration"
+    } else {
+        Write-Host -ForegroundColor Magenta "WARN: Skipping unit tests"
     }
 
     # Add the Windows busybox image. Needed for WCOW integration tests
-    if (($null -eq $env:LCOW_MODE) -and ($null -eq $env:LCOW_BASIC_MODE)) {
-        if ($null -eq $env:SKIP_INTEGRATION_TESTS) {
-            Write-Host -ForegroundColor Green "INFO: Building busybox"
-            $ErrorActionPreference = "SilentlyContinue"
-            $(& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" build  -t busybox --build-arg WINDOWS_BASE_IMAGE --build-arg WINDOWS_BASE_IMAGE_TAG "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker\contrib\busybox\" | Out-Host)
-            $ErrorActionPreference = "Stop"
-            if (-not($LastExitCode -eq 0)) {
-                Throw "ERROR: Failed to build busybox image"
-            }
+    if ($null -eq $env:SKIP_INTEGRATION_TESTS) {
+        Write-Host -ForegroundColor Green "INFO: Building busybox"
+        $ErrorActionPreference = "SilentlyContinue"
+        $(& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" build  -t busybox --build-arg WINDOWS_BASE_IMAGE --build-arg WINDOWS_BASE_IMAGE_TAG "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker\contrib\busybox\" | Out-Host)
+        $ErrorActionPreference = "Stop"
+        if (-not($LastExitCode -eq 0)) {
+            Throw "ERROR: Failed to build busybox image"
+        }
 
-            Write-Host -ForegroundColor Green "INFO: Docker images of the daemon under test"
-            Write-Host 
-            $ErrorActionPreference = "SilentlyContinue"
-            & "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" images
-            $ErrorActionPreference = "Stop"
-            if ($LastExitCode -ne 0) {
-                Throw "ERROR: The daemon under test does not appear to be running."
-            }
-            Write-Host
+        Write-Host -ForegroundColor Green "INFO: Docker images of the daemon under test"
+        Write-Host
+        $ErrorActionPreference = "SilentlyContinue"
+        & "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" images
+        $ErrorActionPreference = "Stop"
+        if ($LastExitCode -ne 0) {
+            Throw "ERROR: The daemon under test does not appear to be running."
         }
+        Write-Host
     }
 
     # Run the WCOW integration tests unless SKIP_INTEGRATION_TESTS is defined
-    if (($null -eq $env:LCOW_MODE) -and ($null -eq $env:LCOW_BASIC_MODE)) {
-        if ($null -eq $env:SKIP_INTEGRATION_TESTS) {
-            Write-Host -ForegroundColor Cyan "INFO: Running integration tests at $(Get-Date)..."
-            $ErrorActionPreference = "SilentlyContinue"
-    
-            # Location of the daemon under test.
-            $env:OrigDOCKER_HOST="$env:DOCKER_HOST"
-    
-            #https://blogs.technet.microsoft.com/heyscriptingguy/2011/09/20/solve-problems-with-external-command-lines-in-powershell/ is useful to see tokenising
-            $jsonFilePath = "..\\bundles\\go-test-report-intcli-tests.json"
-            $xmlFilePath = "..\\bundles\\junit-report-intcli-tests.xml"
-            $c = "gotestsum --format=standard-verbose --jsonfile=$jsonFilePath --junitfile=$xmlFilePath -- "
-            if ($null -ne $env:INTEGRATION_TEST_NAME) { # Makes is quicker for debugging to be able to run only a subset of the integration tests
-                $c += "`"-test.run`" "
-                $c += "`"$env:INTEGRATION_TEST_NAME`" "
-                Write-Host -ForegroundColor Magenta "WARN: Only running integration tests matching $env:INTEGRATION_TEST_NAME"
-            }
-            $c += "`"-tags`" " + "`"autogen`" "
-            $c += "`"-test.timeout`" " + "`"200m`" "
-    
-            if ($null -ne $env:INTEGRATION_IN_CONTAINER) {
-                Write-Host -ForegroundColor Green "INFO: Integration tests being run inside a container"
-                # Note we talk back through the containers gateway address
-                # And the ridiculous lengths we have to go to get the default gateway address... (GetNetIPConfiguration doesn't work in nanoserver)
-                # I just could not get the escaping to work in a single command, so output $c to a file and run that in the container instead...
-                # Not the prettiest, but it works.
-                $c | Out-File -Force "$env:TEMP\binary\runIntegrationCLI.ps1"
-                $Duration= $(Measure-Command { & docker run `
-                                                        --rm `
-                                                        -e c=$c `
-                                                        --workdir "c`:\gopath\src\github.com\docker\docker\integration-cli" `
-                                                        -v "$env:TEMP\binary`:c:\target" `
-                                                        docker `
-                                                        "`$env`:PATH`='c`:\target;'+`$env:PATH`;  `$env:DOCKER_HOST`='tcp`://'+(ipconfig | select -last 1).Substring(39)+'`:2357'; c:\target\runIntegrationCLI.ps1" | Out-Host } )
-            } else  {
-                $env:DOCKER_HOST=$DASHH_CUT  
-                $env:PATH="$env:TEMP\binary;$env:PATH;"  # Force to use the test binaries, not the host ones.
-                $env:GO111MODULE="off"
-                Write-Host -ForegroundColor Green "INFO: DOCKER_HOST at $DASHH_CUT"
-
-                $ErrorActionPreference = "SilentlyContinue"
-                Write-Host -ForegroundColor Cyan "INFO: Integration API tests being run from the host:"
-                $start=(Get-Date); Invoke-Expression ".\hack\make.ps1 -TestIntegration"; $Duration=New-Timespan -Start $start -End (Get-Date)
-                $IntTestsRunResult = $LastExitCode
-                $ErrorActionPreference = "Stop"
-                if (-not($IntTestsRunResult -eq 0)) {
-                    Throw "ERROR: Integration API tests failed at $(Get-Date). Duration`:$Duration"
-                }
+    if ($null -eq $env:SKIP_INTEGRATION_TESTS) {
+        Write-Host -ForegroundColor Cyan "INFO: Running integration tests at $(Get-Date)..."
+        $ErrorActionPreference = "SilentlyContinue"
 
-                $ErrorActionPreference = "SilentlyContinue"
-                Write-Host -ForegroundColor Green "INFO: Integration CLI tests being run from the host:"
-                Write-Host -ForegroundColor Green "INFO: $c"
-                Set-Location "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker\integration-cli"
-                # Explicit to not use measure-command otherwise don't get output as it goes
-                $start=(Get-Date); Invoke-Expression $c; $Duration=New-Timespan -Start $start -End (Get-Date)
-            }
-            $ErrorActionPreference = "Stop"
-            if (-not($LastExitCode -eq 0)) {
-                Throw "ERROR: Integration CLI tests failed at $(Get-Date). Duration`:$Duration"
-            }
-            Write-Host  -ForegroundColor Green "INFO: Integration tests ended at $(Get-Date). Duration`:$Duration"
-        } else {
-            Write-Host -ForegroundColor Magenta "WARN: Skipping integration tests"
+        # Location of the daemon under test.
+        $env:OrigDOCKER_HOST="$env:DOCKER_HOST"
+
+        #https://blogs.technet.microsoft.com/heyscriptingguy/2011/09/20/solve-problems-with-external-command-lines-in-powershell/ is useful to see tokenising
+        $jsonFilePath = "..\\bundles\\go-test-report-intcli-tests.json"
+        $xmlFilePath = "..\\bundles\\junit-report-intcli-tests.xml"
+        $c = "gotestsum --format=standard-verbose --jsonfile=$jsonFilePath --junitfile=$xmlFilePath -- "
+        if ($null -ne $env:INTEGRATION_TEST_NAME) { # Makes it quicker for debugging to be able to run only a subset of the integration tests
+            $c += "`"-test.run`" "
+            $c += "`"$env:INTEGRATION_TEST_NAME`" "
+            Write-Host -ForegroundColor Magenta "WARN: Only running integration tests matching $env:INTEGRATION_TEST_NAME"
         }
-    } else {
-        # The LCOW version of the tests here
-        if ($null -eq $env:SKIP_INTEGRATION_TESTS) {
-            Write-Host -ForegroundColor Cyan "INFO: Running LCOW tests at $(Get-Date)..."
-
-            $ErrorActionPreference = "SilentlyContinue"
-    
-            # Location of the daemon under test.
-            $env:OrigDOCKER_HOST="$env:DOCKER_HOST"
-
-            # Make sure we are pointing at the DUT
-            $env:DOCKER_HOST=$DASHH_CUT  
+        $c += "`"-tags`" " + "`"autogen`" "
+        $c += "`"-test.timeout`" " + "`"200m`" "
+
+        if ($null -ne $env:INTEGRATION_IN_CONTAINER) {
+            Write-Host -ForegroundColor Green "INFO: Integration tests being run inside a container"
+            # Note we talk back through the containers gateway address
+            # And the ridiculous lengths we have to go to get the default gateway address... (GetNetIPConfiguration doesn't work in nanoserver)
+            # I just could not get the escaping to work in a single command, so output $c to a file and run that in the container instead...
+            # Not the prettiest, but it works.
+            $c | Out-File -Force "$env:TEMP\binary\runIntegrationCLI.ps1"
+            $Duration= $(Measure-Command { & docker run `
+                                                    --rm `
+                                                    -e c=$c `
+                                                    --workdir "c`:\gopath\src\github.com\docker\docker\integration-cli" `
+                                                    -v "$env:TEMP\binary`:c:\target" `
+                                                    docker `
+                                                    "`$env`:PATH`='c`:\target;'+`$env:PATH`;  `$env:DOCKER_HOST`='tcp`://'+(ipconfig | select -last 1).Substring(39)+'`:2357'; c:\target\runIntegrationCLI.ps1" | Out-Host } )
+        } else  {
+            $env:DOCKER_HOST=$DASHH_CUT
+            $env:PATH="$env:TEMP\binary;$env:PATH;"  # Force to use the test binaries, not the host ones.
+            $env:GO111MODULE="off"
             Write-Host -ForegroundColor Green "INFO: DOCKER_HOST at $DASHH_CUT"
 
-            # Force to use the test binaries, not the host ones.
-            $env:PATH="$env:TEMP\binary;$env:PATH;"  
-
-            if ($null -ne $env:LCOW_BASIC_MODE) {
-                $wc = New-Object net.webclient
-                try {
-                    Write-Host -ForegroundColor green "INFO: Downloading latest execution script..."
-                    $wc.Downloadfile("https://raw.githubusercontent.com/kevpar/docker-w2wCIScripts/master/runCI/lcowbasicvalidation.ps1", "$env:TEMP\binary\lcowbasicvalidation.ps1")
-                } 
-                catch [System.Net.WebException]
-                {
-                    Throw ("Failed to download: $_")
-                }
-
-                # Explicit to not use measure-command otherwise don't get output as it goes
-                $ErrorActionPreference = "Stop"
-                $start=(Get-Date); Invoke-Expression "powershell $env:TEMP\binary\lcowbasicvalidation.ps1"; $lec=$lastExitCode; $Duration=New-Timespan -Start $start -End (Get-Date)
-                $Duration=New-Timespan -Start $start -End (Get-Date)
-                Write-Host  -ForegroundColor Green "INFO: LCOW tests ended at $(Get-Date). Duration`:$Duration"
-                if ($lec -ne 0) {
-                    Throw "LCOW validation tests failed"
-                }
-            } else {
-                #https://blogs.technet.microsoft.com/heyscriptingguy/2011/09/20/solve-problems-with-external-command-lines-in-powershell/ is useful to see tokenising
-                $c = "go test "
-                $c += "`"-test.v`" "
-                if ($null -ne $env:INTEGRATION_TEST_NAME) { # Makes is quicker for debugging to be able to run only a subset of the integration tests
-                    $c += "`"-test.run`" "
-                    $c += "`"$env:INTEGRATION_TEST_NAME`" "
-                    Write-Host -ForegroundColor Magenta "WARN: Only running LCOW integration tests matching $env:INTEGRATION_TEST_NAME"
-                }
-                $c += "`"-tags`" " + "`"autogen`" "
-                $c += "`"-test.timeout`" " + "`"200m`" "
-
-                Write-Host -ForegroundColor Green "INFO: LCOW Integration tests being run from the host:"
-                Set-Location "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker\integration-cli"
-                Write-Host -ForegroundColor Green "INFO: $c"
-                Write-Host -ForegroundColor Green "INFO: DOCKER_HOST at $DASHH_CUT"
-                # Explicit to not use measure-command otherwise don't get output as it goes
-                $start=(Get-Date); Invoke-Expression $c; $Duration=New-Timespan -Start $start -End (Get-Date)
-
-            }
+            $ErrorActionPreference = "SilentlyContinue"
+            Write-Host -ForegroundColor Cyan "INFO: Integration API tests being run from the host:"
+            $start=(Get-Date); Invoke-Expression ".\hack\make.ps1 -TestIntegration"; $Duration=New-Timespan -Start $start -End (Get-Date)
+            $IntTestsRunResult = $LastExitCode
             $ErrorActionPreference = "Stop"
-            if (-not($LastExitCode -eq 0)) {
-                Throw "ERROR: Integration tests failed at $(Get-Date). Duration`:$Duration"
+            if (-not($IntTestsRunResult -eq 0)) {
+                Throw "ERROR: Integration API tests failed at $(Get-Date). Duration`:$Duration"
             }
-            Write-Host  -ForegroundColor Green "INFO: Integration tests ended at $(Get-Date). Duration`:$Duration"
-        } else {
-            Write-Host -ForegroundColor Magenta "WARN: Skipping LCOW tests"
+
+            $ErrorActionPreference = "SilentlyContinue"
+            Write-Host -ForegroundColor Green "INFO: Integration CLI tests being run from the host:"
+            Write-Host -ForegroundColor Green "INFO: $c"
+            Set-Location "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker\integration-cli"
+            # Explicit to not use measure-command otherwise don't get output as it goes
+            $start=(Get-Date); Invoke-Expression $c; $Duration=New-Timespan -Start $start -End (Get-Date)
+        }
+        $ErrorActionPreference = "Stop"
+        if (-not($LastExitCode -eq 0)) {
+            Throw "ERROR: Integration CLI tests failed at $(Get-Date). Duration`:$Duration"
         }
+        Write-Host  -ForegroundColor Green "INFO: Integration tests ended at $(Get-Date). Duration`:$Duration"
+    } else {
+        Write-Host -ForegroundColor Magenta "WARN: Skipping integration tests"
     }
 
     # Docker info now to get counts (after or if jjh/containercounts is merged)

+ 2 - 6
image/tarexport/load.go

@@ -426,12 +426,8 @@ func checkCompatibleOS(imageOS string) error {
 		return fmt.Errorf("cannot load %s image on %s", imageOS, runtime.GOOS)
 	}
 
-	p, err := platforms.Parse(imageOS)
-	if err != nil {
-		return err
-	}
-
-	return system.ValidatePlatform(p)
+	_, err := platforms.Parse(imageOS)
+	return err
 }
 
 func validateManifest(manifest []manifestItem) error {

+ 0 - 48
pkg/system/lcow.go

@@ -1,48 +0,0 @@
-// +build windows,!no_lcow
-
-package system // import "github.com/docker/docker/pkg/system"
-
-import (
-	"strings"
-
-	"github.com/Microsoft/hcsshim/osversion"
-	specs "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
-)
-
-var (
-	// lcowSupported determines if Linux Containers on Windows are supported.
-	lcowSupported = false
-)
-
-// InitLCOW sets whether LCOW is supported or not. Requires RS5+
-func InitLCOW(experimental bool) {
-	if experimental && osversion.Build() >= osversion.RS5 {
-		lcowSupported = true
-	}
-}
-
-func LCOWSupported() bool {
-	return lcowSupported
-}
-
-// ValidatePlatform determines if a platform structure is valid.
-// TODO This is a temporary windows-only function, should be replaced by
-// comparison of worker capabilities
-func ValidatePlatform(platform specs.Platform) error {
-	if !IsOSSupported(platform.OS) {
-		return errors.Errorf("unsupported os %s", platform.OS)
-	}
-	return nil
-}
-
-// IsOSSupported determines if an operating system is supported by the host
-func IsOSSupported(os string) bool {
-	if strings.EqualFold("windows", os) {
-		return true
-	}
-	if LCOWSupported() && strings.EqualFold(os, "linux") {
-		return true
-	}
-	return false
-}

+ 0 - 13
pkg/system/lcow_unsupported.go

@@ -1,27 +1,14 @@
-// +build !windows windows,no_lcow
-
 package system // import "github.com/docker/docker/pkg/system"
 import (
 	"runtime"
 	"strings"
-
-	specs "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
-// InitLCOW does nothing since LCOW is a windows only feature
-func InitLCOW(_ bool) {}
-
 // LCOWSupported returns true if Linux containers on Windows are supported.
 func LCOWSupported() bool {
 	return false
 }
 
-// ValidatePlatform determines if a platform structure is valid. This function
-// is used for LCOW, and is a no-op on non-windows platforms.
-func ValidatePlatform(_ specs.Platform) error {
-	return nil
-}
-
 // IsOSSupported determines if an operating system is supported by the host.
 func IsOSSupported(os string) bool {
 	return strings.EqualFold(runtime.GOOS, os)

+ 5 - 27
pkg/system/path.go

@@ -1,24 +1,15 @@
 package system // import "github.com/docker/docker/pkg/system"
 
-import (
-	"fmt"
-	"path/filepath"
-	"runtime"
-	"strings"
-)
-
 const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
 
 // DefaultPathEnv is unix style list of directories to search for
 // executables. Each directory is separated from the next by a colon
 // ':' character .
+// For Windows containers, an empty string is returned as the default
+// path will be set by the container, and Docker has no context of what the
+// default path should be.
 func DefaultPathEnv(os string) string {
-	if runtime.GOOS == "windows" {
-		if os != runtime.GOOS {
-			return defaultUnixPathEnv
-		}
-		// Deliberately empty on Windows containers on Windows as the default path will be set by
-		// the container. Docker has no context of what the default path should be.
+	if os == "windows" {
 		return ""
 	}
 	return defaultUnixPathEnv
@@ -47,18 +38,5 @@ type PathVerifier interface {
 // /a			--> \a
 // d:\			--> Fail
 func CheckSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) {
-	if runtime.GOOS != "windows" || LCOWSupported() {
-		return path, nil
-	}
-
-	if len(path) == 2 && string(path[1]) == ":" {
-		return "", fmt.Errorf("No relative path specified in %q", path)
-	}
-	if !driver.IsAbs(path) || len(path) < 2 {
-		return filepath.FromSlash(path), nil
-	}
-	if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
-		return "", fmt.Errorf("The specified path is not on the system drive (C:)")
-	}
-	return filepath.FromSlash(path[2:]), nil
+	return checkSystemDriveAndRemoveDriveLetter(path, driver)
 }

+ 6 - 0
pkg/system/path_unix.go

@@ -8,3 +8,9 @@ package system // import "github.com/docker/docker/pkg/system"
 func GetLongPathName(path string) (string, error) {
 	return path, nil
 }
+
+// checkSystemDriveAndRemoveDriveLetter is the non-Windows implementation
+// of CheckSystemDriveAndRemoveDriveLetter
+func checkSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) {
+	return path, nil
+}

+ 22 - 1
pkg/system/path_windows.go

@@ -1,6 +1,12 @@
 package system // import "github.com/docker/docker/pkg/system"
 
-import "golang.org/x/sys/windows"
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+
+	"golang.org/x/sys/windows"
+)
 
 // GetLongPathName converts Windows short pathnames to full pathnames.
 // For example C:\Users\ADMIN~1 --> C:\Users\Administrator.
@@ -25,3 +31,18 @@ func GetLongPathName(path string) (string, error) {
 	}
 	return windows.UTF16ToString(b), nil
 }
+
+// checkSystemDriveAndRemoveDriveLetter is the Windows implementation
+// of CheckSystemDriveAndRemoveDriveLetter
+func checkSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) {
+	if len(path) == 2 && string(path[1]) == ":" {
+		return "", fmt.Errorf("No relative path specified in %q", path)
+	}
+	if !driver.IsAbs(path) || len(path) < 2 {
+		return filepath.FromSlash(path), nil
+	}
+	if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+		return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+	}
+	return filepath.FromSlash(path[2:]), nil
+}

+ 0 - 6
project/PACKAGERS.md

@@ -185,12 +185,6 @@ NOTE: if you need to set more than one build tag, space separate them:
 export DOCKER_BUILDTAGS='apparmor exclude_graphdriver_aufs'
 ```
 
-### LCOW (Linux Containers On Windows)
-
-LCOW is an experimental feature on Windows, and requires the daemon to run with
-experimental features enabled. Use the `no_lcow` build tag to disable the LCOW
-feature at compile time, 
-
 ### Static Daemon
 
 If it is feasible within the constraints of your distribution, you should

+ 0 - 1325
vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go

@@ -1,1325 +0,0 @@
-package compactext4
-
-import (
-	"bufio"
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"path"
-	"sort"
-	"strings"
-	"time"
-
-	"github.com/Microsoft/hcsshim/ext4/internal/format"
-)
-
-// Writer writes a compact ext4 file system.
-type Writer struct {
-	f                    io.ReadWriteSeeker
-	bw                   *bufio.Writer
-	inodes               []*inode
-	curName              string
-	curInode             *inode
-	pos                  int64
-	dataWritten, dataMax int64
-	err                  error
-	initialized          bool
-	supportInlineData    bool
-	maxDiskSize          int64
-	gdBlocks             uint32
-}
-
-// Mode flags for Linux files.
-const (
-	S_IXOTH  = format.S_IXOTH
-	S_IWOTH  = format.S_IWOTH
-	S_IROTH  = format.S_IROTH
-	S_IXGRP  = format.S_IXGRP
-	S_IWGRP  = format.S_IWGRP
-	S_IRGRP  = format.S_IRGRP
-	S_IXUSR  = format.S_IXUSR
-	S_IWUSR  = format.S_IWUSR
-	S_IRUSR  = format.S_IRUSR
-	S_ISVTX  = format.S_ISVTX
-	S_ISGID  = format.S_ISGID
-	S_ISUID  = format.S_ISUID
-	S_IFIFO  = format.S_IFIFO
-	S_IFCHR  = format.S_IFCHR
-	S_IFDIR  = format.S_IFDIR
-	S_IFBLK  = format.S_IFBLK
-	S_IFREG  = format.S_IFREG
-	S_IFLNK  = format.S_IFLNK
-	S_IFSOCK = format.S_IFSOCK
-
-	TypeMask = format.TypeMask
-)
-
-type inode struct {
-	Size                        int64
-	Atime, Ctime, Mtime, Crtime uint64
-	Number                      format.InodeNumber
-	Mode                        uint16
-	Uid, Gid                    uint32
-	LinkCount                   uint32
-	XattrBlock                  uint32
-	BlockCount                  uint32
-	Devmajor, Devminor          uint32
-	Flags                       format.InodeFlag
-	Data                        []byte
-	XattrInline                 []byte
-	Children                    directory
-}
-
-func (node *inode) FileType() uint16 {
-	return node.Mode & format.TypeMask
-}
-
-func (node *inode) IsDir() bool {
-	return node.FileType() == S_IFDIR
-}
-
-// A File represents a file to be added to an ext4 file system.
-type File struct {
-	Linkname                    string
-	Size                        int64
-	Mode                        uint16
-	Uid, Gid                    uint32
-	Atime, Ctime, Mtime, Crtime time.Time
-	Devmajor, Devminor          uint32
-	Xattrs                      map[string][]byte
-}
-
-const (
-	inodeFirst        = 11
-	inodeLostAndFound = inodeFirst
-
-	blockSize               = 4096
-	blocksPerGroup          = blockSize * 8
-	inodeSize               = 256
-	maxInodesPerGroup       = blockSize * 8 // Limited by the inode bitmap
-	inodesPerGroupIncrement = blockSize / inodeSize
-
-	defaultMaxDiskSize = 16 * 1024 * 1024 * 1024        // 16GB
-	maxMaxDiskSize     = 16 * 1024 * 1024 * 1024 * 1024 // 16TB
-
-	groupDescriptorSize      = 32 // Use the small group descriptor
-	groupsPerDescriptorBlock = blockSize / groupDescriptorSize
-
-	maxFileSize             = 128 * 1024 * 1024 * 1024 // 128GB file size maximum for now
-	smallSymlinkSize        = 59                       // max symlink size that goes directly in the inode
-	maxBlocksPerExtent      = 0x8000                   // maximum number of blocks in an extent
-	inodeDataSize           = 60
-	inodeUsedSize           = 152 // fields through CrtimeExtra
-	inodeExtraSize          = inodeSize - inodeUsedSize
-	xattrInodeOverhead      = 4 + 4                       // magic number + empty next entry value
-	xattrBlockOverhead      = 32 + 4                      // header + empty next entry value
-	inlineDataXattrOverhead = xattrInodeOverhead + 16 + 4 // entry + "data"
-	inlineDataSize          = inodeDataSize + inodeExtraSize - inlineDataXattrOverhead
-)
-
-type exceededMaxSizeError struct {
-	Size int64
-}
-
-func (err exceededMaxSizeError) Error() string {
-	return fmt.Sprintf("disk exceeded maximum size of %d bytes", err.Size)
-}
-
-var directoryEntrySize = binary.Size(format.DirectoryEntry{})
-var extraIsize = uint16(inodeUsedSize - 128)
-
-type directory map[string]*inode
-
-func splitFirst(p string) (string, string) {
-	n := strings.IndexByte(p, '/')
-	if n >= 0 {
-		return p[:n], p[n+1:]
-	}
-	return p, ""
-}
-
-func (w *Writer) findPath(root *inode, p string) *inode {
-	inode := root
-	for inode != nil && len(p) != 0 {
-		name, rest := splitFirst(p)
-		p = rest
-		inode = inode.Children[name]
-	}
-	return inode
-}
-
-func timeToFsTime(t time.Time) uint64 {
-	if t.IsZero() {
-		return 0
-	}
-	s := t.Unix()
-	if s < -0x80000000 {
-		return 0x80000000
-	}
-	if s > 0x37fffffff {
-		return 0x37fffffff
-	}
-	return uint64(s) | uint64(t.Nanosecond())<<34
-}
-
-func fsTimeToTime(t uint64) time.Time {
-	if t == 0 {
-		return time.Time{}
-	}
-	s := int64(t & 0x3ffffffff)
-	if s > 0x7fffffff && s < 0x100000000 {
-		s = int64(int32(uint32(s)))
-	}
-	return time.Unix(s, int64(t>>34))
-}
-
-func (w *Writer) getInode(i format.InodeNumber) *inode {
-	if i == 0 || int(i) > len(w.inodes) {
-		return nil
-	}
-	return w.inodes[i-1]
-}
-
-var xattrPrefixes = []struct {
-	Index  uint8
-	Prefix string
-}{
-	{2, "system.posix_acl_access"},
-	{3, "system.posix_acl_default"},
-	{8, "system.richacl"},
-	{7, "system."},
-	{1, "user."},
-	{4, "trusted."},
-	{6, "security."},
-}
-
-func compressXattrName(name string) (uint8, string) {
-	for _, p := range xattrPrefixes {
-		if strings.HasPrefix(name, p.Prefix) {
-			return p.Index, name[len(p.Prefix):]
-		}
-	}
-	return 0, name
-}
-
-func decompressXattrName(index uint8, name string) string {
-	for _, p := range xattrPrefixes {
-		if index == p.Index {
-			return p.Prefix + name
-		}
-	}
-	return name
-}
-
-func hashXattrEntry(name string, value []byte) uint32 {
-	var hash uint32
-	for i := 0; i < len(name); i++ {
-		hash = (hash << 5) ^ (hash >> 27) ^ uint32(name[i])
-	}
-
-	for i := 0; i+3 < len(value); i += 4 {
-		hash = (hash << 16) ^ (hash >> 16) ^ binary.LittleEndian.Uint32(value[i:i+4])
-	}
-
-	if len(value)%4 != 0 {
-		var last [4]byte
-		copy(last[:], value[len(value)&^3:])
-		hash = (hash << 16) ^ (hash >> 16) ^ binary.LittleEndian.Uint32(last[:])
-	}
-	return hash
-}
-
-type xattr struct {
-	Name  string
-	Index uint8
-	Value []byte
-}
-
-func (x *xattr) EntryLen() int {
-	return (len(x.Name)+3)&^3 + 16
-}
-
-func (x *xattr) ValueLen() int {
-	return (len(x.Value) + 3) &^ 3
-}
-
-type xattrState struct {
-	inode, block         []xattr
-	inodeLeft, blockLeft int
-}
-
-func (s *xattrState) init() {
-	s.inodeLeft = inodeExtraSize - xattrInodeOverhead
-	s.blockLeft = blockSize - xattrBlockOverhead
-}
-
-func (s *xattrState) addXattr(name string, value []byte) bool {
-	index, name := compressXattrName(name)
-	x := xattr{
-		Index: index,
-		Name:  name,
-		Value: value,
-	}
-	length := x.EntryLen() + x.ValueLen()
-	if s.inodeLeft >= length {
-		s.inode = append(s.inode, x)
-		s.inodeLeft -= length
-	} else if s.blockLeft >= length {
-		s.block = append(s.block, x)
-		s.blockLeft -= length
-	} else {
-		return false
-	}
-	return true
-}
-
-func putXattrs(xattrs []xattr, b []byte, offsetDelta uint16) {
-	offset := uint16(len(b)) + offsetDelta
-	eb := b
-	db := b
-	for _, xattr := range xattrs {
-		vl := xattr.ValueLen()
-		offset -= uint16(vl)
-		eb[0] = uint8(len(xattr.Name))
-		eb[1] = xattr.Index
-		binary.LittleEndian.PutUint16(eb[2:], offset)
-		binary.LittleEndian.PutUint32(eb[8:], uint32(len(xattr.Value)))
-		binary.LittleEndian.PutUint32(eb[12:], hashXattrEntry(xattr.Name, xattr.Value))
-		copy(eb[16:], xattr.Name)
-		eb = eb[xattr.EntryLen():]
-		copy(db[len(db)-vl:], xattr.Value)
-		db = db[:len(db)-vl]
-	}
-}
-
-func getXattrs(b []byte, xattrs map[string][]byte, offsetDelta uint16) {
-	eb := b
-	for len(eb) != 0 {
-		nameLen := eb[0]
-		if nameLen == 0 {
-			break
-		}
-		index := eb[1]
-		offset := binary.LittleEndian.Uint16(eb[2:]) - offsetDelta
-		valueLen := binary.LittleEndian.Uint32(eb[8:])
-		attr := xattr{
-			Index: index,
-			Name:  string(eb[16 : 16+nameLen]),
-			Value: b[offset : uint32(offset)+valueLen],
-		}
-		xattrs[decompressXattrName(index, attr.Name)] = attr.Value
-		eb = eb[attr.EntryLen():]
-	}
-}
-
-func (w *Writer) writeXattrs(inode *inode, state *xattrState) error {
-	// Write the inline attributes.
-	if len(state.inode) != 0 {
-		inode.XattrInline = make([]byte, inodeExtraSize)
-		binary.LittleEndian.PutUint32(inode.XattrInline[0:], format.XAttrHeaderMagic) // Magic
-		putXattrs(state.inode, inode.XattrInline[4:], 0)
-	}
-
-	// Write the block attributes. If there was previously an xattr block, then
-	// rewrite it even if it is now empty.
-	if len(state.block) != 0 || inode.XattrBlock != 0 {
-		sort.Slice(state.block, func(i, j int) bool {
-			return state.block[i].Index < state.block[j].Index ||
-				len(state.block[i].Name) < len(state.block[j].Name) ||
-				state.block[i].Name < state.block[j].Name
-		})
-
-		var b [blockSize]byte
-		binary.LittleEndian.PutUint32(b[0:], format.XAttrHeaderMagic) // Magic
-		binary.LittleEndian.PutUint32(b[4:], 1)                       // ReferenceCount
-		binary.LittleEndian.PutUint32(b[8:], 1)                       // Blocks
-		putXattrs(state.block, b[32:], 32)
-
-		orig := w.block()
-		if inode.XattrBlock == 0 {
-			inode.XattrBlock = orig
-			inode.BlockCount++
-		} else {
-			// Reuse the original block.
-			w.seekBlock(inode.XattrBlock)
-			defer w.seekBlock(orig)
-		}
-
-		if _, err := w.write(b[:]); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (w *Writer) write(b []byte) (int, error) {
-	if w.err != nil {
-		return 0, w.err
-	}
-	if w.pos+int64(len(b)) > w.maxDiskSize {
-		w.err = exceededMaxSizeError{w.maxDiskSize}
-		return 0, w.err
-	}
-	n, err := w.bw.Write(b)
-	w.pos += int64(n)
-	w.err = err
-	return n, err
-}
-
-func (w *Writer) zero(n int64) (int64, error) {
-	if w.err != nil {
-		return 0, w.err
-	}
-	if w.pos+int64(n) > w.maxDiskSize {
-		w.err = exceededMaxSizeError{w.maxDiskSize}
-		return 0, w.err
-	}
-	n, err := io.CopyN(w.bw, zero, n)
-	w.pos += n
-	w.err = err
-	return n, err
-}
-
-func (w *Writer) makeInode(f *File, node *inode) (*inode, error) {
-	mode := f.Mode
-	if mode&format.TypeMask == 0 {
-		mode |= format.S_IFREG
-	}
-	typ := mode & format.TypeMask
-	ino := format.InodeNumber(len(w.inodes) + 1)
-	if node == nil {
-		node = &inode{
-			Number: ino,
-		}
-		if typ == S_IFDIR {
-			node.Children = make(directory)
-			node.LinkCount = 1 // A directory is linked to itself.
-		}
-	} else if node.Flags&format.InodeFlagExtents != 0 {
-		// Since we cannot deallocate or reuse blocks, don't allow updates that
-		// would invalidate data that has already been written.
-		return nil, errors.New("cannot overwrite file with non-inline data")
-	}
-	node.Mode = mode
-	node.Uid = f.Uid
-	node.Gid = f.Gid
-	node.Flags = format.InodeFlagHugeFile
-	node.Atime = timeToFsTime(f.Atime)
-	node.Ctime = timeToFsTime(f.Ctime)
-	node.Mtime = timeToFsTime(f.Mtime)
-	node.Crtime = timeToFsTime(f.Crtime)
-	node.Devmajor = f.Devmajor
-	node.Devminor = f.Devminor
-	node.Data = nil
-	node.XattrInline = nil
-
-	var xstate xattrState
-	xstate.init()
-
-	var size int64
-	switch typ {
-	case format.S_IFREG:
-		size = f.Size
-		if f.Size > maxFileSize {
-			return nil, fmt.Errorf("file too big: %d > %d", f.Size, int64(maxFileSize))
-		}
-		if f.Size <= inlineDataSize && w.supportInlineData {
-			node.Data = make([]byte, f.Size)
-			extra := 0
-			if f.Size > inodeDataSize {
-				extra = int(f.Size - inodeDataSize)
-			}
-			// Add a dummy entry for now.
-			if !xstate.addXattr("system.data", node.Data[:extra]) {
-				panic("not enough room for inline data")
-			}
-			node.Flags |= format.InodeFlagInlineData
-		}
-	case format.S_IFLNK:
-		node.Mode |= 0777 // Symlinks should appear as ugw rwx
-		size = int64(len(f.Linkname))
-		if size <= smallSymlinkSize {
-			// Special case: small symlinks go directly in Block without setting
-			// an inline data flag.
-			node.Data = make([]byte, len(f.Linkname))
-			copy(node.Data, f.Linkname)
-		}
-	case format.S_IFDIR, format.S_IFIFO, format.S_IFSOCK, format.S_IFCHR, format.S_IFBLK:
-	default:
-		return nil, fmt.Errorf("invalid mode %o", mode)
-	}
-
-	// Accumulate the extended attributes.
-	if len(f.Xattrs) != 0 {
-		// Sort the xattrs to avoid non-determinism in map iteration.
-		var xattrs []string
-		for name := range f.Xattrs {
-			xattrs = append(xattrs, name)
-		}
-		sort.Strings(xattrs)
-		for _, name := range xattrs {
-			if !xstate.addXattr(name, f.Xattrs[name]) {
-				return nil, fmt.Errorf("could not fit xattr %s", name)
-			}
-		}
-	}
-
-	if err := w.writeXattrs(node, &xstate); err != nil {
-		return nil, err
-	}
-
-	node.Size = size
-	if typ == format.S_IFLNK && size > smallSymlinkSize {
-		// Write the link name as data.
-		w.startInode("", node, size)
-		if _, err := w.Write([]byte(f.Linkname)); err != nil {
-			return nil, err
-		}
-		if err := w.finishInode(); err != nil {
-			return nil, err
-		}
-	}
-
-	if int(node.Number-1) >= len(w.inodes) {
-		w.inodes = append(w.inodes, node)
-	}
-	return node, nil
-}
-
-func (w *Writer) root() *inode {
-	return w.getInode(format.InodeRoot)
-}
-
-func (w *Writer) lookup(name string, mustExist bool) (*inode, *inode, string, error) {
-	root := w.root()
-	cleanname := path.Clean("/" + name)[1:]
-	if len(cleanname) == 0 {
-		return root, root, "", nil
-	}
-	dirname, childname := path.Split(cleanname)
-	if len(childname) == 0 || len(childname) > 0xff {
-		return nil, nil, "", fmt.Errorf("%s: invalid name", name)
-	}
-	dir := w.findPath(root, dirname)
-	if dir == nil || !dir.IsDir() {
-		return nil, nil, "", fmt.Errorf("%s: path not found", name)
-	}
-	child := dir.Children[childname]
-	if child == nil && mustExist {
-		return nil, nil, "", fmt.Errorf("%s: file not found", name)
-	}
-	return dir, child, childname, nil
-}
-
-// CreateWithParents adds a file to the file system creating the parent directories in the path if
-// they don't exist (like `mkdir -p`). These non existing parent directories are created
-// with the same permissions as that of it's parent directory. It is expected that the a
-// call to make these parent directories will be made at a later point with the correct
-// permissions, at that time the permissions of these directories will be updated.
-func (w *Writer) CreateWithParents(name string, f *File) error {
-	// go through the directories in the path one by one and create the
-	// parent directories if they don't exist.
-	cleanname := path.Clean("/" + name)[1:]
-	parentDirs, _ := path.Split(cleanname)
-	currentPath := ""
-	root := w.root()
-	dirname := ""
-	for parentDirs != "" {
-		dirname, parentDirs = splitFirst(parentDirs)
-		currentPath += "/" + dirname
-		if _, ok := root.Children[dirname]; !ok {
-			f := &File{
-				Mode:     root.Mode,
-				Atime:    time.Now(),
-				Mtime:    time.Now(),
-				Ctime:    time.Now(),
-				Crtime:   time.Now(),
-				Size:     0,
-				Uid:      root.Uid,
-				Gid:      root.Gid,
-				Devmajor: root.Devmajor,
-				Devminor: root.Devminor,
-				Xattrs:   make(map[string][]byte),
-			}
-			if err := w.Create(currentPath, f); err != nil {
-				return fmt.Errorf("failed while creating parent directories: %w", err)
-			}
-		}
-		root = root.Children[dirname]
-	}
-	return w.Create(name, f)
-}
-
-// Create adds a file to the file system.
-func (w *Writer) Create(name string, f *File) error {
-	if err := w.finishInode(); err != nil {
-		return err
-	}
-	dir, existing, childname, err := w.lookup(name, false)
-	if err != nil {
-		return err
-	}
-	var reuse *inode
-	if existing != nil {
-		if existing.IsDir() {
-			if f.Mode&TypeMask != S_IFDIR {
-				return fmt.Errorf("%s: cannot replace a directory with a file", name)
-			}
-			reuse = existing
-		} else if f.Mode&TypeMask == S_IFDIR {
-			return fmt.Errorf("%s: cannot replace a file with a directory", name)
-		} else if existing.LinkCount < 2 {
-			reuse = existing
-		}
-	} else {
-		if f.Mode&TypeMask == S_IFDIR && dir.LinkCount >= format.MaxLinks {
-			return fmt.Errorf("%s: exceeded parent directory maximum link count", name)
-		}
-	}
-	child, err := w.makeInode(f, reuse)
-	if err != nil {
-		return fmt.Errorf("%s: %s", name, err)
-	}
-	if existing != child {
-		if existing != nil {
-			existing.LinkCount--
-		}
-		dir.Children[childname] = child
-		child.LinkCount++
-		if child.IsDir() {
-			dir.LinkCount++
-		}
-	}
-	if child.Mode&format.TypeMask == format.S_IFREG {
-		w.startInode(name, child, f.Size)
-	}
-	return nil
-}
-
-// Link adds a hard link to the file system.
-func (w *Writer) Link(oldname, newname string) error {
-	if err := w.finishInode(); err != nil {
-		return err
-	}
-	newdir, existing, newchildname, err := w.lookup(newname, false)
-	if err != nil {
-		return err
-	}
-	if existing != nil && (existing.IsDir() || existing.LinkCount < 2) {
-		return fmt.Errorf("%s: cannot orphan existing file or directory", newname)
-	}
-
-	_, oldfile, _, err := w.lookup(oldname, true)
-	if err != nil {
-		return err
-	}
-	switch oldfile.Mode & format.TypeMask {
-	case format.S_IFDIR, format.S_IFLNK:
-		return fmt.Errorf("%s: link target cannot be a directory or symlink: %s", newname, oldname)
-	}
-
-	if existing != oldfile && oldfile.LinkCount >= format.MaxLinks {
-		return fmt.Errorf("%s: link target would exceed maximum link count: %s", newname, oldname)
-	}
-
-	if existing != nil {
-		existing.LinkCount--
-	}
-	oldfile.LinkCount++
-	newdir.Children[newchildname] = oldfile
-	return nil
-}
-
-// Stat returns information about a file that has been written.
-func (w *Writer) Stat(name string) (*File, error) {
-	if err := w.finishInode(); err != nil {
-		return nil, err
-	}
-	_, node, _, err := w.lookup(name, true)
-	if err != nil {
-		return nil, err
-	}
-	f := &File{
-		Size:     node.Size,
-		Mode:     node.Mode,
-		Uid:      node.Uid,
-		Gid:      node.Gid,
-		Atime:    fsTimeToTime(node.Atime),
-		Ctime:    fsTimeToTime(node.Ctime),
-		Mtime:    fsTimeToTime(node.Mtime),
-		Crtime:   fsTimeToTime(node.Crtime),
-		Devmajor: node.Devmajor,
-		Devminor: node.Devminor,
-	}
-	f.Xattrs = make(map[string][]byte)
-	if node.XattrBlock != 0 || len(node.XattrInline) != 0 {
-		if node.XattrBlock != 0 {
-			orig := w.block()
-			w.seekBlock(node.XattrBlock)
-			if w.err != nil {
-				return nil, w.err
-			}
-			var b [blockSize]byte
-			_, err := w.f.Read(b[:])
-			w.seekBlock(orig)
-			if err != nil {
-				return nil, err
-			}
-			getXattrs(b[32:], f.Xattrs, 32)
-		}
-		if len(node.XattrInline) != 0 {
-			getXattrs(node.XattrInline[4:], f.Xattrs, 0)
-			delete(f.Xattrs, "system.data")
-		}
-	}
-	if node.FileType() == S_IFLNK {
-		if node.Size > smallSymlinkSize {
-			return nil, fmt.Errorf("%s: cannot retrieve link information", name)
-		}
-		f.Linkname = string(node.Data)
-	}
-	return f, nil
-}
-
-func (w *Writer) Write(b []byte) (int, error) {
-	if len(b) == 0 {
-		return 0, nil
-	}
-	if w.dataWritten+int64(len(b)) > w.dataMax {
-		return 0, fmt.Errorf("%s: wrote too much: %d > %d", w.curName, w.dataWritten+int64(len(b)), w.dataMax)
-	}
-
-	if w.curInode.Flags&format.InodeFlagInlineData != 0 {
-		copy(w.curInode.Data[w.dataWritten:], b)
-		w.dataWritten += int64(len(b))
-		return len(b), nil
-	}
-
-	n, err := w.write(b)
-	w.dataWritten += int64(n)
-	return n, err
-}
-
-func (w *Writer) startInode(name string, inode *inode, size int64) {
-	if w.curInode != nil {
-		panic("inode already in progress")
-	}
-	w.curName = name
-	w.curInode = inode
-	w.dataWritten = 0
-	w.dataMax = size
-}
-
-func (w *Writer) block() uint32 {
-	return uint32(w.pos / blockSize)
-}
-
-func (w *Writer) seekBlock(block uint32) {
-	w.pos = int64(block) * blockSize
-	if w.err != nil {
-		return
-	}
-	w.err = w.bw.Flush()
-	if w.err != nil {
-		return
-	}
-	_, w.err = w.f.Seek(w.pos, io.SeekStart)
-}
-
-func (w *Writer) nextBlock() {
-	if w.pos%blockSize != 0 {
-		// Simplify callers; w.err is updated on failure.
-		_, _ = w.zero(blockSize - w.pos%blockSize)
-	}
-}
-
-func fillExtents(hdr *format.ExtentHeader, extents []format.ExtentLeafNode, startBlock, offset, inodeSize uint32) {
-	*hdr = format.ExtentHeader{
-		Magic:   format.ExtentHeaderMagic,
-		Entries: uint16(len(extents)),
-		Max:     uint16(cap(extents)),
-		Depth:   0,
-	}
-	for i := range extents {
-		block := offset + uint32(i)*maxBlocksPerExtent
-		length := inodeSize - block
-		if length > maxBlocksPerExtent {
-			length = maxBlocksPerExtent
-		}
-		start := startBlock + block
-		extents[i] = format.ExtentLeafNode{
-			Block:    block,
-			Length:   uint16(length),
-			StartLow: start,
-		}
-	}
-}
-
-func (w *Writer) writeExtents(inode *inode) error {
-	start := w.pos - w.dataWritten
-	if start%blockSize != 0 {
-		panic("unaligned")
-	}
-	w.nextBlock()
-
-	startBlock := uint32(start / blockSize)
-	blocks := w.block() - startBlock
-	usedBlocks := blocks
-
-	const extentNodeSize = 12
-	const extentsPerBlock = blockSize/extentNodeSize - 1
-
-	extents := (blocks + maxBlocksPerExtent - 1) / maxBlocksPerExtent
-	var b bytes.Buffer
-	if extents == 0 {
-		// Nothing to do.
-	} else if extents <= 4 {
-		var root struct {
-			hdr     format.ExtentHeader
-			extents [4]format.ExtentLeafNode
-		}
-		fillExtents(&root.hdr, root.extents[:extents], startBlock, 0, blocks)
-		_ = binary.Write(&b, binary.LittleEndian, root)
-	} else if extents <= 4*extentsPerBlock {
-		const extentsPerBlock = blockSize/extentNodeSize - 1
-		extentBlocks := extents/extentsPerBlock + 1
-		usedBlocks += extentBlocks
-		var b2 bytes.Buffer
-
-		var root struct {
-			hdr   format.ExtentHeader
-			nodes [4]format.ExtentIndexNode
-		}
-		root.hdr = format.ExtentHeader{
-			Magic:   format.ExtentHeaderMagic,
-			Entries: uint16(extentBlocks),
-			Max:     4,
-			Depth:   1,
-		}
-		for i := uint32(0); i < extentBlocks; i++ {
-			root.nodes[i] = format.ExtentIndexNode{
-				Block:   i * extentsPerBlock * maxBlocksPerExtent,
-				LeafLow: w.block(),
-			}
-			extentsInBlock := extents - i*extentBlocks
-			if extentsInBlock > extentsPerBlock {
-				extentsInBlock = extentsPerBlock
-			}
-
-			var node struct {
-				hdr     format.ExtentHeader
-				extents [extentsPerBlock]format.ExtentLeafNode
-				_       [blockSize - (extentsPerBlock+1)*extentNodeSize]byte
-			}
-
-			offset := i * extentsPerBlock * maxBlocksPerExtent
-			fillExtents(&node.hdr, node.extents[:extentsInBlock], startBlock+offset, offset, blocks)
-			_ = binary.Write(&b2, binary.LittleEndian, node)
-			if _, err := w.write(b2.Next(blockSize)); err != nil {
-				return err
-			}
-		}
-		_ = binary.Write(&b, binary.LittleEndian, root)
-	} else {
-		panic("file too big")
-	}
-
-	inode.Data = b.Bytes()
-	inode.Flags |= format.InodeFlagExtents
-	inode.BlockCount += usedBlocks
-	return w.err
-}
-
-func (w *Writer) finishInode() error {
-	if !w.initialized {
-		if err := w.init(); err != nil {
-			return err
-		}
-	}
-	if w.curInode == nil {
-		return nil
-	}
-	if w.dataWritten != w.dataMax {
-		return fmt.Errorf("did not write the right amount: %d != %d", w.dataWritten, w.dataMax)
-	}
-
-	if w.dataMax != 0 && w.curInode.Flags&format.InodeFlagInlineData == 0 {
-		if err := w.writeExtents(w.curInode); err != nil {
-			return err
-		}
-	}
-
-	w.dataWritten = 0
-	w.dataMax = 0
-	w.curInode = nil
-	return w.err
-}
-
-func modeToFileType(mode uint16) format.FileType {
-	switch mode & format.TypeMask {
-	default:
-		return format.FileTypeUnknown
-	case format.S_IFREG:
-		return format.FileTypeRegular
-	case format.S_IFDIR:
-		return format.FileTypeDirectory
-	case format.S_IFCHR:
-		return format.FileTypeCharacter
-	case format.S_IFBLK:
-		return format.FileTypeBlock
-	case format.S_IFIFO:
-		return format.FileTypeFIFO
-	case format.S_IFSOCK:
-		return format.FileTypeSocket
-	case format.S_IFLNK:
-		return format.FileTypeSymbolicLink
-	}
-}
-
-type constReader byte
-
-var zero = constReader(0)
-
-func (r constReader) Read(b []byte) (int, error) {
-	for i := range b {
-		b[i] = byte(r)
-	}
-	return len(b), nil
-}
-
-func (w *Writer) writeDirectory(dir, parent *inode) error {
-	if err := w.finishInode(); err != nil {
-		return err
-	}
-
-	// The size of the directory is not known yet.
-	w.startInode("", dir, 0x7fffffffffffffff)
-	left := blockSize
-	finishBlock := func() error {
-		if left > 0 {
-			e := format.DirectoryEntry{
-				RecordLength: uint16(left),
-			}
-			err := binary.Write(w, binary.LittleEndian, e)
-			if err != nil {
-				return err
-			}
-			left -= directoryEntrySize
-			if left < 4 {
-				panic("not enough space for trailing entry")
-			}
-			_, err = io.CopyN(w, zero, int64(left))
-			if err != nil {
-				return err
-			}
-		}
-		left = blockSize
-		return nil
-	}
-
-	writeEntry := func(ino format.InodeNumber, name string) error {
-		rlb := directoryEntrySize + len(name)
-		rl := (rlb + 3) & ^3
-		if left < rl+12 {
-			if err := finishBlock(); err != nil {
-				return err
-			}
-		}
-		e := format.DirectoryEntry{
-			Inode:        ino,
-			RecordLength: uint16(rl),
-			NameLength:   uint8(len(name)),
-			FileType:     modeToFileType(w.getInode(ino).Mode),
-		}
-		err := binary.Write(w, binary.LittleEndian, e)
-		if err != nil {
-			return err
-		}
-		_, err = w.Write([]byte(name))
-		if err != nil {
-			return err
-		}
-		var zero [4]byte
-		_, err = w.Write(zero[:rl-rlb])
-		if err != nil {
-			return err
-		}
-		left -= rl
-		return nil
-	}
-	if err := writeEntry(dir.Number, "."); err != nil {
-		return err
-	}
-	if err := writeEntry(parent.Number, ".."); err != nil {
-		return err
-	}
-
-	// Follow e2fsck's convention and sort the children by inode number.
-	var children []string
-	for name := range dir.Children {
-		children = append(children, name)
-	}
-	sort.Slice(children, func(i, j int) bool {
-		left_num := dir.Children[children[i]].Number
-		right_num := dir.Children[children[j]].Number
-
-		if left_num == right_num {
-			return children[i] < children[j]
-		}
-		return left_num < right_num
-	})
-
-	for _, name := range children {
-		child := dir.Children[name]
-		if err := writeEntry(child.Number, name); err != nil {
-			return err
-		}
-	}
-	if err := finishBlock(); err != nil {
-		return err
-	}
-	w.curInode.Size = w.dataWritten
-	w.dataMax = w.dataWritten
-	return nil
-}
-
-func (w *Writer) writeDirectoryRecursive(dir, parent *inode) error {
-	if err := w.writeDirectory(dir, parent); err != nil {
-		return err
-	}
-
-	// Follow e2fsck's convention and sort the children by inode number.
-	var children []string
-	for name := range dir.Children {
-		children = append(children, name)
-	}
-	sort.Slice(children, func(i, j int) bool {
-		left_num := dir.Children[children[i]].Number
-		right_num := dir.Children[children[j]].Number
-
-		if left_num == right_num {
-			return children[i] < children[j]
-		}
-		return left_num < right_num
-	})
-
-	for _, name := range children {
-		child := dir.Children[name]
-		if child.IsDir() {
-			if err := w.writeDirectoryRecursive(child, dir); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-func (w *Writer) writeInodeTable(tableSize uint32) error {
-	var b bytes.Buffer
-	for _, inode := range w.inodes {
-		if inode != nil {
-			binode := format.Inode{
-				Mode:          inode.Mode,
-				Uid:           uint16(inode.Uid & 0xffff),
-				Gid:           uint16(inode.Gid & 0xffff),
-				SizeLow:       uint32(inode.Size & 0xffffffff),
-				SizeHigh:      uint32(inode.Size >> 32),
-				LinksCount:    uint16(inode.LinkCount),
-				BlocksLow:     inode.BlockCount,
-				Flags:         inode.Flags,
-				XattrBlockLow: inode.XattrBlock,
-				UidHigh:       uint16(inode.Uid >> 16),
-				GidHigh:       uint16(inode.Gid >> 16),
-				ExtraIsize:    uint16(inodeUsedSize - 128),
-				Atime:         uint32(inode.Atime),
-				AtimeExtra:    uint32(inode.Atime >> 32),
-				Ctime:         uint32(inode.Ctime),
-				CtimeExtra:    uint32(inode.Ctime >> 32),
-				Mtime:         uint32(inode.Mtime),
-				MtimeExtra:    uint32(inode.Mtime >> 32),
-				Crtime:        uint32(inode.Crtime),
-				CrtimeExtra:   uint32(inode.Crtime >> 32),
-			}
-			switch inode.Mode & format.TypeMask {
-			case format.S_IFDIR, format.S_IFREG, format.S_IFLNK:
-				n := copy(binode.Block[:], inode.Data)
-				if n < len(inode.Data) {
-					// Rewrite the first xattr with the data.
-					xattr := [1]xattr{{
-						Name:  "data",
-						Index: 7, // "system."
-						Value: inode.Data[n:],
-					}}
-					putXattrs(xattr[:], inode.XattrInline[4:], 0)
-				}
-			case format.S_IFBLK, format.S_IFCHR:
-				dev := inode.Devminor&0xff | inode.Devmajor<<8 | (inode.Devminor&0xffffff00)<<12
-				binary.LittleEndian.PutUint32(binode.Block[4:], dev)
-			}
-
-			_ = binary.Write(&b, binary.LittleEndian, binode)
-			b.Truncate(inodeUsedSize)
-			n, _ := b.Write(inode.XattrInline)
-			_, _ = io.CopyN(&b, zero, int64(inodeExtraSize-n))
-		} else {
-			_, _ = io.CopyN(&b, zero, inodeSize)
-		}
-		if _, err := w.write(b.Next(inodeSize)); err != nil {
-			return err
-		}
-	}
-	rest := tableSize - uint32(len(w.inodes)*inodeSize)
-	if _, err := w.zero(int64(rest)); err != nil {
-		return err
-	}
-	return nil
-}
-
-// NewWriter returns a Writer that writes an ext4 file system to the provided
-// WriteSeeker.
-func NewWriter(f io.ReadWriteSeeker, opts ...Option) *Writer {
-	w := &Writer{
-		f:           f,
-		bw:          bufio.NewWriterSize(f, 65536*8),
-		maxDiskSize: defaultMaxDiskSize,
-	}
-	for _, opt := range opts {
-		opt(w)
-	}
-	return w
-}
-
-// An Option provides extra options to NewWriter.
-type Option func(*Writer)
-
-// InlineData instructs the Writer to write small files into the inode
-// structures directly. This creates smaller images but currently is not
-// compatible with DAX.
-func InlineData(w *Writer) {
-	w.supportInlineData = true
-}
-
-// MaximumDiskSize instructs the writer to reserve enough metadata space for the
-// specified disk size. If not provided, then 16GB is the default.
-func MaximumDiskSize(size int64) Option {
-	return func(w *Writer) {
-		if size < 0 || size > maxMaxDiskSize {
-			w.maxDiskSize = maxMaxDiskSize
-		} else if size == 0 {
-			w.maxDiskSize = defaultMaxDiskSize
-		} else {
-			w.maxDiskSize = (size + blockSize - 1) &^ (blockSize - 1)
-		}
-	}
-}
-
-func (w *Writer) init() error {
-	// Skip the defective block inode.
-	w.inodes = make([]*inode, 1, 32)
-	// Create the root directory.
-	root, _ := w.makeInode(&File{
-		Mode: format.S_IFDIR | 0755,
-	}, nil)
-	root.LinkCount++ // The root is linked to itself.
-	// Skip until the first non-reserved inode.
-	w.inodes = append(w.inodes, make([]*inode, inodeFirst-len(w.inodes)-1)...)
-	maxBlocks := (w.maxDiskSize-1)/blockSize + 1
-	maxGroups := (maxBlocks-1)/blocksPerGroup + 1
-	w.gdBlocks = uint32((maxGroups-1)/groupsPerDescriptorBlock + 1)
-
-	// Skip past the superblock and block descriptor table.
-	w.seekBlock(1 + w.gdBlocks)
-	w.initialized = true
-
-	// The lost+found directory is required to exist for e2fsck to pass.
-	if err := w.Create("lost+found", &File{Mode: format.S_IFDIR | 0700}); err != nil {
-		return err
-	}
-	return w.err
-}
-
-func groupCount(blocks uint32, inodes uint32, inodesPerGroup uint32) uint32 {
-	inodeBlocksPerGroup := inodesPerGroup * inodeSize / blockSize
-	dataBlocksPerGroup := blocksPerGroup - inodeBlocksPerGroup - 2 // save room for the bitmaps
-
-	// Increase the block count to ensure there are enough groups for all the
-	// inodes.
-	minBlocks := (inodes-1)/inodesPerGroup*dataBlocksPerGroup + 1
-	if blocks < minBlocks {
-		blocks = minBlocks
-	}
-
-	return (blocks + dataBlocksPerGroup - 1) / dataBlocksPerGroup
-}
-
-func bestGroupCount(blocks uint32, inodes uint32) (groups uint32, inodesPerGroup uint32) {
-	groups = 0xffffffff
-	for ipg := uint32(inodesPerGroupIncrement); ipg <= maxInodesPerGroup; ipg += inodesPerGroupIncrement {
-		g := groupCount(blocks, inodes, ipg)
-		if g < groups {
-			groups = g
-			inodesPerGroup = ipg
-		}
-	}
-	return
-}
-
-func (w *Writer) Close() error {
-	if err := w.finishInode(); err != nil {
-		return err
-	}
-	root := w.root()
-	if err := w.writeDirectoryRecursive(root, root); err != nil {
-		return err
-	}
-	// Finish the last inode (probably a directory).
-	if err := w.finishInode(); err != nil {
-		return err
-	}
-
-	// Write the inode table
-	inodeTableOffset := w.block()
-	groups, inodesPerGroup := bestGroupCount(inodeTableOffset, uint32(len(w.inodes)))
-	err := w.writeInodeTable(groups * inodesPerGroup * inodeSize)
-	if err != nil {
-		return err
-	}
-
-	// Write the bitmaps.
-	bitmapOffset := w.block()
-	bitmapSize := groups * 2
-	validDataSize := bitmapOffset + bitmapSize
-	diskSize := validDataSize
-	minSize := (groups-1)*blocksPerGroup + 1
-	if diskSize < minSize {
-		diskSize = minSize
-	}
-
-	usedGdBlocks := (groups-1)/groupsPerDescriptorBlock + 1
-	if usedGdBlocks > w.gdBlocks {
-		return exceededMaxSizeError{w.maxDiskSize}
-	}
-
-	gds := make([]format.GroupDescriptor, w.gdBlocks*groupsPerDescriptorBlock)
-	inodeTableSizePerGroup := inodesPerGroup * inodeSize / blockSize
-	var totalUsedBlocks, totalUsedInodes uint32
-	for g := uint32(0); g < groups; g++ {
-		var b [blockSize * 2]byte
-		var dirCount, usedInodeCount, usedBlockCount uint16
-
-		// Block bitmap
-		if (g+1)*blocksPerGroup <= validDataSize {
-			// This group is fully allocated.
-			for j := range b[:blockSize] {
-				b[j] = 0xff
-			}
-			usedBlockCount = blocksPerGroup
-		} else if g*blocksPerGroup < validDataSize {
-			for j := uint32(0); j < validDataSize-g*blocksPerGroup; j++ {
-				b[j/8] |= 1 << (j % 8)
-				usedBlockCount++
-			}
-		}
-		if g == 0 {
-			// Unused group descriptor blocks should be cleared.
-			for j := 1 + usedGdBlocks; j < 1+w.gdBlocks; j++ {
-				b[j/8] &^= 1 << (j % 8)
-				usedBlockCount--
-			}
-		}
-		if g == groups-1 && diskSize%blocksPerGroup != 0 {
-			// Blocks that aren't present in the disk should be marked as
-			// allocated.
-			for j := diskSize % blocksPerGroup; j < blocksPerGroup; j++ {
-				b[j/8] |= 1 << (j % 8)
-				usedBlockCount++
-			}
-		}
-		// Inode bitmap
-		for j := uint32(0); j < inodesPerGroup; j++ {
-			ino := format.InodeNumber(1 + g*inodesPerGroup + j)
-			inode := w.getInode(ino)
-			if ino < inodeFirst || inode != nil {
-				b[blockSize+j/8] |= 1 << (j % 8)
-				usedInodeCount++
-			}
-			if inode != nil && inode.Mode&format.TypeMask == format.S_IFDIR {
-				dirCount++
-			}
-		}
-		_, err := w.write(b[:])
-		if err != nil {
-			return err
-		}
-		gds[g] = format.GroupDescriptor{
-			BlockBitmapLow:     bitmapOffset + 2*g,
-			InodeBitmapLow:     bitmapOffset + 2*g + 1,
-			InodeTableLow:      inodeTableOffset + g*inodeTableSizePerGroup,
-			UsedDirsCountLow:   dirCount,
-			FreeInodesCountLow: uint16(inodesPerGroup) - usedInodeCount,
-			FreeBlocksCountLow: blocksPerGroup - usedBlockCount,
-		}
-
-		totalUsedBlocks += uint32(usedBlockCount)
-		totalUsedInodes += uint32(usedInodeCount)
-	}
-
-	// Zero up to the disk size.
-	_, err = w.zero(int64(diskSize-bitmapOffset-bitmapSize) * blockSize)
-	if err != nil {
-		return err
-	}
-
-	// Write the block descriptors
-	w.seekBlock(1)
-	if w.err != nil {
-		return w.err
-	}
-	err = binary.Write(w.bw, binary.LittleEndian, gds)
-	if err != nil {
-		return err
-	}
-
-	// Write the super block
-	var blk [blockSize]byte
-	b := bytes.NewBuffer(blk[:1024])
-	sb := &format.SuperBlock{
-		InodesCount:        inodesPerGroup * groups,
-		BlocksCountLow:     diskSize,
-		FreeBlocksCountLow: blocksPerGroup*groups - totalUsedBlocks,
-		FreeInodesCount:    inodesPerGroup*groups - totalUsedInodes,
-		FirstDataBlock:     0,
-		LogBlockSize:       2, // 2^(10 + 2)
-		LogClusterSize:     2,
-		BlocksPerGroup:     blocksPerGroup,
-		ClustersPerGroup:   blocksPerGroup,
-		InodesPerGroup:     inodesPerGroup,
-		Magic:              format.SuperBlockMagic,
-		State:              1, // cleanly unmounted
-		Errors:             1, // continue on error?
-		CreatorOS:          0, // Linux
-		RevisionLevel:      1, // dynamic inode sizes
-		FirstInode:         inodeFirst,
-		LpfInode:           inodeLostAndFound,
-		InodeSize:          inodeSize,
-		FeatureCompat:      format.CompatSparseSuper2 | format.CompatExtAttr,
-		FeatureIncompat:    format.IncompatFiletype | format.IncompatExtents | format.IncompatFlexBg,
-		FeatureRoCompat:    format.RoCompatLargeFile | format.RoCompatHugeFile | format.RoCompatExtraIsize | format.RoCompatReadonly,
-		MinExtraIsize:      extraIsize,
-		WantExtraIsize:     extraIsize,
-		LogGroupsPerFlex:   31,
-	}
-	if w.supportInlineData {
-		sb.FeatureIncompat |= format.IncompatInlineData
-	}
-	_ = binary.Write(b, binary.LittleEndian, sb)
-	w.seekBlock(0)
-	if _, err := w.write(blk[:]); err != nil {
-		return err
-	}
-	w.seekBlock(diskSize)
-	return w.err
-}

+ 0 - 411
vendor/github.com/Microsoft/hcsshim/ext4/internal/format/format.go

@@ -1,411 +0,0 @@
-package format
-
-type SuperBlock struct {
-	InodesCount          uint32
-	BlocksCountLow       uint32
-	RootBlocksCountLow   uint32
-	FreeBlocksCountLow   uint32
-	FreeInodesCount      uint32
-	FirstDataBlock       uint32
-	LogBlockSize         uint32
-	LogClusterSize       uint32
-	BlocksPerGroup       uint32
-	ClustersPerGroup     uint32
-	InodesPerGroup       uint32
-	Mtime                uint32
-	Wtime                uint32
-	MountCount           uint16
-	MaxMountCount        uint16
-	Magic                uint16
-	State                uint16
-	Errors               uint16
-	MinorRevisionLevel   uint16
-	LastCheck            uint32
-	CheckInterval        uint32
-	CreatorOS            uint32
-	RevisionLevel        uint32
-	DefaultReservedUid   uint16
-	DefaultReservedGid   uint16
-	FirstInode           uint32
-	InodeSize            uint16
-	BlockGroupNr         uint16
-	FeatureCompat        CompatFeature
-	FeatureIncompat      IncompatFeature
-	FeatureRoCompat      RoCompatFeature
-	UUID                 [16]uint8
-	VolumeName           [16]byte
-	LastMounted          [64]byte
-	AlgorithmUsageBitmap uint32
-	PreallocBlocks       uint8
-	PreallocDirBlocks    uint8
-	ReservedGdtBlocks    uint16
-	JournalUUID          [16]uint8
-	JournalInum          uint32
-	JournalDev           uint32
-	LastOrphan           uint32
-	HashSeed             [4]uint32
-	DefHashVersion       uint8
-	JournalBackupType    uint8
-	DescSize             uint16
-	DefaultMountOpts     uint32
-	FirstMetaBg          uint32
-	MkfsTime             uint32
-	JournalBlocks        [17]uint32
-	BlocksCountHigh      uint32
-	RBlocksCountHigh     uint32
-	FreeBlocksCountHigh  uint32
-	MinExtraIsize        uint16
-	WantExtraIsize       uint16
-	Flags                uint32
-	RaidStride           uint16
-	MmpInterval          uint16
-	MmpBlock             uint64
-	RaidStripeWidth      uint32
-	LogGroupsPerFlex     uint8
-	ChecksumType         uint8
-	ReservedPad          uint16
-	KbytesWritten        uint64
-	SnapshotInum         uint32
-	SnapshotID           uint32
-	SnapshotRBlocksCount uint64
-	SnapshotList         uint32
-	ErrorCount           uint32
-	FirstErrorTime       uint32
-	FirstErrorInode      uint32
-	FirstErrorBlock      uint64
-	FirstErrorFunc       [32]uint8
-	FirstErrorLine       uint32
-	LastErrorTime        uint32
-	LastErrorInode       uint32
-	LastErrorLine        uint32
-	LastErrorBlock       uint64
-	LastErrorFunc        [32]uint8
-	MountOpts            [64]uint8
-	UserQuotaInum        uint32
-	GroupQuotaInum       uint32
-	OverheadBlocks       uint32
-	BackupBgs            [2]uint32
-	EncryptAlgos         [4]uint8
-	EncryptPwSalt        [16]uint8
-	LpfInode             uint32
-	ProjectQuotaInum     uint32
-	ChecksumSeed         uint32
-	WtimeHigh            uint8
-	MtimeHigh            uint8
-	MkfsTimeHigh         uint8
-	LastcheckHigh        uint8
-	FirstErrorTimeHigh   uint8
-	LastErrorTimeHigh    uint8
-	Pad                  [2]uint8
-	Reserved             [96]uint32
-	Checksum             uint32
-}
-
-const SuperBlockMagic uint16 = 0xef53
-
-type CompatFeature uint32
-type IncompatFeature uint32
-type RoCompatFeature uint32
-
-const (
-	CompatDirPrealloc   CompatFeature = 0x1
-	CompatImagicInodes  CompatFeature = 0x2
-	CompatHasJournal    CompatFeature = 0x4
-	CompatExtAttr       CompatFeature = 0x8
-	CompatResizeInode   CompatFeature = 0x10
-	CompatDirIndex      CompatFeature = 0x20
-	CompatLazyBg        CompatFeature = 0x40
-	CompatExcludeInode  CompatFeature = 0x80
-	CompatExcludeBitmap CompatFeature = 0x100
-	CompatSparseSuper2  CompatFeature = 0x200
-
-	IncompatCompression IncompatFeature = 0x1
-	IncompatFiletype    IncompatFeature = 0x2
-	IncompatRecover     IncompatFeature = 0x4
-	IncompatJournalDev  IncompatFeature = 0x8
-	IncompatMetaBg      IncompatFeature = 0x10
-	IncompatExtents     IncompatFeature = 0x40
-	Incompat_64Bit      IncompatFeature = 0x80
-	IncompatMmp         IncompatFeature = 0x100
-	IncompatFlexBg      IncompatFeature = 0x200
-	IncompatEaInode     IncompatFeature = 0x400
-	IncompatDirdata     IncompatFeature = 0x1000
-	IncompatCsumSeed    IncompatFeature = 0x2000
-	IncompatLargedir    IncompatFeature = 0x4000
-	IncompatInlineData  IncompatFeature = 0x8000
-	IncompatEncrypt     IncompatFeature = 0x10000
-
-	RoCompatSparseSuper  RoCompatFeature = 0x1
-	RoCompatLargeFile    RoCompatFeature = 0x2
-	RoCompatBtreeDir     RoCompatFeature = 0x4
-	RoCompatHugeFile     RoCompatFeature = 0x8
-	RoCompatGdtCsum      RoCompatFeature = 0x10
-	RoCompatDirNlink     RoCompatFeature = 0x20
-	RoCompatExtraIsize   RoCompatFeature = 0x40
-	RoCompatHasSnapshot  RoCompatFeature = 0x80
-	RoCompatQuota        RoCompatFeature = 0x100
-	RoCompatBigalloc     RoCompatFeature = 0x200
-	RoCompatMetadataCsum RoCompatFeature = 0x400
-	RoCompatReplica      RoCompatFeature = 0x800
-	RoCompatReadonly     RoCompatFeature = 0x1000
-	RoCompatProject      RoCompatFeature = 0x2000
-)
-
-type BlockGroupFlag uint16
-
-const (
-	BlockGroupInodeUninit BlockGroupFlag = 0x1
-	BlockGroupBlockUninit BlockGroupFlag = 0x2
-	BlockGroupInodeZeroed BlockGroupFlag = 0x4
-)
-
-type GroupDescriptor struct {
-	BlockBitmapLow     uint32
-	InodeBitmapLow     uint32
-	InodeTableLow      uint32
-	FreeBlocksCountLow uint16
-	FreeInodesCountLow uint16
-	UsedDirsCountLow   uint16
-	Flags              BlockGroupFlag
-	ExcludeBitmapLow   uint32
-	BlockBitmapCsumLow uint16
-	InodeBitmapCsumLow uint16
-	ItableUnusedLow    uint16
-	Checksum           uint16
-}
-
-type GroupDescriptor64 struct {
-	GroupDescriptor
-	BlockBitmapHigh     uint32
-	InodeBitmapHigh     uint32
-	InodeTableHigh      uint32
-	FreeBlocksCountHigh uint16
-	FreeInodesCountHigh uint16
-	UsedDirsCountHigh   uint16
-	ItableUnusedHigh    uint16
-	ExcludeBitmapHigh   uint32
-	BlockBitmapCsumHigh uint16
-	InodeBitmapCsumHigh uint16
-	Reserved            uint32
-}
-
-const (
-	S_IXOTH  = 0x1
-	S_IWOTH  = 0x2
-	S_IROTH  = 0x4
-	S_IXGRP  = 0x8
-	S_IWGRP  = 0x10
-	S_IRGRP  = 0x20
-	S_IXUSR  = 0x40
-	S_IWUSR  = 0x80
-	S_IRUSR  = 0x100
-	S_ISVTX  = 0x200
-	S_ISGID  = 0x400
-	S_ISUID  = 0x800
-	S_IFIFO  = 0x1000
-	S_IFCHR  = 0x2000
-	S_IFDIR  = 0x4000
-	S_IFBLK  = 0x6000
-	S_IFREG  = 0x8000
-	S_IFLNK  = 0xA000
-	S_IFSOCK = 0xC000
-
-	TypeMask uint16 = 0xF000
-)
-
-type InodeNumber uint32
-
-const (
-	InodeRoot = 2
-)
-
-type Inode struct {
-	Mode                 uint16
-	Uid                  uint16
-	SizeLow              uint32
-	Atime                uint32
-	Ctime                uint32
-	Mtime                uint32
-	Dtime                uint32
-	Gid                  uint16
-	LinksCount           uint16
-	BlocksLow            uint32
-	Flags                InodeFlag
-	Version              uint32
-	Block                [60]byte
-	Generation           uint32
-	XattrBlockLow        uint32
-	SizeHigh             uint32
-	ObsoleteFragmentAddr uint32
-	BlocksHigh           uint16
-	XattrBlockHigh       uint16
-	UidHigh              uint16
-	GidHigh              uint16
-	ChecksumLow          uint16
-	Reserved             uint16
-	ExtraIsize           uint16
-	ChecksumHigh         uint16
-	CtimeExtra           uint32
-	MtimeExtra           uint32
-	AtimeExtra           uint32
-	Crtime               uint32
-	CrtimeExtra          uint32
-	VersionHigh          uint32
-	Projid               uint32
-}
-
-type InodeFlag uint32
-
-const (
-	InodeFlagSecRm              InodeFlag = 0x1
-	InodeFlagUnRm               InodeFlag = 0x2
-	InodeFlagCompressed         InodeFlag = 0x4
-	InodeFlagSync               InodeFlag = 0x8
-	InodeFlagImmutable          InodeFlag = 0x10
-	InodeFlagAppend             InodeFlag = 0x20
-	InodeFlagNoDump             InodeFlag = 0x40
-	InodeFlagNoAtime            InodeFlag = 0x80
-	InodeFlagDirtyCompressed    InodeFlag = 0x100
-	InodeFlagCompressedClusters InodeFlag = 0x200
-	InodeFlagNoCompress         InodeFlag = 0x400
-	InodeFlagEncrypted          InodeFlag = 0x800
-	InodeFlagHashedIndex        InodeFlag = 0x1000
-	InodeFlagMagic              InodeFlag = 0x2000
-	InodeFlagJournalData        InodeFlag = 0x4000
-	InodeFlagNoTail             InodeFlag = 0x8000
-	InodeFlagDirSync            InodeFlag = 0x10000
-	InodeFlagTopDir             InodeFlag = 0x20000
-	InodeFlagHugeFile           InodeFlag = 0x40000
-	InodeFlagExtents            InodeFlag = 0x80000
-	InodeFlagEaInode            InodeFlag = 0x200000
-	InodeFlagEOFBlocks          InodeFlag = 0x400000
-	InodeFlagSnapfile           InodeFlag = 0x01000000
-	InodeFlagSnapfileDeleted    InodeFlag = 0x04000000
-	InodeFlagSnapfileShrunk     InodeFlag = 0x08000000
-	InodeFlagInlineData         InodeFlag = 0x10000000
-	InodeFlagProjectIDInherit   InodeFlag = 0x20000000
-	InodeFlagReserved           InodeFlag = 0x80000000
-)
-
-const (
-	MaxLinks = 65000
-)
-
-type ExtentHeader struct {
-	Magic      uint16
-	Entries    uint16
-	Max        uint16
-	Depth      uint16
-	Generation uint32
-}
-
-const ExtentHeaderMagic uint16 = 0xf30a
-
-type ExtentIndexNode struct {
-	Block    uint32
-	LeafLow  uint32
-	LeafHigh uint16
-	Unused   uint16
-}
-
-type ExtentLeafNode struct {
-	Block     uint32
-	Length    uint16
-	StartHigh uint16
-	StartLow  uint32
-}
-
-type ExtentTail struct {
-	Checksum uint32
-}
-
-type DirectoryEntry struct {
-	Inode        InodeNumber
-	RecordLength uint16
-	NameLength   uint8
-	FileType     FileType
-	//Name         []byte
-}
-
-type FileType uint8
-
-const (
-	FileTypeUnknown      FileType = 0x0
-	FileTypeRegular      FileType = 0x1
-	FileTypeDirectory    FileType = 0x2
-	FileTypeCharacter    FileType = 0x3
-	FileTypeBlock        FileType = 0x4
-	FileTypeFIFO         FileType = 0x5
-	FileTypeSocket       FileType = 0x6
-	FileTypeSymbolicLink FileType = 0x7
-)
-
-type DirectoryEntryTail struct {
-	ReservedZero1 uint32
-	RecordLength  uint16
-	ReservedZero2 uint8
-	FileType      uint8
-	Checksum      uint32
-}
-
-type DirectoryTreeRoot struct {
-	Dot            DirectoryEntry
-	DotName        [4]byte
-	DotDot         DirectoryEntry
-	DotDotName     [4]byte
-	ReservedZero   uint32
-	HashVersion    uint8
-	InfoLength     uint8
-	IndirectLevels uint8
-	UnusedFlags    uint8
-	Limit          uint16
-	Count          uint16
-	Block          uint32
-	//Entries        []DirectoryTreeEntry
-}
-
-type DirectoryTreeNode struct {
-	FakeInode        uint32
-	FakeRecordLength uint16
-	NameLength       uint8
-	FileType         uint8
-	Limit            uint16
-	Count            uint16
-	Block            uint32
-	//Entries          []DirectoryTreeEntry
-}
-
-type DirectoryTreeEntry struct {
-	Hash  uint32
-	Block uint32
-}
-
-type DirectoryTreeTail struct {
-	Reserved uint32
-	Checksum uint32
-}
-
-type XAttrInodeBodyHeader struct {
-	Magic uint32
-}
-
-type XAttrHeader struct {
-	Magic          uint32
-	ReferenceCount uint32
-	Blocks         uint32
-	Hash           uint32
-	Checksum       uint32
-	Reserved       [3]uint32
-}
-
-const XAttrHeaderMagic uint32 = 0xea020000
-
-type XAttrEntry struct {
-	NameLength  uint8
-	NameIndex   uint8
-	ValueOffset uint16
-	ValueInum   uint32
-	ValueSize   uint32
-	Hash        uint32
-	//Name        []byte
-}

+ 0 - 209
vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/tar2ext4.go

@@ -1,209 +0,0 @@
-package tar2ext4
-
-import (
-	"archive/tar"
-	"bufio"
-	"encoding/binary"
-	"io"
-	"os"
-	"path"
-	"strings"
-
-	"github.com/Microsoft/hcsshim/ext4/internal/compactext4"
-	"github.com/Microsoft/hcsshim/ext4/internal/format"
-)
-
-type params struct {
-	convertWhiteout bool
-	appendVhdFooter bool
-	ext4opts        []compactext4.Option
-}
-
-// Option is the type for optional parameters to Convert.
-type Option func(*params)
-
-// ConvertWhiteout instructs the converter to convert OCI-style whiteouts
-// (beginning with .wh.) to overlay-style whiteouts.
-func ConvertWhiteout(p *params) {
-	p.convertWhiteout = true
-}
-
-// AppendVhdFooter instructs the converter to add a fixed VHD footer to the
-// file.
-func AppendVhdFooter(p *params) {
-	p.appendVhdFooter = true
-}
-
-// InlineData instructs the converter to write small files into the inode
-// structures directly. This creates smaller images but currently is not
-// compatible with DAX.
-func InlineData(p *params) {
-	p.ext4opts = append(p.ext4opts, compactext4.InlineData)
-}
-
-// MaximumDiskSize instructs the writer to limit the disk size to the specified
-// value. This also reserves enough metadata space for the specified disk size.
-// If not provided, then 16GB is the default.
-func MaximumDiskSize(size int64) Option {
-	return func(p *params) {
-		p.ext4opts = append(p.ext4opts, compactext4.MaximumDiskSize(size))
-	}
-}
-
-const (
-	whiteoutPrefix = ".wh."
-	opaqueWhiteout = ".wh..wh..opq"
-)
-
-// Convert writes a compact ext4 file system image that contains the files in the
-// input tar stream.
-func Convert(r io.Reader, w io.ReadWriteSeeker, options ...Option) error {
-	var p params
-	for _, opt := range options {
-		opt(&p)
-	}
-	t := tar.NewReader(bufio.NewReader(r))
-	fs := compactext4.NewWriter(w, p.ext4opts...)
-	for {
-		hdr, err := t.Next()
-		if err == io.EOF {
-			break
-		}
-		if err != nil {
-			return err
-		}
-
-		if p.convertWhiteout {
-			dir, name := path.Split(hdr.Name)
-			if strings.HasPrefix(name, whiteoutPrefix) {
-				if name == opaqueWhiteout {
-					// Update the directory with the appropriate xattr.
-					f, err := fs.Stat(dir)
-					if err != nil {
-						return err
-					}
-					f.Xattrs["trusted.overlay.opaque"] = []byte("y")
-					err = fs.Create(dir, f)
-					if err != nil {
-						return err
-					}
-				} else {
-					// Create an overlay-style whiteout.
-					f := &compactext4.File{
-						Mode:     compactext4.S_IFCHR,
-						Devmajor: 0,
-						Devminor: 0,
-					}
-					err = fs.Create(path.Join(dir, name[len(whiteoutPrefix):]), f)
-					if err != nil {
-						return err
-					}
-				}
-
-				continue
-			}
-		}
-
-		if hdr.Typeflag == tar.TypeLink {
-			err = fs.Link(hdr.Linkname, hdr.Name)
-			if err != nil {
-				return err
-			}
-		} else {
-			f := &compactext4.File{
-				Mode:     uint16(hdr.Mode),
-				Atime:    hdr.AccessTime,
-				Mtime:    hdr.ModTime,
-				Ctime:    hdr.ChangeTime,
-				Crtime:   hdr.ModTime,
-				Size:     hdr.Size,
-				Uid:      uint32(hdr.Uid),
-				Gid:      uint32(hdr.Gid),
-				Linkname: hdr.Linkname,
-				Devmajor: uint32(hdr.Devmajor),
-				Devminor: uint32(hdr.Devminor),
-				Xattrs:   make(map[string][]byte),
-			}
-			for key, value := range hdr.PAXRecords {
-				const xattrPrefix = "SCHILY.xattr."
-				if strings.HasPrefix(key, xattrPrefix) {
-					f.Xattrs[key[len(xattrPrefix):]] = []byte(value)
-				}
-			}
-
-			var typ uint16
-			switch hdr.Typeflag {
-			case tar.TypeReg, tar.TypeRegA:
-				typ = compactext4.S_IFREG
-			case tar.TypeSymlink:
-				typ = compactext4.S_IFLNK
-			case tar.TypeChar:
-				typ = compactext4.S_IFCHR
-			case tar.TypeBlock:
-				typ = compactext4.S_IFBLK
-			case tar.TypeDir:
-				typ = compactext4.S_IFDIR
-			case tar.TypeFifo:
-				typ = compactext4.S_IFIFO
-			}
-			f.Mode &= ^compactext4.TypeMask
-			f.Mode |= typ
-			err = fs.CreateWithParents(hdr.Name, f)
-			if err != nil {
-				return err
-			}
-			_, err = io.Copy(fs, t)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	err := fs.Close()
-	if err != nil {
-		return err
-	}
-	if p.appendVhdFooter {
-		size, err := w.Seek(0, io.SeekEnd)
-		if err != nil {
-			return err
-		}
-		err = binary.Write(w, binary.BigEndian, makeFixedVHDFooter(size))
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// ReadExt4SuperBlock reads and returns ext4 super block from VHD
-//
-// The layout on disk is as follows:
-// | Group 0 padding     | - 1024 bytes
-// | ext4 SuperBlock     | - 1 block
-// | Group Descriptors   | - many blocks
-// | Reserved GDT Blocks | - many blocks
-// | Data Block Bitmap   | - 1 block
-// | inode Bitmap        | - 1 block
-// | inode Table         | - many blocks
-// | Data Blocks         | - many blocks
-//
-// More details can be found here https://ext4.wiki.kernel.org/index.php/Ext4_Disk_Layout
-//
-// Our goal is to skip the Group 0 padding, read and return the ext4 SuperBlock
-func ReadExt4SuperBlock(vhdPath string) (*format.SuperBlock, error) {
-	vhd, err := os.OpenFile(vhdPath, os.O_RDONLY, 0)
-	if err != nil {
-		return nil, err
-	}
-	defer vhd.Close()
-
-	// Skip padding at the start
-	if _, err := vhd.Seek(1024, io.SeekStart); err != nil {
-		return nil, err
-	}
-	var sb format.SuperBlock
-	if err := binary.Read(vhd, binary.LittleEndian, &sb); err != nil {
-		return nil, err
-	}
-	return &sb, nil
-}

+ 0 - 76
vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/vhdfooter.go

@@ -1,76 +0,0 @@
-package tar2ext4
-
-import (
-	"bytes"
-	"crypto/rand"
-	"encoding/binary"
-)
-
-// Constants for the VHD footer
-const (
-	cookieMagic            = "conectix"
-	featureMask            = 0x2
-	fileFormatVersionMagic = 0x00010000
-	fixedDataOffset        = -1
-	creatorVersionMagic    = 0x000a0000
-	diskTypeFixed          = 2
-)
-
-type vhdFooter struct {
-	Cookie             [8]byte
-	Features           uint32
-	FileFormatVersion  uint32
-	DataOffset         int64
-	TimeStamp          uint32
-	CreatorApplication [4]byte
-	CreatorVersion     uint32
-	CreatorHostOS      [4]byte
-	OriginalSize       int64
-	CurrentSize        int64
-	DiskGeometry       uint32
-	DiskType           uint32
-	Checksum           uint32
-	UniqueID           [16]uint8
-	SavedState         uint8
-	Reserved           [427]uint8
-}
-
-func makeFixedVHDFooter(size int64) *vhdFooter {
-	footer := &vhdFooter{
-		Features:          featureMask,
-		FileFormatVersion: fileFormatVersionMagic,
-		DataOffset:        fixedDataOffset,
-		CreatorVersion:    creatorVersionMagic,
-		OriginalSize:      size,
-		CurrentSize:       size,
-		DiskType:          diskTypeFixed,
-		UniqueID:          generateUUID(),
-	}
-	copy(footer.Cookie[:], cookieMagic)
-	footer.Checksum = calculateCheckSum(footer)
-	return footer
-}
-
-func calculateCheckSum(footer *vhdFooter) uint32 {
-	oldchk := footer.Checksum
-	footer.Checksum = 0
-
-	buf := &bytes.Buffer{}
-	_ = binary.Write(buf, binary.BigEndian, footer)
-
-	var chk uint32
-	bufBytes := buf.Bytes()
-	for i := 0; i < len(bufBytes); i++ {
-		chk += uint32(bufBytes[i])
-	}
-	footer.Checksum = oldchk
-	return uint32(^chk)
-}
-
-func generateUUID() [16]byte {
-	res := [16]byte{}
-	if _, err := rand.Read(res[:]); err != nil {
-		panic(err)
-	}
-	return res
-}

+ 0 - 109
vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/defs.go

@@ -1,109 +0,0 @@
-package remotefs
-
-import (
-	"errors"
-	"os"
-	"time"
-)
-
-// RemotefsCmd is the name of the remotefs meta command
-const RemotefsCmd = "remotefs"
-
-// Name of the commands when called from the cli context (remotefs <CMD> ...)
-const (
-	StatCmd           = "stat"
-	LstatCmd          = "lstat"
-	ReadlinkCmd       = "readlink"
-	MkdirCmd          = "mkdir"
-	MkdirAllCmd       = "mkdirall"
-	RemoveCmd         = "remove"
-	RemoveAllCmd      = "removeall"
-	LinkCmd           = "link"
-	SymlinkCmd        = "symlink"
-	LchmodCmd         = "lchmod"
-	LchownCmd         = "lchown"
-	MknodCmd          = "mknod"
-	MkfifoCmd         = "mkfifo"
-	OpenFileCmd       = "openfile"
-	ReadFileCmd       = "readfile"
-	WriteFileCmd      = "writefile"
-	ReadDirCmd        = "readdir"
-	ResolvePathCmd    = "resolvepath"
-	ExtractArchiveCmd = "extractarchive"
-	ArchivePathCmd    = "archivepath"
-)
-
-// ErrInvalid is returned if the parameters are invalid
-var ErrInvalid = errors.New("invalid arguments")
-
-// ErrUnknown is returned for an unknown remotefs command
-var ErrUnknown = errors.New("unkown command")
-
-// ExportedError is the serialized version of the a Go error.
-// It also provides a trivial implementation of the error interface.
-type ExportedError struct {
-	ErrString string
-	ErrNum    int `json:",omitempty"`
-}
-
-// Error returns an error string
-func (ee *ExportedError) Error() string {
-	return ee.ErrString
-}
-
-// FileInfo is the stat struct returned by the remotefs system. It
-// fulfills the os.FileInfo interface.
-type FileInfo struct {
-	NameVar    string
-	SizeVar    int64
-	ModeVar    os.FileMode
-	ModTimeVar int64 // Serialization of time.Time breaks in travis, so use an int
-	IsDirVar   bool
-}
-
-var _ os.FileInfo = &FileInfo{}
-
-// Name returns the filename from a FileInfo structure
-func (f *FileInfo) Name() string { return f.NameVar }
-
-// Size returns the size from a FileInfo structure
-func (f *FileInfo) Size() int64 { return f.SizeVar }
-
-// Mode returns the mode from a FileInfo structure
-func (f *FileInfo) Mode() os.FileMode { return f.ModeVar }
-
-// ModTime returns the modification time from a FileInfo structure
-func (f *FileInfo) ModTime() time.Time { return time.Unix(0, f.ModTimeVar) }
-
-// IsDir returns the is-directory indicator from a FileInfo structure
-func (f *FileInfo) IsDir() bool { return f.IsDirVar }
-
-// Sys provides an interface to a FileInfo structure
-func (f *FileInfo) Sys() interface{} { return nil }
-
-// FileHeader is a header for remote *os.File operations for remotefs.OpenFile
-type FileHeader struct {
-	Cmd  uint32
-	Size uint64
-}
-
-const (
-	// Read request command.
-	Read uint32 = iota
-	// Write request command.
-	Write
-	// Seek request command.
-	Seek
-	// Close request command.
-	Close
-	// CmdOK is a response meaning request succeeded.
-	CmdOK
-	// CmdFailed is a response meaning request failed.
-	CmdFailed
-)
-
-// SeekHeader is header for the Seek operation for remotefs.OpenFile
-type SeekHeader struct {
-	Offset int64
-	Whence int32
-}

+ 0 - 578
vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/remotefs.go

@@ -1,578 +0,0 @@
-// +build !windows
-
-package remotefs
-
-import (
-	"bytes"
-	"encoding/binary"
-	"encoding/json"
-	"io"
-	"os"
-	"path/filepath"
-	"strconv"
-
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/symlink"
-	"github.com/sirupsen/logrus"
-	"golang.org/x/sys/unix"
-)
-
-// Func is the function definition for a generic remote fs function
-// The input to the function is any serialized structs / data from in and the string slice
-// from args. The output of the function will be serialized and written to out.
-type Func func(stdin io.Reader, stdout io.Writer, args []string) error
-
-// Commands provide a string -> remotefs function mapping.
-// This is useful for commandline programs that will receive a string
-// as the function to execute.
-var Commands = map[string]Func{
-	StatCmd:           Stat,
-	LstatCmd:          Lstat,
-	ReadlinkCmd:       Readlink,
-	MkdirCmd:          Mkdir,
-	MkdirAllCmd:       MkdirAll,
-	RemoveCmd:         Remove,
-	RemoveAllCmd:      RemoveAll,
-	LinkCmd:           Link,
-	SymlinkCmd:        Symlink,
-	LchmodCmd:         Lchmod,
-	LchownCmd:         Lchown,
-	MknodCmd:          Mknod,
-	MkfifoCmd:         Mkfifo,
-	OpenFileCmd:       OpenFile,
-	ReadFileCmd:       ReadFile,
-	WriteFileCmd:      WriteFile,
-	ReadDirCmd:        ReadDir,
-	ResolvePathCmd:    ResolvePath,
-	ExtractArchiveCmd: ExtractArchive,
-	ArchivePathCmd:    ArchivePath,
-}
-
-// Stat functions like os.Stat.
-// Args:
-// - args[0] is the path
-// Out:
-// - out = FileInfo object
-func Stat(in io.Reader, out io.Writer, args []string) error {
-	return stat(in, out, args, os.Stat)
-}
-
-// Lstat functions like os.Lstat.
-// Args:
-// - args[0] is the path
-// Out:
-// - out = FileInfo object
-func Lstat(in io.Reader, out io.Writer, args []string) error {
-	return stat(in, out, args, os.Lstat)
-}
-
-func stat(in io.Reader, out io.Writer, args []string, statfunc func(string) (os.FileInfo, error)) error {
-	if len(args) < 1 {
-		return ErrInvalid
-	}
-
-	fi, err := statfunc(args[0])
-	if err != nil {
-		return err
-	}
-
-	info := FileInfo{
-		NameVar:    fi.Name(),
-		SizeVar:    fi.Size(),
-		ModeVar:    fi.Mode(),
-		ModTimeVar: fi.ModTime().UnixNano(),
-		IsDirVar:   fi.IsDir(),
-	}
-
-	buf, err := json.Marshal(info)
-	if err != nil {
-		return err
-	}
-
-	if _, err := out.Write(buf); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Readlink works like os.Readlink
-// In:
-//  - args[0] is path
-// Out:
-//  - Write link result to out
-func Readlink(in io.Reader, out io.Writer, args []string) error {
-	if len(args) < 1 {
-		return ErrInvalid
-	}
-
-	l, err := os.Readlink(args[0])
-	if err != nil {
-		return err
-	}
-
-	if _, err := out.Write([]byte(l)); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Mkdir works like os.Mkdir
-// Args:
-// - args[0] is the path
-// - args[1] is the permissions in octal (like 0755)
-func Mkdir(in io.Reader, out io.Writer, args []string) error {
-	return mkdir(in, out, args, os.Mkdir)
-}
-
-// MkdirAll works like os.MkdirAll.
-// Args:
-// - args[0] is the path
-// - args[1] is the permissions in octal (like 0755)
-func MkdirAll(in io.Reader, out io.Writer, args []string) error {
-	return mkdir(in, out, args, os.MkdirAll)
-}
-
-func mkdir(in io.Reader, out io.Writer, args []string, mkdirFunc func(string, os.FileMode) error) error {
-	if len(args) < 2 {
-		return ErrInvalid
-	}
-
-	perm, err := strconv.ParseUint(args[1], 8, 32)
-	if err != nil {
-		return err
-	}
-	return mkdirFunc(args[0], os.FileMode(perm))
-}
-
-// Remove works like os.Remove
-// Args:
-//	- args[0] is the path
-func Remove(in io.Reader, out io.Writer, args []string) error {
-	return remove(in, out, args, os.Remove)
-}
-
-// RemoveAll works like os.RemoveAll
-// Args:
-//  - args[0] is the path
-func RemoveAll(in io.Reader, out io.Writer, args []string) error {
-	return remove(in, out, args, os.RemoveAll)
-}
-
-func remove(in io.Reader, out io.Writer, args []string, removefunc func(string) error) error {
-	if len(args) < 1 {
-		return ErrInvalid
-	}
-	return removefunc(args[0])
-}
-
-// Link works like os.Link
-// Args:
-//  - args[0] = old path name (link source)
-//  - args[1] = new path name (link dest)
-func Link(in io.Reader, out io.Writer, args []string) error {
-	return link(in, out, args, os.Link)
-}
-
-// Symlink works like os.Symlink
-// Args:
-//  - args[0] = old path name (link source)
-//  - args[1] = new path name (link dest)
-func Symlink(in io.Reader, out io.Writer, args []string) error {
-	return link(in, out, args, os.Symlink)
-}
-
-func link(in io.Reader, out io.Writer, args []string, linkfunc func(string, string) error) error {
-	if len(args) < 2 {
-		return ErrInvalid
-	}
-	return linkfunc(args[0], args[1])
-}
-
-// Lchmod changes permission of the given file without following symlinks
-// Args:
-//  - args[0] = path
-//  - args[1] = permission mode in octal (like 0755)
-func Lchmod(in io.Reader, out io.Writer, args []string) error {
-	if len(args) < 2 {
-		return ErrInvalid
-	}
-
-	perm, err := strconv.ParseUint(args[1], 8, 32)
-	if err != nil {
-		return err
-	}
-
-	path := args[0]
-	if !filepath.IsAbs(path) {
-		path, err = filepath.Abs(path)
-		if err != nil {
-			return err
-		}
-	}
-	return unix.Fchmodat(0, path, uint32(perm), unix.AT_SYMLINK_NOFOLLOW)
-}
-
-// Lchown works like os.Lchown
-// Args:
-//  - args[0] = path
-//  - args[1] = uid in base 10
-//  - args[2] = gid in base 10
-func Lchown(in io.Reader, out io.Writer, args []string) error {
-	if len(args) < 3 {
-		return ErrInvalid
-	}
-
-	uid, err := strconv.ParseInt(args[1], 10, 64)
-	if err != nil {
-		return err
-	}
-
-	gid, err := strconv.ParseInt(args[2], 10, 64)
-	if err != nil {
-		return err
-	}
-	return os.Lchown(args[0], int(uid), int(gid))
-}
-
-// Mknod works like syscall.Mknod
-// Args:
-//  - args[0] = path
-//  - args[1] = permission mode in octal (like 0755)
-//  - args[2] = major device number in base 10
-//  - args[3] = minor device number in base 10
-func Mknod(in io.Reader, out io.Writer, args []string) error {
-	if len(args) < 4 {
-		return ErrInvalid
-	}
-
-	perm, err := strconv.ParseUint(args[1], 8, 32)
-	if err != nil {
-		return err
-	}
-
-	major, err := strconv.ParseInt(args[2], 10, 32)
-	if err != nil {
-		return err
-	}
-
-	minor, err := strconv.ParseInt(args[3], 10, 32)
-	if err != nil {
-		return err
-	}
-
-	dev := unix.Mkdev(uint32(major), uint32(minor))
-	return unix.Mknod(args[0], uint32(perm), int(dev))
-}
-
-// Mkfifo creates a FIFO special file with the given path name and permissions
-// Args:
-// 	- args[0] = path
-//  - args[1] = permission mode in octal (like 0755)
-func Mkfifo(in io.Reader, out io.Writer, args []string) error {
-	if len(args) < 2 {
-		return ErrInvalid
-	}
-
-	perm, err := strconv.ParseUint(args[1], 8, 32)
-	if err != nil {
-		return err
-	}
-	return unix.Mkfifo(args[0], uint32(perm))
-}
-
-// OpenFile works like os.OpenFile. To manage the file pointer state,
-// this function acts as a single file "file server" with Read/Write/Close
-// being serialized control codes from in.
-// Args:
-//  - args[0] = path
-//  - args[1] = flag in base 10
-//  - args[2] = permission mode in octal (like 0755)
-func OpenFile(in io.Reader, out io.Writer, args []string) (err error) {
-	logrus.Debugf("OpenFile: %v", args)
-
-	defer func() {
-		if err != nil {
-			logrus.Errorf("OpenFile: return is non-nil, so writing cmdFailed back: %v", err)
-			// error code will be serialized by the caller, so don't write it here
-			WriteFileHeader(out, &FileHeader{Cmd: CmdFailed}, nil)
-		}
-	}()
-
-	if len(args) < 3 {
-		logrus.Errorf("OpenFile: Not enough parameters")
-		return ErrInvalid
-	}
-
-	flag, err := strconv.ParseInt(args[1], 10, 32)
-	if err != nil {
-		logrus.Errorf("OpenFile: Invalid flag: %v", err)
-		return err
-	}
-
-	perm, err := strconv.ParseUint(args[2], 8, 32)
-	if err != nil {
-		logrus.Errorf("OpenFile: Invalid permission: %v", err)
-		return err
-	}
-
-	f, err := os.OpenFile(args[0], int(flag), os.FileMode(perm))
-	if err != nil {
-		logrus.Errorf("OpenFile: Failed to open: %v", err)
-		return err
-	}
-
-	// Signal the client that OpenFile succeeded
-	logrus.Debugf("OpenFile: Sending OK header")
-	if err := WriteFileHeader(out, &FileHeader{Cmd: CmdOK}, nil); err != nil {
-		return err
-	}
-
-	for {
-		logrus.Debugf("OpenFile: reading header")
-		hdr, err := ReadFileHeader(in)
-		if err != nil {
-			logrus.Errorf("OpenFile: Failed to ReadFileHeader: %v", err)
-			return err
-		}
-		logrus.Debugf("OpenFile: Header: %+v", hdr)
-
-		var buf []byte
-		switch hdr.Cmd {
-		case Read:
-			logrus.Debugf("OpenFile: Read command")
-			buf = make([]byte, hdr.Size, hdr.Size)
-			n, err := f.Read(buf)
-			logrus.Debugf("OpenFile: Issued a read for %d, got %d bytes and error %v", hdr.Size, n, err)
-			if err != nil {
-				logrus.Errorf("OpenFile: Read failed: %v", err)
-				return err
-			}
-			buf = buf[:n]
-		case Write:
-			logrus.Debugf("OpenFile: Write command")
-			if _, err := io.CopyN(f, in, int64(hdr.Size)); err != nil {
-				logrus.Errorf("OpenFile: Write CopyN() failed: %v", err)
-				return err
-			}
-		case Seek:
-			logrus.Debugf("OpenFile: Seek command")
-			seekHdr := &SeekHeader{}
-			if err := binary.Read(in, binary.BigEndian, seekHdr); err != nil {
-				logrus.Errorf("OpenFile: Seek Read() failed: %v", err)
-				return err
-			}
-			res, err := f.Seek(seekHdr.Offset, int(seekHdr.Whence))
-			if err != nil {
-				logrus.Errorf("OpenFile: Seek Seek() failed: %v", err)
-				return err
-			}
-			buffer := &bytes.Buffer{}
-			if err := binary.Write(buffer, binary.BigEndian, res); err != nil {
-				logrus.Errorf("OpenFile: Seek Write() failed: %v", err)
-				return err
-			}
-			buf = buffer.Bytes()
-		case Close:
-			logrus.Debugf("OpenFile: Close command")
-			if err := f.Close(); err != nil {
-				return err
-			}
-		default:
-			logrus.Errorf("OpenFile: unknown command")
-			return ErrUnknown
-		}
-
-		logrus.Debugf("OpenFile: Writing back OK header of size %d", len(buf))
-		retHdr := &FileHeader{
-			Cmd:  CmdOK,
-			Size: uint64(len(buf)),
-		}
-		if err := WriteFileHeader(out, retHdr, buf); err != nil {
-			logrus.Errorf("OpenFile: WriteFileHeader() failed: %v", err)
-			return err
-		}
-
-		if hdr.Cmd == Close {
-			break
-		}
-	}
-	logrus.Debugf("OpenFile: Done, no error")
-	return nil
-}
-
-// ReadFile works like ioutil.ReadFile but instead writes the file to a writer
-// Args:
-//  - args[0] = path
-// Out:
-//  - Write file contents to out
-func ReadFile(in io.Reader, out io.Writer, args []string) error {
-	if len(args) < 1 {
-		return ErrInvalid
-	}
-
-	f, err := os.Open(args[0])
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	if _, err := io.Copy(out, f); err != nil {
-		return nil
-	}
-	return nil
-}
-
-// WriteFile works like ioutil.WriteFile but instead reads the file from a reader
-// Args:
-//  - args[0] = path
-//  - args[1] = permission mode in octal (like 0755)
-//  - input data stream from in
-func WriteFile(in io.Reader, out io.Writer, args []string) error {
-	if len(args) < 2 {
-		return ErrInvalid
-	}
-
-	perm, err := strconv.ParseUint(args[1], 8, 32)
-	if err != nil {
-		return err
-	}
-
-	f, err := os.OpenFile(args[0], os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(perm))
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	if _, err := io.Copy(f, in); err != nil {
-		return err
-	}
-	return nil
-}
-
-// ReadDir works like *os.File.Readdir but instead writes the result to a writer
-// Args:
-//  - args[0] = path
-//  - args[1] = number of directory entries to return. If <= 0, return all entries in directory
-func ReadDir(in io.Reader, out io.Writer, args []string) error {
-	if len(args) < 2 {
-		return ErrInvalid
-	}
-
-	n, err := strconv.ParseInt(args[1], 10, 32)
-	if err != nil {
-		return err
-	}
-
-	f, err := os.Open(args[0])
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	infos, err := f.Readdir(int(n))
-	if err != nil {
-		return err
-	}
-
-	fileInfos := make([]FileInfo, len(infos))
-	for i := range infos {
-		fileInfos[i] = FileInfo{
-			NameVar:    infos[i].Name(),
-			SizeVar:    infos[i].Size(),
-			ModeVar:    infos[i].Mode(),
-			ModTimeVar: infos[i].ModTime().UnixNano(),
-			IsDirVar:   infos[i].IsDir(),
-		}
-	}
-
-	buf, err := json.Marshal(fileInfos)
-	if err != nil {
-		return err
-	}
-
-	if _, err := out.Write(buf); err != nil {
-		return err
-	}
-	return nil
-}
-
-// ResolvePath works like docker's symlink.FollowSymlinkInScope.
-// It takens in a `path` and a `root` and evaluates symlinks in `path`
-// as if they were scoped in `root`. `path` must be a child path of `root`.
-// In other words, `path` must have `root` as a prefix.
-// Example:
-// path=/foo/bar -> /baz
-// root=/foo,
-// Expected result = /foo/baz
-//
-// Args:
-// - args[0] is `path`
-// - args[1] is `root`
-// Out:
-// - Write resolved path to stdout
-func ResolvePath(in io.Reader, out io.Writer, args []string) error {
-	if len(args) < 2 {
-		return ErrInvalid
-	}
-	res, err := symlink.FollowSymlinkInScope(args[0], args[1])
-	if err != nil {
-		return err
-	}
-	if _, err = out.Write([]byte(res)); err != nil {
-		return err
-	}
-	return nil
-}
-
-// ExtractArchive extracts the archive read from in.
-// Args:
-// - in = size of json | json of archive.TarOptions | input tar stream
-// - args[0] = extract directory name
-func ExtractArchive(in io.Reader, out io.Writer, args []string) error {
-	logrus.Debugln("ExtractArchive:", args)
-	if len(args) < 1 {
-		logrus.Errorln("ExtractArchive: invalid args")
-		return ErrInvalid
-	}
-
-	opts, err := ReadTarOptions(in)
-	if err != nil {
-		logrus.Errorf("ExtractArchive: Failed to read tar options: %v", err)
-		return err
-	}
-
-	logrus.Debugf("ExtractArchive: Tar options: %+v", opts)
-	if err := archive.Untar(in, args[0], opts); err != nil {
-		logrus.Errorf("ExtractArchive: Failed to Untar: %v", err)
-		return err
-	}
-	logrus.Debugf("ExtractArchive: Success")
-	return nil
-}
-
-// ArchivePath archives the given directory and writes it to out.
-// Args:
-// - in = size of json | json of archive.TarOptions
-// - args[0] = source directory name
-// Out:
-// - out = tar file of the archive
-func ArchivePath(in io.Reader, out io.Writer, args []string) error {
-	if len(args) < 1 {
-		return ErrInvalid
-	}
-
-	opts, err := ReadTarOptions(in)
-	if err != nil {
-		return err
-	}
-
-	r, err := archive.TarWithOptions(args[0], opts)
-	if err != nil {
-		return err
-	}
-
-	if _, err := io.Copy(out, r); err != nil {
-		return err
-	}
-	return nil
-}

+ 0 - 170
vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/utils.go

@@ -1,170 +0,0 @@
-package remotefs
-
-import (
-	"bytes"
-	"encoding/binary"
-	"encoding/json"
-	"io"
-	"io/ioutil"
-	"os"
-	"syscall"
-
-	"github.com/docker/docker/pkg/archive"
-)
-
-// ReadError is an utility function that reads a serialized error from the given reader
-// and deserializes it.
-func ReadError(in io.Reader) (*ExportedError, error) {
-	b, err := ioutil.ReadAll(in)
-	if err != nil {
-		return nil, err
-	}
-
-	// No error
-	if len(b) == 0 {
-		return nil, nil
-	}
-
-	var exportedErr ExportedError
-	if err := json.Unmarshal(b, &exportedErr); err != nil {
-		return nil, err
-	}
-
-	return &exportedErr, nil
-}
-
-// ExportedToError will convert a ExportedError to an error. It will try to match
-// the error to any existing known error like os.ErrNotExist. Otherwise, it will just
-// return an implementation of the error interface.
-func ExportedToError(ee *ExportedError) error {
-	if ee.Error() == os.ErrNotExist.Error() {
-		return os.ErrNotExist
-	} else if ee.Error() == os.ErrExist.Error() {
-		return os.ErrExist
-	} else if ee.Error() == os.ErrPermission.Error() {
-		return os.ErrPermission
-	} else if ee.Error() == io.EOF.Error() {
-		return io.EOF
-	}
-	return ee
-}
-
-// WriteError is an utility function that serializes the error
-// and writes it to the output writer.
-func WriteError(err error, out io.Writer) error {
-	if err == nil {
-		return nil
-	}
-	err = fixOSError(err)
-
-	var errno int
-	switch typedError := err.(type) {
-	case *os.PathError:
-		if se, ok := typedError.Err.(syscall.Errno); ok {
-			errno = int(se)
-		}
-	case *os.LinkError:
-		if se, ok := typedError.Err.(syscall.Errno); ok {
-			errno = int(se)
-		}
-	case *os.SyscallError:
-		if se, ok := typedError.Err.(syscall.Errno); ok {
-			errno = int(se)
-		}
-	}
-
-	exportedError := &ExportedError{
-		ErrString: err.Error(),
-		ErrNum:    errno,
-	}
-
-	b, err1 := json.Marshal(exportedError)
-	if err1 != nil {
-		return err1
-	}
-
-	_, err1 = out.Write(b)
-	if err1 != nil {
-		return err1
-	}
-	return nil
-}
-
-// fixOSError converts possible platform dependent error into the portable errors in the
-// Go os package if possible.
-func fixOSError(err error) error {
-	// The os.IsExist, os.IsNotExist, and os.IsPermissions functions are platform
-	// dependent, so sending the raw error might break those functions on a different OS.
-	// Go defines portable errors for these.
-	if os.IsExist(err) {
-		return os.ErrExist
-	} else if os.IsNotExist(err) {
-		return os.ErrNotExist
-	} else if os.IsPermission(err) {
-		return os.ErrPermission
-	}
-	return err
-}
-
-// ReadTarOptions reads from the specified reader and deserializes an archive.TarOptions struct.
-func ReadTarOptions(r io.Reader) (*archive.TarOptions, error) {
-	var size uint64
-	if err := binary.Read(r, binary.BigEndian, &size); err != nil {
-		return nil, err
-	}
-
-	rawJSON := make([]byte, size)
-	if _, err := io.ReadFull(r, rawJSON); err != nil {
-		return nil, err
-	}
-
-	var opts archive.TarOptions
-	if err := json.Unmarshal(rawJSON, &opts); err != nil {
-		return nil, err
-	}
-	return &opts, nil
-}
-
-// WriteTarOptions serializes a archive.TarOptions struct and writes it to the writer.
-func WriteTarOptions(w io.Writer, opts *archive.TarOptions) error {
-	optsBuf, err := json.Marshal(opts)
-	if err != nil {
-		return err
-	}
-
-	optsSize := uint64(len(optsBuf))
-	optsSizeBuf := &bytes.Buffer{}
-	if err := binary.Write(optsSizeBuf, binary.BigEndian, optsSize); err != nil {
-		return err
-	}
-
-	if _, err := optsSizeBuf.WriteTo(w); err != nil {
-		return err
-	}
-
-	if _, err := w.Write(optsBuf); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// ReadFileHeader reads from r and returns a deserialized FileHeader
-func ReadFileHeader(r io.Reader) (*FileHeader, error) {
-	hdr := &FileHeader{}
-	if err := binary.Read(r, binary.BigEndian, hdr); err != nil {
-		return nil, err
-	}
-	return hdr, nil
-}
-
-// WriteFileHeader serializes a FileHeader and writes it to w, along with any extra data
-func WriteFileHeader(w io.Writer, hdr *FileHeader, extraData []byte) error {
-	if err := binary.Write(w, binary.BigEndian, hdr); err != nil {
-		return err
-	}
-	if _, err := w.Write(extraData); err != nil {
-		return err
-	}
-	return nil
-}