@@ -65,12 +65,14 @@ import (
 	"strconv"
 	"strings"
 	"sync"
+	"syscall"
 	"time"

 	"github.com/Microsoft/hcsshim"
 	"github.com/Microsoft/opengcs/client"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/system"
@@ -106,72 +108,24 @@ const (

 	// scratchDirectory is the sub-folder under the driver's data-root used for scratch VHDs in service VMs
 	scratchDirectory = "scratch"
-)
-
-// cacheItem is our internal structure representing an item in our local cache
-// of things that have been mounted.
-type cacheItem struct {
-	sync.Mutex        // Protects operations performed on this item
-	uvmPath   string  // Path in utility VM
-	hostPath  string  // Path on host
-	refCount  int     // How many times its been mounted
-	isSandbox bool    // True if a sandbox
-	isMounted bool    // True when mounted in a service VM
-}
-
-// setIsMounted is a helper function for a cacheItem which does exactly what it says
-func (ci *cacheItem) setIsMounted() {
-	logrus.Debugf("locking cache item for set isMounted")
-	ci.Lock()
-	defer ci.Unlock()
-	ci.isMounted = true
-	logrus.Debugf("set isMounted on cache item")
-}
-
-// incrementRefCount is a helper function for a cacheItem which does exactly what it says
-func (ci *cacheItem) incrementRefCount() {
-	logrus.Debugf("locking cache item for increment")
-	ci.Lock()
-	defer ci.Unlock()
-	ci.refCount++
-	logrus.Debugf("incremented refcount on cache item %+v", ci)
-}
-
-// decrementRefCount is a helper function for a cacheItem which does exactly what it says
-func (ci *cacheItem) decrementRefCount() int {
-	logrus.Debugf("locking cache item for decrement")
-	ci.Lock()
-	defer ci.Unlock()
-	ci.refCount--
-	logrus.Debugf("decremented refcount on cache item %+v", ci)
-	return ci.refCount
-}
-
-// serviceVMItem is our internal structure representing an item in our
-// map of service VMs we are maintaining.
-type serviceVMItem struct {
-	sync.Mutex                     // Serialises operations being performed in this service VM.
-	scratchAttached bool           // Has a scratch been attached?
-	config          *client.Config // Represents the service VM item.
-}
+	// errOperationPending is the HRESULT returned by the HCS when the VM termination operation is still pending.
+	errOperationPending syscall.Errno = 0xc0370103
+)

 // Driver represents an LCOW graph driver.
 type Driver struct {
-	dataRoot           string                    // Root path on the host where we are storing everything.
-	cachedSandboxFile  string                    // Location of the local default-sized cached sandbox.
-	cachedSandboxMutex sync.Mutex                // Protects race conditions from multiple threads creating the cached sandbox.
-	cachedScratchFile  string                    // Location of the local cached empty scratch space.
-	cachedScratchMutex sync.Mutex                // Protects race conditions from multiple threads creating the cached scratch.
-	options            []string                  // Graphdriver options we are initialised with.
-	serviceVmsMutex    sync.Mutex                // Protects add/updates/delete to the serviceVMs map.
-	serviceVms         map[string]*serviceVMItem // Map of the configs representing the service VM(s) we are running.
-	globalMode         bool                      // Indicates if running in an unsafe/global service VM mode.
+	dataRoot           string     // Root path on the host where we are storing everything.
+	cachedSandboxFile  string     // Location of the local default-sized cached sandbox.
+	cachedSandboxMutex sync.Mutex // Protects race conditions from multiple threads creating the cached sandbox.
+	cachedScratchFile  string     // Location of the local cached empty scratch space.
+	cachedScratchMutex sync.Mutex // Protects race conditions from multiple threads creating the cached scratch.
+	options            []string   // Graphdriver options we are initialised with.
+	globalMode         bool       // Indicates if running in an unsafe/global service VM mode.

 	// NOTE: It is OK to use a cache here because Windows does not support
 	// restoring containers when the daemon dies.
-
-	cacheMutex sync.Mutex            // Protects add/update/deletes to cache.
-	cache      map[string]*cacheItem // Map holding a cache of all the IDs we've mounted/unmounted.
+	serviceVms *serviceVMMap // Map of the configs representing the service VM(s) we are running.
 }

 // layerDetails is the structure returned by a helper function `getLayerDetails`
@@ -204,9 +158,10 @@ func InitDriver(dataRoot string, options []string, _, _ []idtools.IDMap) (graphd
 		options:           options,
 		cachedSandboxFile: filepath.Join(cd, sandboxFilename),
 		cachedScratchFile: filepath.Join(cd, scratchFilename),
-		cache:             make(map[string]*cacheItem),
-		serviceVms:        make(map[string]*serviceVMItem),
-		globalMode:        false,
+		serviceVms: &serviceVMMap{
+			svms: make(map[string]*serviceVMMapItem),
+		},
+		globalMode: false,
 	}

 	// Looks for relevant options
@@ -248,53 +203,59 @@ func InitDriver(dataRoot string, options []string, _, _ []idtools.IDMap) (graphd
 	return d, nil
 }

+func (d *Driver) getVMID(id string) string {
+	if d.globalMode {
+		return svmGlobalID
+	}
+	return id
+}
+
 // startServiceVMIfNotRunning starts a service utility VM if it is not currently running.
 // It can optionally be started with a mapped virtual disk. Returns a opengcs config structure
 // representing the VM.
-func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd *hcsshim.MappedVirtualDisk, context string) (*serviceVMItem, error) {
+func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd []hcsshim.MappedVirtualDisk, context string) (_ *serviceVM, err error) {
 	// Use the global ID if in global mode
-	if d.globalMode {
-		id = svmGlobalID
-	}
+	id = d.getVMID(id)

 	title := fmt.Sprintf("lcowdriver: startservicevmifnotrunning %s:", id)

-	// Make sure thread-safe when interrogating the map
-	logrus.Debugf("%s taking serviceVmsMutex", title)
-	d.serviceVmsMutex.Lock()
-
-	// Nothing to do if it's already running except add the mapped drive if supplied.
-	if svm, ok := d.serviceVms[id]; ok {
-		logrus.Debugf("%s exists, releasing serviceVmsMutex", title)
-		d.serviceVmsMutex.Unlock()
-
-		if mvdToAdd != nil {
-			logrus.Debugf("hot-adding %s to %s", mvdToAdd.HostPath, mvdToAdd.ContainerPath)
-
-			// Ensure the item is locked while doing this
-			logrus.Debugf("%s locking serviceVmItem %s", title, svm.config.Name)
-			svm.Lock()
-
-			if err := svm.config.HotAddVhd(mvdToAdd.HostPath, mvdToAdd.ContainerPath, false, true); err != nil {
-				logrus.Debugf("%s releasing serviceVmItem %s on hot-add failure %s", title, svm.config.Name, err)
-				svm.Unlock()
-				return nil, fmt.Errorf("%s hot add %s to %s failed: %s", title, mvdToAdd.HostPath, mvdToAdd.ContainerPath, err)
-			}
+	// Attempt to add ID to the service vm map
+	logrus.Debugf("%s: Adding entry to service vm map", title)
+	svm, exists, err := d.serviceVms.add(id)
+	if err != nil && err == errVMisTerminating {
+		// VM is in the process of terminating. Wait until it's done and then try again
+		logrus.Debugf("%s: VM with current ID still in the process of terminating: %s", title, id)
+		if err := svm.getStopError(); err != nil {
+			logrus.Debugf("%s: VM %s did not stop successfully: %s", title, id, err)
+			return nil, err
+		}
+		return d.startServiceVMIfNotRunning(id, mvdToAdd, context)
+	} else if err != nil {
+		logrus.Debugf("%s: failed to add service vm to map: %s", title, err)
+		return nil, fmt.Errorf("%s: failed to add to service vm map: %s", title, err)
+	}

-			logrus.Debugf("%s releasing serviceVmItem %s", title, svm.config.Name)
-			svm.Unlock()
+	if exists {
+		// Service VM is already up and running. In this case, just hot add the vhds.
+		logrus.Debugf("%s: service vm already exists. Just hot adding: %+v", title, mvdToAdd)
+		if err := svm.hotAddVHDs(mvdToAdd...); err != nil {
+			logrus.Debugf("%s: failed to hot add vhds on service vm creation: %s", title, err)
+			return nil, fmt.Errorf("%s: failed to hot add vhds on service vm: %s", title, err)
 		}
 		return svm, nil
 	}

-	// Release the lock early
-	logrus.Debugf("%s releasing serviceVmsMutex", title)
-	d.serviceVmsMutex.Unlock()
+	// We are the first service for this id, so we need to start it
+	logrus.Debugf("%s: service vm doesn't exist. Now starting it up: %s", title, id)

-	// So we are starting one. First need an enpty structure.
-	svm := &serviceVMItem{
-		config: &client.Config{},
-	}
+	defer func() {
+		// Signal that start has finished, passing in the error if any.
+		svm.signalStartFinished(err)
+		if err != nil {
+			// We added a ref to the VM; since we failed, we should delete the ref.
+			d.terminateServiceVM(id, "error path on startServiceVMIfNotRunning", false)
+		}
+	}()

 	// Generate a default configuration
 	if err := svm.config.GenerateDefault(d.options); err != nil {
@@ -335,12 +296,14 @@ func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd *hcsshim.MappedV
 		svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvd)
 		svm.scratchAttached = true
 	}
+
 	logrus.Debugf("%s releasing cachedScratchMutex", title)
 	d.cachedScratchMutex.Unlock()

 	// If requested to start it with a mapped virtual disk, add it now.
-	if mvdToAdd != nil {
-		svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, *mvdToAdd)
+	svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvdToAdd...)
+	for _, mvd := range svm.config.MappedVirtualDisks {
+		svm.attachedVHDs[mvd.HostPath] = 1
 	}

 	// Start it.
@@ -349,108 +312,80 @@ func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd *hcsshim.MappedV
 		return nil, fmt.Errorf("failed to start service utility VM (%s): %s", context, err)
 	}

-	// As it's now running, add it to the map, checking for a race where another
-	// thread has simultaneously tried to start it.
-	logrus.Debugf("%s locking serviceVmsMutex for insertion", title)
-	d.serviceVmsMutex.Lock()
-	if svm, ok := d.serviceVms[id]; ok {
-		logrus.Debugf("%s releasing serviceVmsMutex after insertion but exists", title)
-		d.serviceVmsMutex.Unlock()
-		return svm, nil
-	}
-	d.serviceVms[id] = svm
-	logrus.Debugf("%s releasing serviceVmsMutex after insertion", title)
-	d.serviceVmsMutex.Unlock()
+	// defer function to terminate the VM if the next steps fail
+	defer func() {
+		if err != nil {
+			waitTerminate(svm, fmt.Sprintf("startServiceVmIfNotRunning: %s (%s)", id, context))
+		}
+	}()

 	// Now we have a running service VM, we can create the cached scratch file if it doesn't exist.
 	logrus.Debugf("%s locking cachedScratchMutex", title)
 	d.cachedScratchMutex.Lock()
 	if _, err := os.Stat(d.cachedScratchFile); err != nil {
-		logrus.Debugf("%s (%s): creating an SVM scratch - locking serviceVM", title, context)
-		svm.Lock()
+		logrus.Debugf("%s (%s): creating an SVM scratch", title, context)
+
+		// Don't use svm.CreateExt4Vhdx since that only works when the service vm is set up,
+		// but we're still in that process right now.
 		if err := svm.config.CreateExt4Vhdx(scratchTargetFile, client.DefaultVhdxSizeGB, d.cachedScratchFile); err != nil {
-			logrus.Debugf("%s (%s): releasing serviceVM on error path from CreateExt4Vhdx: %s", title, context, err)
-			svm.Unlock()
 			logrus.Debugf("%s (%s): releasing cachedScratchMutex on error path", title, context)
 			d.cachedScratchMutex.Unlock()
-
-			// Do a force terminate and remove it from the map on failure, ignoring any errors
-			if err2 := d.terminateServiceVM(id, "error path from CreateExt4Vhdx", true); err2 != nil {
-				logrus.Warnf("failed to terminate service VM on error path from CreateExt4Vhdx: %s", err2)
-			}
-
+			logrus.Debugf("%s: failed to create vm scratch %s: %s", title, scratchTargetFile, err)
 			return nil, fmt.Errorf("failed to create SVM scratch VHDX (%s): %s", context, err)
 		}
-		logrus.Debugf("%s (%s): releasing serviceVM after %s created and cached to %s", title, context, scratchTargetFile, d.cachedScratchFile)
-		svm.Unlock()
 	}
 	logrus.Debugf("%s (%s): releasing cachedScratchMutex", title, context)
 	d.cachedScratchMutex.Unlock()

 	// Hot-add the scratch-space if not already attached
 	if !svm.scratchAttached {
-		logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) hot-adding scratch %s - locking serviceVM", context, scratchTargetFile)
-		svm.Lock()
-		if err := svm.config.HotAddVhd(scratchTargetFile, toolsScratchPath, false, true); err != nil {
-			logrus.Debugf("%s (%s): releasing serviceVM on error path of HotAddVhd: %s", title, context, err)
-			svm.Unlock()
-
-			// Do a force terminate and remove it from the map on failure, ignoring any errors
-			if err2 := d.terminateServiceVM(id, "error path from HotAddVhd", true); err2 != nil {
-				logrus.Warnf("failed to terminate service VM on error path from HotAddVhd: %s", err2)
-			}
-
+		logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) hot-adding scratch %s", context, scratchTargetFile)
+		if err := svm.hotAddVHDsAtStart(hcsshim.MappedVirtualDisk{
+			HostPath:          scratchTargetFile,
+			ContainerPath:     toolsScratchPath,
+			CreateInUtilityVM: true,
+		}); err != nil {
+			logrus.Debugf("%s: failed to hot-add scratch %s: %s", title, scratchTargetFile, err)
 			return nil, fmt.Errorf("failed to hot-add %s failed: %s", scratchTargetFile, err)
 		}
-		logrus.Debugf("%s (%s): releasing serviceVM", title, context)
-		svm.Unlock()
+		svm.scratchAttached = true
 	}

 	logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) success", context)
 	return svm, nil
 }

-// getServiceVM returns the appropriate service utility VM instance, optionally
-// deleting it from the map (but not the global one)
-func (d *Driver) getServiceVM(id string, deleteFromMap bool) (*serviceVMItem, error) {
-	logrus.Debugf("lcowdriver: getservicevm:locking serviceVmsMutex")
-	d.serviceVmsMutex.Lock()
-	defer func() {
-		logrus.Debugf("lcowdriver: getservicevm:releasing serviceVmsMutex")
-		d.serviceVmsMutex.Unlock()
-	}()
-	if d.globalMode {
-		id = svmGlobalID
-	}
-	if _, ok := d.serviceVms[id]; !ok {
-		return nil, fmt.Errorf("getservicevm for %s failed as not found", id)
-	}
-	svm := d.serviceVms[id]
-	if deleteFromMap && id != svmGlobalID {
-		logrus.Debugf("lcowdriver: getservicevm: removing %s from map", id)
-		delete(d.serviceVms, id)
+// terminateServiceVM terminates a service utility VM if it's running and not
+// being used by any goroutine, but does nothing when in global mode as its
+// lifetime is limited to that of the daemon. If the force flag is set, then
+// the VM will be killed regardless of the ref count or whether it's global.
+func (d *Driver) terminateServiceVM(id, context string, force bool) (err error) {
+	// We don't do anything in safe mode unless the force flag has been passed, which
+	// is only the case for cleanup at driver termination.
+	if d.globalMode && !force {
+		logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - doing nothing as in global mode", id, context)
+		return nil
 	}
-	return svm, nil
-}

-// terminateServiceVM terminates a service utility VM if its running, but does nothing
-// when in global mode as it's lifetime is limited to that of the daemon.
-func (d *Driver) terminateServiceVM(id, context string, force bool) error {
+	id = d.getVMID(id)

-	// We don't do anything in safe mode unless the force flag has been passed, which
-	// is only the case for cleanup at driver termination.
-	if d.globalMode {
-		if !force {
-			logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - doing nothing as in global mode", id, context)
-			return nil
-		}
-		id = svmGlobalID
+	var svm *serviceVM
+	var lastRef bool
+	if !force {
+		// In the not force case, we ref count
+		svm, lastRef, err = d.serviceVms.decrementRefCount(id)
+	} else {
+		// In the force case, we ignore the ref count and just set it to 0
+		svm, err = d.serviceVms.setRefCountZero(id)
+		lastRef = true
 	}

-	// Get the service VM and delete it from the map
-	svm, err := d.getServiceVM(id, true)
-	if err != nil {
-		return err
+	if err == errVMUnknown {
+		return nil
+	} else if err == errVMisTerminating {
+		return svm.getStopError()
+	} else if !lastRef {
+		return nil
 	}

 	// We run the deletion of the scratch as a deferred function to at least attempt
@@ -459,26 +394,64 @@ func (d *Driver) terminateServiceVM(id, context string, force bool) error {
 		if svm.scratchAttached {
 			scratchTargetFile := filepath.Join(d.dataRoot, scratchDirectory, fmt.Sprintf("%s.vhdx", id))
 			logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - deleting scratch %s", id, context, scratchTargetFile)
-			if err := os.Remove(scratchTargetFile); err != nil {
-				logrus.Warnf("failed to remove scratch file %s (%s): %s", scratchTargetFile, context, err)
+			if errRemove := os.Remove(scratchTargetFile); errRemove != nil {
+				logrus.Warnf("failed to remove scratch file %s (%s): %s", scratchTargetFile, context, errRemove)
+				err = errRemove
 			}
 		}
+
+		// This function shouldn't actually return an error unless there is a bug
+		if errDelete := d.serviceVms.deleteID(id); errDelete != nil {
+			logrus.Warnf("failed to delete service vm from svm map %s (%s): %s", id, context, errDelete)
+		}
+
+		// Signal that this VM has stopped
+		svm.signalStopFinished(err)
 	}()

-	// Nothing to do if it's not running
-	if svm.config.Uvm != nil {
-		logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - calling terminate", id, context)
-		if err := svm.config.Uvm.Terminate(); err != nil {
-			return fmt.Errorf("failed to terminate utility VM (%s): %s", context, err)
+	// Now it's possible that the service VM failed to start and now we are trying to terminate it.
+	// In this case, we will relay the error to the goroutines waiting for this vm to stop.
+	if err := svm.getStartError(); err != nil {
+		logrus.Debugf("lcowdriver: terminateservicevm: %s had failed to start up: %s", id, err)
+		return err
+	}
+
+	if err := waitTerminate(svm, fmt.Sprintf("terminateservicevm: %s (%s)", id, context)); err != nil {
+		return err
+	}
+
+	logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - success", id, context)
+	return nil
+}
+
+func waitTerminate(svm *serviceVM, context string) error {
+	if svm.config == nil {
+		return fmt.Errorf("lcowdriver: waitTerminate: Nil utility VM. %s", context)
+	}
+
+	logrus.Debugf("lcowdriver: waitTerminate: Calling terminate: %s", context)
+	if err := svm.config.Uvm.Terminate(); err != nil {
+		// We might get operation still pending from the HCS. In that case, we shouldn't return
+		// an error since we call wait right after.
+		underlyingError := err
+		if conterr, ok := err.(*hcsshim.ContainerError); ok {
+			underlyingError = conterr.Err
 		}

-		logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - waiting for utility VM to terminate", id, context)
-		if err := svm.config.Uvm.WaitTimeout(time.Duration(svm.config.UvmTimeoutSeconds) * time.Second); err != nil {
-			return fmt.Errorf("failed waiting for utility VM to terminate (%s): %s", context, err)
+		if syscallErr, ok := underlyingError.(syscall.Errno); ok {
+			underlyingError = syscallErr
+		}
+
+		if underlyingError != errOperationPending {
+			return fmt.Errorf("failed to terminate utility VM (%s): %s", context, err)
 		}
+		logrus.Debugf("lcowdriver: waitTerminate: uvm.Terminate() returned operation pending (%s)", context)
 	}

-	logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - success", id, context)
+	logrus.Debugf("lcowdriver: waitTerminate: (%s) - waiting for utility VM to terminate", context)
+	if err := svm.config.Uvm.WaitTimeout(time.Duration(svm.config.UvmTimeoutSeconds) * time.Second); err != nil {
+		return fmt.Errorf("failed waiting for utility VM to terminate (%s): %s", context, err)
+	}
 	return nil
 }

@@ -571,25 +544,18 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
 		}()
 	}

-	// Synchronise the operation in the service VM.
-	logrus.Debugf("%s: locking svm for sandbox creation", title)
-	svm.Lock()
-	defer func() {
-		logrus.Debugf("%s: releasing svm for sandbox creation", title)
-		svm.Unlock()
-	}()
-
 	// Make sure we don't write to our local cached copy if this is for a non-default size request.
 	targetCacheFile := d.cachedSandboxFile
 	if sandboxSize != client.DefaultVhdxSizeGB {
 		targetCacheFile = ""
 	}

-	// Actually do the creation.
-	if err := svm.config.CreateExt4Vhdx(filepath.Join(d.dir(id), sandboxFilename), uint32(sandboxSize), targetCacheFile); err != nil {
+	// Create the ext4 vhdx
+	logrus.Debugf("%s: creating sandbox ext4 vhdx", title)
+	if err := svm.createExt4VHDX(filepath.Join(d.dir(id), sandboxFilename), uint32(sandboxSize), targetCacheFile); err != nil {
+		logrus.Debugf("%s: failed to create sandbox vhdx for %s: %s", title, id, err)
 		return err
 	}
-
 	return nil
 }

@@ -638,6 +604,21 @@ func (d *Driver) Remove(id string) error {
 	layerPath := d.dir(id)

 	logrus.Debugf("lcowdriver: remove: id %s: layerPath %s", id, layerPath)
+
+	// Unmount all the layers
+	err := d.Put(id)
+	if err != nil {
+		logrus.Debugf("lcowdriver: remove id %s: failed to unmount: %s", id, err)
+		return err
+	}
+
+	// for non-global case just kill the vm
+	if !d.globalMode {
+		if err := d.terminateServiceVM(id, fmt.Sprintf("Remove %s", id), true); err != nil {
+			return err
+		}
+	}
+
 	if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) {
 		return err
 	}
@@ -659,43 +640,24 @@ func (d *Driver) Remove(id string) error {
 // For optimisation, we don't actually mount the filesystem (which in our
 // case means [hot-]adding it to a service VM. But we track that and defer
 // the actual adding to the point we need to access it.
-func (d *Driver) Get(id, mountLabel string) (string, error) {
+func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
 	title := fmt.Sprintf("lcowdriver: get: %s", id)
 	logrus.Debugf(title)

-	// Work out what we are working on
-	ld, err := getLayerDetails(d.dir(id))
+	// Generate the mounts needed for the deferred operation.
+	disks, err := d.getAllMounts(id)
 	if err != nil {
-		logrus.Debugf("%s failed to get layer details from %s: %s", title, d.dir(id), err)
-		return "", fmt.Errorf("%s failed to open layer or sandbox VHD to open in %s: %s", title, d.dir(id), err)
-	}
-	logrus.Debugf("%s %s, size %d, isSandbox %t", title, ld.filename, ld.size, ld.isSandbox)
-
-	// Add item to cache, or update existing item, but ensure we have the
-	// lock while updating items.
-	logrus.Debugf("%s: locking cacheMutex", title)
-	d.cacheMutex.Lock()
-	var ci *cacheItem
-	if item, ok := d.cache[id]; !ok {
-		// The item is not currently in the cache.
-		ci = &cacheItem{
-			refCount:  1,
-			isSandbox: ld.isSandbox,
-			hostPath:  ld.filename,
-			uvmPath:   fmt.Sprintf("/mnt/%s", id),
-			isMounted: false, // we defer this as an optimisation
-		}
-		d.cache[id] = ci
-		logrus.Debugf("%s: added cache item %+v", title, ci)
-	} else {
-		// Increment the reference counter in the cache.
-		item.incrementRefCount()
+		logrus.Debugf("%s failed to get all layer details for %s: %s", title, d.dir(id), err)
+		return nil, fmt.Errorf("%s failed to get layer details for %s: %s", title, d.dir(id), err)
 	}
-	logrus.Debugf("%s: releasing cacheMutex", title)
-	d.cacheMutex.Unlock()

-	logrus.Debugf("%s %s success. %s: %+v: size %d", title, id, d.dir(id), ci, ld.size)
-	return d.dir(id), nil
+	logrus.Debugf("%s: got layer mounts: %+v", title, disks)
+	return &lcowfs{
+		root:        unionMountName(disks),
+		d:           d,
+		mappedDisks: disks,
+		vmID:        d.getVMID(id),
+	}, nil
 }

 // Put does the reverse of get. If there are no more references to
@@ -703,56 +665,45 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
 func (d *Driver) Put(id string) error {
 	title := fmt.Sprintf("lcowdriver: put: %s", id)

-	logrus.Debugf("%s: locking cacheMutex", title)
-	d.cacheMutex.Lock()
-	item, ok := d.cache[id]
-	if !ok {
-		logrus.Debugf("%s: releasing cacheMutex on error path", title)
-		d.cacheMutex.Unlock()
-		return fmt.Errorf("%s possible ref-count error, or invalid id was passed to the graphdriver. Cannot handle id %s as it's not in the cache", title, id)
-	}
-
-	// Decrement the ref-count, and nothing more to do if still in use.
-	if item.decrementRefCount() > 0 {
-		logrus.Debugf("%s: releasing cacheMutex. Cache item is still in use", title)
-		d.cacheMutex.Unlock()
+	// Get the service VM that we need to remove from
+	svm, err := d.serviceVms.get(d.getVMID(id))
+	if err == errVMUnknown {
 		return nil
+	} else if err == errVMisTerminating {
+		return svm.getStopError()
 	}

-	// Remove from the cache map.
-	delete(d.cache, id)
-	logrus.Debugf("%s: releasing cacheMutex. Ref count on cache item has dropped to zero, removed from cache", title)
-	d.cacheMutex.Unlock()
+	// Generate the mounts that Get() might have mounted
+	disks, err := d.getAllMounts(id)
+	if err != nil {
+		logrus.Debugf("%s failed to get all layer details for %s: %s", title, d.dir(id), err)
+		return fmt.Errorf("%s failed to get layer details for %s: %s", title, d.dir(id), err)
+	}

-	// If we have done a mount and we are in global mode, then remove it. We don't
-	// need to remove in safe mode as the service VM is going to be torn down anyway.
-	if d.globalMode {
-		logrus.Debugf("%s: locking cache item at zero ref-count", title)
-		item.Lock()
-		defer func() {
-			logrus.Debugf("%s: releasing cache item at zero ref-count", title)
-			item.Unlock()
-		}()
-		if item.isMounted {
-			svm, err := d.getServiceVM(id, false)
-			if err != nil {
-				return err
-			}
+	// Now, we want to perform the unmounts, hot-remove and stop the service vm.
+	// We want to go through all the steps even if we have an error to clean up properly
+	err = svm.deleteUnionMount(unionMountName(disks), disks...)
+	if err != nil {
+		logrus.Debugf("%s failed to delete union mount %s: %s", title, id, err)
+	}

-			logrus.Debugf("%s: Hot-Removing %s. Locking svm", title, item.hostPath)
-			svm.Lock()
-			if err := svm.config.HotRemoveVhd(item.hostPath); err != nil {
-				logrus.Debugf("%s: releasing svm on error path", title)
-				svm.Unlock()
-				return fmt.Errorf("%s failed to hot-remove %s from global service utility VM: %s", title, item.hostPath, err)
-			}
-			logrus.Debugf("%s: releasing svm", title)
-			svm.Unlock()
+	err1 := svm.hotRemoveVHDs(disks...)
+	if err1 != nil {
+		logrus.Debugf("%s failed to hot remove vhds %s: %s", title, id, err1)
+		if err == nil {
+			err = err1
 		}
 	}

-	logrus.Debugf("%s %s: refCount 0. %s (%s) completed successfully", title, id, item.hostPath, item.uvmPath)
-	return nil
+	err1 = d.terminateServiceVM(id, fmt.Sprintf("Put %s", id), false)
+	if err1 != nil {
+		logrus.Debugf("%s failed to terminate service vm %s: %s", title, id, err1)
+		if err == nil {
+			err = err1
+		}
+	}
+	logrus.Debugf("Put succeeded on id %s", id)
+	return err
 }

 // Cleanup ensures the information the driver stores is properly removed.
@@ -761,15 +712,6 @@ func (d *Driver) Put(id string) error {
 func (d *Driver) Cleanup() error {
 	title := "lcowdriver: cleanup"

-	d.cacheMutex.Lock()
-	for k, v := range d.cache {
-		logrus.Debugf("%s cache item: %s: %+v", title, k, v)
-		if v.refCount > 0 {
-			logrus.Warnf("%s leaked %s: %+v", title, k, v)
-		}
-	}
-	d.cacheMutex.Unlock()
-
 	items, err := ioutil.ReadDir(d.dataRoot)
 	if err != nil {
 		if os.IsNotExist(err) {
@@ -794,8 +736,8 @@ func (d *Driver) Cleanup() error {

 	// Cleanup any service VMs we have running, along with their scratch spaces.
 	// We don't take the lock for this as it's taken in terminateServiceVm.
-	for k, v := range d.serviceVms {
-		logrus.Debugf("%s svm: %s: %+v", title, k, v)
+	for k, v := range d.serviceVms.svms {
+		logrus.Debugf("%s svm entry: %s: %+v", title, k, v)
 		d.terminateServiceVM(k, "cleanup", true)
 	}

@@ -812,65 +754,41 @@ func (d *Driver) Cleanup() error {
 func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
 	title := fmt.Sprintf("lcowdriver: diff: %s", id)

-	logrus.Debugf("%s: locking cacheMutex", title)
-	d.cacheMutex.Lock()
-	if _, ok := d.cache[id]; !ok {
-		logrus.Debugf("%s: releasing cacheMutex on error path", title)
-		d.cacheMutex.Unlock()
-		return nil, fmt.Errorf("%s fail as %s is not in the cache", title, id)
-	}
-	ci := d.cache[id]
-	logrus.Debugf("%s: releasing cacheMutex", title)
-	d.cacheMutex.Unlock()
-
-	// Stat to get size
-	logrus.Debugf("%s: locking cacheItem", title)
-	ci.Lock()
-	fileInfo, err := os.Stat(ci.hostPath)
+	// Get VHDX info
+	ld, err := getLayerDetails(d.dir(id))
 	if err != nil {
-		logrus.Debugf("%s: releasing cacheItem on error path", title)
-		ci.Unlock()
-		return nil, fmt.Errorf("%s failed to stat %s: %s", title, ci.hostPath, err)
+		logrus.Debugf("%s: failed to get vhdx information of %s: %s", title, d.dir(id), err)
+		return nil, err
 	}
-	logrus.Debugf("%s: releasing cacheItem", title)
-	ci.Unlock()

 	// Start the SVM with a mapped virtual disk. Note that if the SVM is
 	// already running and we are in global mode, this will be
 	// hot-added.
-	mvd := &hcsshim.MappedVirtualDisk{
-		HostPath:          ci.hostPath,
-		ContainerPath:     ci.uvmPath,
+	mvd := hcsshim.MappedVirtualDisk{
+		HostPath:          ld.filename,
+		ContainerPath:     hostToGuest(ld.filename),
 		CreateInUtilityVM: true,
 		ReadOnly:          true,
 	}

 	logrus.Debugf("%s: starting service VM", title)
-	svm, err := d.startServiceVMIfNotRunning(id, mvd, fmt.Sprintf("diff %s", id))
+	svm, err := d.startServiceVMIfNotRunning(id, []hcsshim.MappedVirtualDisk{mvd}, fmt.Sprintf("diff %s", id))
 	if err != nil {
 		return nil, err
 	}

-	// Set `isMounted` for the cache item. Note that we re-scan the cache
-	// at this point as it's possible the cacheItem changed during the long-
-	// running operation above when we weren't holding the cacheMutex lock.
-	logrus.Debugf("%s: locking cacheMutex for updating isMounted", title)
-	d.cacheMutex.Lock()
-	if _, ok := d.cache[id]; !ok {
-		logrus.Debugf("%s: releasing cacheMutex on error path of isMounted", title)
-		d.cacheMutex.Unlock()
+	logrus.Debugf("lcowdriver: diff: waiting for svm to finish booting")
+	err = svm.getStartError()
+	if err != nil {
 		d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
-		return nil, fmt.Errorf("%s fail as %s is not in the cache when updating isMounted", title, id)
+		return nil, fmt.Errorf("lcowdriver: diff: svm failed to boot: %s", err)
 	}
-	ci = d.cache[id]
-	ci.setIsMounted()
-	logrus.Debugf("%s: releasing cacheMutex for updating isMounted", title)
-	d.cacheMutex.Unlock()

 	// Obtain the tar stream for it
-	logrus.Debugf("%s %s, size %d, isSandbox %t", title, ci.hostPath, fileInfo.Size(), ci.isSandbox)
-	tarReadCloser, err := svm.config.VhdToTar(ci.hostPath, ci.uvmPath, ci.isSandbox, fileInfo.Size())
+	logrus.Debugf("%s: %s %s, size %d, ReadOnly %t", title, ld.filename, mvd.ContainerPath, ld.size, ld.isSandbox)
+	tarReadCloser, err := svm.config.VhdToTar(mvd.HostPath, mvd.ContainerPath, ld.isSandbox, ld.size)
 	if err != nil {
+		svm.hotRemoveVHDs(mvd)
 		d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
 		return nil, fmt.Errorf("%s failed to export layer to tar stream for id: %s, parent: %s : %s", title, id, parent, err)
 	}
@@ -878,14 +796,12 @@ func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
 	logrus.Debugf("%s id %s parent %s completed successfully", title, id, parent)

 	// In safe/non-global mode, we can't tear down the service VM until things have been read.
-	if !d.globalMode {
-		return ioutils.NewReadCloserWrapper(tarReadCloser, func() error {
-			tarReadCloser.Close()
-			d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
-			return nil
-		}), nil
-	}
-	return tarReadCloser, nil
+	return ioutils.NewReadCloserWrapper(tarReadCloser, func() error {
+		tarReadCloser.Close()
+		svm.hotRemoveVHDs(mvd)
+		d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
+		return nil
+	}), nil
 }

 // ApplyDiff extracts the changeset from the given diff into the
@@ -902,6 +818,12 @@ func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
 	}
 	defer d.terminateServiceVM(id, fmt.Sprintf("applydiff %s", id), false)

+	logrus.Debugf("lcowdriver: applydiff: waiting for svm to finish booting")
+	err = svm.getStartError()
+	if err != nil {
+		return 0, fmt.Errorf("lcowdriver: applydiff: svm failed to boot: %s", err)
+	}
+
 	// TODO @jhowardmsft - the retries are temporary to overcome platform reliablity issues.
 	// Obviously this will be removed as platform bugs are fixed.
 	retries := 0
@@ -944,6 +866,11 @@ func (d *Driver) GetMetadata(id string) (map[string]string, error) {
 	return m, nil
 }

+// GetLayerPath gets the layer path on host (path to VHD/VHDX)
+func (d *Driver) GetLayerPath(id string) (string, error) {
+	return d.dir(id), nil
+}
+
 // dir returns the absolute path to the layer.
 func (d *Driver) dir(id string) string {
 	return filepath.Join(d.dataRoot, filepath.Base(id))
@@ -1006,3 +933,34 @@ func getLayerDetails(folder string) (*layerDetails, error) {

 	return ld, nil
 }
+
+func (d *Driver) getAllMounts(id string) ([]hcsshim.MappedVirtualDisk, error) {
+	layerChain, err := d.getLayerChain(id)
+	if err != nil {
+		return nil, err
+	}
+	layerChain = append([]string{d.dir(id)}, layerChain...)
+
+	logrus.Debugf("getting all layers: %v", layerChain)
+	disks := make([]hcsshim.MappedVirtualDisk, len(layerChain), len(layerChain))
+	for i := range layerChain {
+		ld, err := getLayerDetails(layerChain[i])
+		if err != nil {
+			logrus.Debugf("Failed to get LayerVhdDetails from %s: %s", layerChain[i], err)
+			return nil, err
+		}
+		disks[i].HostPath = ld.filename
+		disks[i].ContainerPath = hostToGuest(ld.filename)
+		disks[i].CreateInUtilityVM = true
+		disks[i].ReadOnly = !ld.isSandbox
+	}
+	return disks, nil
+}
+
+func hostToGuest(hostpath string) string {
+	return fmt.Sprintf("/tmp/%s", filepath.Base(filepath.Dir(hostpath)))
+}
+
+func unionMountName(disks []hcsshim.MappedVirtualDisk) string {
+	return fmt.Sprintf("%s-mount", disks[0].ContainerPath)
+}
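
Note: the serviceVMMap, serviceVMMapItem and serviceVM types that this diff relies on (serviceVms.add/get/decrementRefCount/setRefCountZero/deleteID, getStartError/getStopError, signalStartFinished, hotAddVHDs, and so on) are defined in a companion file that is not part of the hunks shown here. As a rough, illustrative sketch only - not the actual implementation - the ref-counted map behaves along these lines:

// Illustrative sketch; the real definitions live elsewhere in this change and
// differ in detail (start/stop signalling, attached-VHD tracking, etc.).
package lcow

import (
	"errors"
	"sync"
)

// Sentinel errors with the same names as those used in the diff above.
var (
	errVMisTerminating = errors.New("service VM is shutting down")
	errVMUnknown       = errors.New("service vm id is unknown")
)

// serviceVM is stubbed here; the real type also carries the opengcs
// client.Config, start/stop signalling and per-VHD ref counts.
type serviceVM struct {
	scratchAttached bool
}

// serviceVMMapItem pairs a service VM with the number of callers using it.
type serviceVMMapItem struct {
	svm      *serviceVM
	refCount int
}

// serviceVMMap guards the id -> service VM mapping with a single mutex,
// replacing the old serviceVmsMutex/cacheMutex pair.
type serviceVMMap struct {
	sync.Mutex
	svms map[string]*serviceVMMapItem
}

// add returns the VM for id, creating it with a ref count of 1 if absent, or
// bumping the count if it is already present. The real implementation also
// returns errVMisTerminating when the existing VM is in the middle of stopping.
func (m *serviceVMMap) add(id string) (svm *serviceVM, alreadyExists bool, err error) {
	m.Lock()
	defer m.Unlock()
	if item, ok := m.svms[id]; ok {
		item.refCount++
		return item.svm, true, nil
	}
	item := &serviceVMMapItem{svm: &serviceVM{}, refCount: 1}
	m.svms[id] = item
	return item.svm, false, nil
}

// decrementRefCount drops the count for id and reports whether the caller held
// the last reference, i.e. whether the VM should now be torn down.
func (m *serviceVMMap) decrementRefCount(id string) (_ *serviceVM, lastRef bool, _ error) {
	m.Lock()
	defer m.Unlock()
	item, ok := m.svms[id]
	if !ok {
		return nil, false, errVMUnknown
	}
	item.refCount--
	return item.svm, item.refCount == 0, nil
}

The point of this design, as used above, is that Get/Put and Diff/ApplyDiff share one service VM per id: only the caller that drops the last reference (or a forced cleanup) actually tears the VM down, which is what terminateServiceVM's lastRef handling depends on.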