Remove LCOW code (step 1)

The LCOW implementation in dockerd has been deprecated in favor of a re-implementation in containerd (in progress). Microsoft has started removing the LCOW V1 code from the build dependencies we use in Microsoft/opengcs (soon to be part of Microsoft/hcsshim), which means that we need to start removing this code as well. This first step removes the lcow graphdriver, the LCOW initialization code, and some LCOW-related utilities.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
parent a77317882d
commit e047d984dc

31 changed files with 191 additions and 5546 deletions
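For orientation, the net effect on the Windows graphdriver registration (shown in the register hunk further down) is that only the windows driver remains registered. A minimal sketch of the resulting file, reconstructed from that hunk; the file name itself is not shown in this extract:

    package register // import "github.com/docker/docker/daemon/graphdriver/register"

    import (
        // register the windows graph drivers
        // (the lcow driver import is removed by this commit)
        _ "github.com/docker/docker/daemon/graphdriver/windows"
    )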
.github/CODEOWNERS (vendored): 1 line changed
@@ -6,7 +6,6 @@
 builder/** @tonistiigi
 contrib/mkimage/** @tianon
 daemon/graphdriver/devmapper/** @rhvgoyal
-daemon/graphdriver/lcow/** @johnstep
 daemon/graphdriver/overlay/** @dmcgowan
 daemon/graphdriver/overlay2/** @dmcgowan
 daemon/graphdriver/windows/** @johnstep
@@ -16,7 +16,6 @@ import (
     "github.com/docker/docker/errdefs"
     "github.com/docker/docker/pkg/ioutils"
     "github.com/docker/docker/pkg/streamformatter"
-    "github.com/docker/docker/pkg/system"
     "github.com/docker/docker/registry"
     specs "github.com/opencontainers/image-spec/specs-go/v1"
     "github.com/pkg/errors"
@@ -50,9 +49,6 @@ func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite
             if err != nil {
                 return err
             }
-            if err := system.ValidatePlatform(sp); err != nil {
-                return err
-            }
             platform = &sp
         }
     }
@@ -20,7 +20,6 @@ import (
     "github.com/docker/docker/libnetwork"
     "github.com/docker/docker/pkg/idtools"
     "github.com/docker/docker/pkg/streamformatter"
-    "github.com/docker/docker/pkg/system"
     controlapi "github.com/moby/buildkit/api/services/control"
     "github.com/moby/buildkit/client"
     "github.com/moby/buildkit/control"
@@ -299,13 +298,10 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
     if opt.Options.Platform != "" {
         // same as in newBuilder in builder/dockerfile.builder.go
         // TODO: remove once opt.Options.Platform is of type specs.Platform
-        sp, err := platforms.Parse(opt.Options.Platform)
+        _, err := platforms.Parse(opt.Options.Platform)
         if err != nil {
             return nil, err
         }
-        if err := system.ValidatePlatform(sp); err != nil {
-            return nil, err
-        }
         frontendAttrs["platform"] = opt.Options.Platform
     }
 
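After this hunk the platform string is still parsed for validation, but the parsed value is discarded and the system.ValidatePlatform check is gone. A sketch of the resulting block, assembled from the added and context lines above:

    if opt.Options.Platform != "" {
        // same as in newBuilder in builder/dockerfile.builder.go
        // TODO: remove once opt.Options.Platform is of type specs.Platform
        _, err := platforms.Parse(opt.Options.Platform)
        if err != nil {
            return nil, err
        }
        frontendAttrs["platform"] = opt.Options.Platform
    }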
@@ -159,9 +159,6 @@ func newBuilder(clientCtx context.Context, options builderOptions) (*Builder, er
         if err != nil {
             return nil, err
         }
-        if err := system.ValidatePlatform(sp); err != nil {
-            return nil, err
-        }
         b.platform = &sp
     }
 
@@ -172,9 +172,6 @@ func initializeStage(d dispatchRequest, cmd *instructions.Stage) error {
         if err != nil {
             return errors.Wrapf(err, "failed to parse platform %s", v)
         }
-        if err := system.ValidatePlatform(p); err != nil {
-            return err
-        }
         platform = &p
     }
 
@@ -264,10 +261,7 @@ func (d *dispatchRequest) getImageOrStage(name string, platform *specs.Platform)
         // from it.
         if runtime.GOOS == "windows" {
             if platform == nil || platform.OS == "linux" {
-                if !system.LCOWSupported() {
-                    return nil, errors.New("Linux containers are not supported on this system")
-                }
-                imageImage.OS = "linux"
+                return nil, errors.New("Linux containers are not supported on this system")
             } else if platform.OS == "windows" {
                 return nil, errors.New("Windows does not support FROM scratch")
             } else {
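The resulting Windows branch, reconstructed from the hunk above: FROM scratch with a Linux (or unspecified) platform is now rejected outright rather than being allowed when LCOW support was enabled. The final branch is elided here exactly as it is in the diff:

    if runtime.GOOS == "windows" {
        if platform == nil || platform.OS == "linux" {
            return nil, errors.New("Linux containers are not supported on this system")
        } else if platform.OS == "windows" {
            return nil, errors.New("Windows does not support FROM scratch")
        } else {
            // (unchanged code, not shown in this hunk)
        }
    }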
@@ -117,8 +117,6 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
         return fmt.Errorf("dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation")
     }
 
-    system.InitLCOW(cli.Config.Experimental)
-
     if err := setDefaultUmask(); err != nil {
         return err
     }
@@ -746,7 +746,7 @@ func (container *Container) CreateDaemonEnvironment(tty bool, linkedEnv []string
     }
 
     env := make([]string, 0, envSize)
-    if runtime.GOOS != "windows" || (runtime.GOOS == "windows" && os == "linux") {
+    if runtime.GOOS != "windows" {
         env = append(env, "PATH="+system.DefaultPathEnv(os))
         env = append(env, "HOSTNAME="+container.Config.Hostname)
         if tty {
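The simplified check, reconstructed from the replacement line above: with LCOW gone, the Linux-style PATH and HOSTNAME environment is only set up when the daemon itself is not running on Windows.

    env := make([]string, 0, envSize)
    if runtime.GOOS != "windows" {
        env = append(env, "PATH="+system.DefaultPathEnv(os))
        env = append(env, "HOSTNAME="+container.Config.Hostname)
        // (tty handling continues as in the context lines above)
    }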
@@ -69,12 +69,6 @@ func (daemon *Daemon) containerCreate(opts createOpts) (containertypes.Container
         if err == nil {
             os = img.OS
         }
-    } else {
-        // This mean scratch. On Windows, we can safely assume that this is a linux
-        // container. On other platforms, it's the host OS (which it already is)
-        if isWindows && system.LCOWSupported() {
-            os = "linux"
-        }
     }
 
     warnings, err := daemon.verifyContainerSettings(os, opts.params.HostConfig, opts.params.Config, false)
@@ -499,19 +499,12 @@ func (daemon *Daemon) runAsHyperVContainer(hostConfig *containertypes.HostConfig
 // conditionalMountOnStart is a platform specific helper function during the
 // container start to call mount.
 func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
-
-    // Bail out now for Linux containers. We cannot mount the containers filesystem on the
-    // host as it is a non-Windows filesystem.
-    if system.LCOWSupported() && container.OS != "windows" {
+    if daemon.runAsHyperVContainer(container.HostConfig) {
+        // We do not mount if a Hyper-V container as it needs to be mounted inside the
+        // utility VM, not the host.
         return nil
     }
-
-    // We do not mount if a Hyper-V container as it needs to be mounted inside the
-    // utility VM, not the host.
-    if !daemon.runAsHyperVContainer(container.HostConfig) {
-        return daemon.Mount(container)
-    }
-    return nil
+    return daemon.Mount(container)
 }
 
 // conditionalUnmountOnCleanup is a platform specific helper function called
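Because the deletions and additions in this hunk interleave awkwardly, here is the whole resulting function, assembled from the added and context lines above (a reconstruction of the post-commit state, not an excerpt from a later revision):

    func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
        if daemon.runAsHyperVContainer(container.HostConfig) {
            // We do not mount if a Hyper-V container as it needs to be mounted inside the
            // utility VM, not the host.
            return nil
        }
        return daemon.Mount(container)
    }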
File diff suppressed because it is too large
@@ -1,421 +0,0 @@
|
|||
// +build windows
|
||||
|
||||
package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/Microsoft/opengcs/client"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Code for all the service VM management for the LCOW graphdriver
|
||||
|
||||
var errVMisTerminating = errors.New("service VM is shutting down")
|
||||
var errVMUnknown = errors.New("service vm id is unknown")
|
||||
var errVMStillHasReference = errors.New("Attemping to delete a VM that is still being used")
|
||||
|
||||
// serviceVMMap is the struct representing the id -> service VM mapping.
|
||||
type serviceVMMap struct {
|
||||
sync.Mutex
|
||||
svms map[string]*serviceVMMapItem
|
||||
}
|
||||
|
||||
// serviceVMMapItem is our internal structure representing an item in our
|
||||
// map of service VMs we are maintaining.
|
||||
type serviceVMMapItem struct {
|
||||
svm *serviceVM // actual service vm object
|
||||
refCount int // refcount for VM
|
||||
}
|
||||
|
||||
// attachedVHD is for reference counting SCSI disks attached to a service VM,
|
||||
// and for a counter used to generate a short path name for the container path.
|
||||
type attachedVHD struct {
|
||||
refCount int
|
||||
attachCounter uint64
|
||||
}
|
||||
|
||||
type serviceVM struct {
|
||||
sync.Mutex // Serialises operations being performed in this service VM.
|
||||
scratchAttached bool // Has a scratch been attached?
|
||||
config *client.Config // Represents the service VM item.
|
||||
|
||||
// Indicates that the vm is started
|
||||
startStatus chan interface{}
|
||||
startError error
|
||||
|
||||
// Indicates that the vm is stopped
|
||||
stopStatus chan interface{}
|
||||
stopError error
|
||||
|
||||
attachCounter uint64 // Increasing counter for each add
|
||||
attachedVHDs map[string]*attachedVHD // Map ref counting all the VHDS we've hot-added/hot-removed.
|
||||
unionMounts map[string]int // Map ref counting all the union filesystems we mounted.
|
||||
}
|
||||
|
||||
// add will add an id to the service vm map. There are three cases:
|
||||
// - entry doesn't exist:
|
||||
// - add id to map and return a new vm that the caller can manually configure+start
|
||||
// - entry does exist
|
||||
// - return vm in map and increment ref count
|
||||
// - entry does exist but the ref count is 0
|
||||
// - return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
|
||||
func (svmMap *serviceVMMap) add(id string) (svm *serviceVM, alreadyExists bool, err error) {
|
||||
svmMap.Lock()
|
||||
defer svmMap.Unlock()
|
||||
if svm, ok := svmMap.svms[id]; ok {
|
||||
if svm.refCount == 0 {
|
||||
return svm.svm, true, errVMisTerminating
|
||||
}
|
||||
svm.refCount++
|
||||
return svm.svm, true, nil
|
||||
}
|
||||
|
||||
// Doesn't exist, so create an empty svm to put into map and return
|
||||
newSVM := &serviceVM{
|
||||
startStatus: make(chan interface{}),
|
||||
stopStatus: make(chan interface{}),
|
||||
attachedVHDs: make(map[string]*attachedVHD),
|
||||
unionMounts: make(map[string]int),
|
||||
config: &client.Config{},
|
||||
}
|
||||
svmMap.svms[id] = &serviceVMMapItem{
|
||||
svm: newSVM,
|
||||
refCount: 1,
|
||||
}
|
||||
return newSVM, false, nil
|
||||
}
|
||||
|
||||
// get will get the service vm from the map. There are three cases:
|
||||
// - entry doesn't exist:
|
||||
// - return errVMUnknown
|
||||
// - entry does exist
|
||||
// - return vm with no error
|
||||
// - entry does exist but the ref count is 0
|
||||
// - return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
|
||||
func (svmMap *serviceVMMap) get(id string) (*serviceVM, error) {
|
||||
svmMap.Lock()
|
||||
defer svmMap.Unlock()
|
||||
svm, ok := svmMap.svms[id]
|
||||
if !ok {
|
||||
return nil, errVMUnknown
|
||||
}
|
||||
if svm.refCount == 0 {
|
||||
return svm.svm, errVMisTerminating
|
||||
}
|
||||
return svm.svm, nil
|
||||
}
|
||||
|
||||
// decrementRefCount decrements the ref count of the given ID from the map. There are four cases:
|
||||
// - entry doesn't exist:
|
||||
// - return errVMUnknown
|
||||
// - entry does exist but the ref count is 0
|
||||
// - return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
|
||||
// - entry does exist but ref count is 1
|
||||
// - return vm and set lastRef to true. The caller can then stop the vm, delete the id from this map
|
||||
// - and execute svm.signalStopFinished to signal the threads that the svm has been terminated.
|
||||
// - entry does exist and ref count > 1
|
||||
// - just reduce ref count and return svm
|
||||
func (svmMap *serviceVMMap) decrementRefCount(id string) (_ *serviceVM, lastRef bool, _ error) {
|
||||
svmMap.Lock()
|
||||
defer svmMap.Unlock()
|
||||
|
||||
svm, ok := svmMap.svms[id]
|
||||
if !ok {
|
||||
return nil, false, errVMUnknown
|
||||
}
|
||||
if svm.refCount == 0 {
|
||||
return svm.svm, false, errVMisTerminating
|
||||
}
|
||||
svm.refCount--
|
||||
return svm.svm, svm.refCount == 0, nil
|
||||
}
|
||||
|
||||
// setRefCountZero works the same way as decrementRefCount, but sets ref count to 0 instead of decrementing it.
|
||||
func (svmMap *serviceVMMap) setRefCountZero(id string) (*serviceVM, error) {
|
||||
svmMap.Lock()
|
||||
defer svmMap.Unlock()
|
||||
|
||||
svm, ok := svmMap.svms[id]
|
||||
if !ok {
|
||||
return nil, errVMUnknown
|
||||
}
|
||||
if svm.refCount == 0 {
|
||||
return svm.svm, errVMisTerminating
|
||||
}
|
||||
svm.refCount = 0
|
||||
return svm.svm, nil
|
||||
}
|
||||
|
||||
// deleteID deletes the given ID from the map. If the refcount is not 0 or the
|
||||
// VM does not exist, then this function returns an error.
|
||||
func (svmMap *serviceVMMap) deleteID(id string) error {
|
||||
svmMap.Lock()
|
||||
defer svmMap.Unlock()
|
||||
svm, ok := svmMap.svms[id]
|
||||
if !ok {
|
||||
return errVMUnknown
|
||||
}
|
||||
if svm.refCount != 0 {
|
||||
return errVMStillHasReference
|
||||
}
|
||||
delete(svmMap.svms, id)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (svm *serviceVM) signalStartFinished(err error) {
|
||||
svm.Lock()
|
||||
svm.startError = err
|
||||
svm.Unlock()
|
||||
close(svm.startStatus)
|
||||
}
|
||||
|
||||
func (svm *serviceVM) getStartError() error {
|
||||
<-svm.startStatus
|
||||
svm.Lock()
|
||||
defer svm.Unlock()
|
||||
return svm.startError
|
||||
}
|
||||
|
||||
func (svm *serviceVM) signalStopFinished(err error) {
|
||||
svm.Lock()
|
||||
svm.stopError = err
|
||||
svm.Unlock()
|
||||
close(svm.stopStatus)
|
||||
}
|
||||
|
||||
func (svm *serviceVM) getStopError() error {
|
||||
<-svm.stopStatus
|
||||
svm.Lock()
|
||||
defer svm.Unlock()
|
||||
return svm.stopError
|
||||
}
|
||||
|
||||
// hotAddVHDs waits for the service vm to start and then attaches the vhds.
|
||||
func (svm *serviceVM) hotAddVHDs(mvds ...hcsshim.MappedVirtualDisk) error {
|
||||
if err := svm.getStartError(); err != nil {
|
||||
return err
|
||||
}
|
||||
return svm.hotAddVHDsAtStart(mvds...)
|
||||
}
|
||||
|
||||
// hotAddVHDsAtStart works the same way as hotAddVHDs but does not wait for the VM to start.
|
||||
func (svm *serviceVM) hotAddVHDsAtStart(mvds ...hcsshim.MappedVirtualDisk) error {
|
||||
svm.Lock()
|
||||
defer svm.Unlock()
|
||||
for i, mvd := range mvds {
|
||||
if _, ok := svm.attachedVHDs[mvd.HostPath]; ok {
|
||||
svm.attachedVHDs[mvd.HostPath].refCount++
|
||||
logrus.Debugf("lcowdriver: UVM %s: %s already present, refCount now %d", svm.config.Name, mvd.HostPath, svm.attachedVHDs[mvd.HostPath].refCount)
|
||||
continue
|
||||
}
|
||||
|
||||
svm.attachCounter++
|
||||
shortContainerPath := remapLongToShortContainerPath(mvd.ContainerPath, svm.attachCounter, svm.config.Name)
|
||||
if err := svm.config.HotAddVhd(mvd.HostPath, shortContainerPath, mvd.ReadOnly, !mvd.AttachOnly); err != nil {
|
||||
svm.hotRemoveVHDsNoLock(mvds[:i]...)
|
||||
return err
|
||||
}
|
||||
svm.attachedVHDs[mvd.HostPath] = &attachedVHD{refCount: 1, attachCounter: svm.attachCounter}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// hotRemoveVHDs waits for the service vm to start and then removes the vhds.
|
||||
// The service VM must not be locked when calling this function.
|
||||
func (svm *serviceVM) hotRemoveVHDs(mvds ...hcsshim.MappedVirtualDisk) error {
|
||||
if err := svm.getStartError(); err != nil {
|
||||
return err
|
||||
}
|
||||
svm.Lock()
|
||||
defer svm.Unlock()
|
||||
return svm.hotRemoveVHDsNoLock(mvds...)
|
||||
}
|
||||
|
||||
// hotRemoveVHDsNoLock removes VHDs from a service VM. When calling this function,
|
||||
// the contract is the service VM lock must be held.
|
||||
func (svm *serviceVM) hotRemoveVHDsNoLock(mvds ...hcsshim.MappedVirtualDisk) error {
|
||||
var retErr error
|
||||
for _, mvd := range mvds {
|
||||
if _, ok := svm.attachedVHDs[mvd.HostPath]; !ok {
|
||||
// We continue instead of returning an error if we try to hot remove a non-existent VHD.
|
||||
// This is because one of the callers of the function is graphdriver.Put(). Since graphdriver.Get()
|
||||
// defers the VM start to the first operation, it's possible that nothing have been hot-added
|
||||
// when Put() is called. To avoid Put returning an error in that case, we simply continue if we
|
||||
// don't find the vhd attached.
|
||||
logrus.Debugf("lcowdriver: UVM %s: %s is not attached, not doing anything", svm.config.Name, mvd.HostPath)
|
||||
continue
|
||||
}
|
||||
|
||||
if svm.attachedVHDs[mvd.HostPath].refCount > 1 {
|
||||
svm.attachedVHDs[mvd.HostPath].refCount--
|
||||
logrus.Debugf("lcowdriver: UVM %s: %s refCount dropped to %d. not removing from UVM", svm.config.Name, mvd.HostPath, svm.attachedVHDs[mvd.HostPath].refCount)
|
||||
continue
|
||||
}
|
||||
|
||||
// last reference to VHD, so remove from VM and map
|
||||
if err := svm.config.HotRemoveVhd(mvd.HostPath); err == nil {
|
||||
delete(svm.attachedVHDs, mvd.HostPath)
|
||||
} else {
|
||||
// Take note of the error, but still continue to remove the other VHDs
|
||||
logrus.Warnf("Failed to hot remove %s: %s", mvd.HostPath, err)
|
||||
if retErr == nil {
|
||||
retErr = err
|
||||
}
|
||||
}
|
||||
}
|
||||
return retErr
|
||||
}
|
||||
|
||||
func (svm *serviceVM) createExt4VHDX(destFile string, sizeGB uint32, cacheFile string) error {
|
||||
if err := svm.getStartError(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
svm.Lock()
|
||||
defer svm.Unlock()
|
||||
return svm.config.CreateExt4Vhdx(destFile, sizeGB, cacheFile)
|
||||
}
|
||||
|
||||
// getShortContainerPath looks up where a SCSI disk was actually mounted
|
||||
// in a service VM when we remapped a long path name to a short name.
|
||||
func (svm *serviceVM) getShortContainerPath(mvd *hcsshim.MappedVirtualDisk) string {
|
||||
if mvd.ContainerPath == "" {
|
||||
return ""
|
||||
}
|
||||
avhd, ok := svm.attachedVHDs[mvd.HostPath]
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("/tmp/d%d", avhd.attachCounter)
|
||||
}
|
||||
|
||||
func (svm *serviceVM) createUnionMount(mountName string, mvds ...hcsshim.MappedVirtualDisk) (err error) {
|
||||
if len(mvds) == 0 {
|
||||
return fmt.Errorf("createUnionMount: error must have at least 1 layer")
|
||||
}
|
||||
|
||||
if err = svm.getStartError(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
svm.Lock()
|
||||
defer svm.Unlock()
|
||||
if _, ok := svm.unionMounts[mountName]; ok {
|
||||
svm.unionMounts[mountName]++
|
||||
return nil
|
||||
}
|
||||
|
||||
var lowerLayers []string
|
||||
if mvds[0].ReadOnly {
|
||||
lowerLayers = append(lowerLayers, svm.getShortContainerPath(&mvds[0]))
|
||||
}
|
||||
|
||||
for i := 1; i < len(mvds); i++ {
|
||||
lowerLayers = append(lowerLayers, svm.getShortContainerPath(&mvds[i]))
|
||||
}
|
||||
|
||||
logrus.Debugf("Doing the overlay mount with union directory=%s", mountName)
|
||||
errOut := &bytes.Buffer{}
|
||||
if err = svm.runProcess(fmt.Sprintf("mkdir -p %s", mountName), nil, nil, errOut); err != nil {
|
||||
return errors.Wrapf(err, "mkdir -p %s failed (%s)", mountName, errOut.String())
|
||||
}
|
||||
|
||||
var cmd string
|
||||
if len(mvds) == 1 {
|
||||
// `FROM SCRATCH` case and the only layer. No overlay required.
|
||||
cmd = fmt.Sprintf("mount %s %s", svm.getShortContainerPath(&mvds[0]), mountName)
|
||||
} else if mvds[0].ReadOnly {
|
||||
// Readonly overlay
|
||||
cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s %s",
|
||||
strings.Join(lowerLayers, ","),
|
||||
mountName)
|
||||
} else {
|
||||
upper := fmt.Sprintf("%s/upper", svm.getShortContainerPath(&mvds[0]))
|
||||
work := fmt.Sprintf("%s/work", svm.getShortContainerPath(&mvds[0]))
|
||||
|
||||
errOut := &bytes.Buffer{}
|
||||
if err = svm.runProcess(fmt.Sprintf("mkdir -p %s %s", upper, work), nil, nil, errOut); err != nil {
|
||||
return errors.Wrapf(err, "mkdir -p %s failed (%s)", mountName, errOut.String())
|
||||
}
|
||||
|
||||
cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s,upperdir=%s,workdir=%s %s",
|
||||
strings.Join(lowerLayers, ":"),
|
||||
upper,
|
||||
work,
|
||||
mountName)
|
||||
}
|
||||
|
||||
logrus.Debugf("createUnionMount: Executing mount=%s", cmd)
|
||||
errOut = &bytes.Buffer{}
|
||||
if err = svm.runProcess(cmd, nil, nil, errOut); err != nil {
|
||||
return errors.Wrapf(err, "%s failed (%s)", cmd, errOut.String())
|
||||
}
|
||||
|
||||
svm.unionMounts[mountName] = 1
|
||||
return nil
|
||||
}
|
||||
|
||||
func (svm *serviceVM) deleteUnionMount(mountName string, disks ...hcsshim.MappedVirtualDisk) error {
|
||||
if err := svm.getStartError(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
svm.Lock()
|
||||
defer svm.Unlock()
|
||||
if _, ok := svm.unionMounts[mountName]; !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
if svm.unionMounts[mountName] > 1 {
|
||||
svm.unionMounts[mountName]--
|
||||
return nil
|
||||
}
|
||||
|
||||
logrus.Debugf("Removing union mount %s", mountName)
|
||||
if err := svm.runProcess(fmt.Sprintf("umount %s", mountName), nil, nil, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
delete(svm.unionMounts, mountName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (svm *serviceVM) runProcess(command string, stdin io.Reader, stdout io.Writer, stderr io.Writer) error {
|
||||
var process hcsshim.Process
|
||||
var err error
|
||||
errOut := &bytes.Buffer{}
|
||||
|
||||
if stderr != nil {
|
||||
process, err = svm.config.RunProcess(command, stdin, stdout, stderr)
|
||||
} else {
|
||||
process, err = svm.config.RunProcess(command, stdin, stdout, errOut)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer process.Close()
|
||||
|
||||
process.WaitTimeout(time.Duration(int(time.Second) * svm.config.UvmTimeoutSeconds))
|
||||
exitCode, err := process.ExitCode()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if exitCode != 0 {
|
||||
// If the caller isn't explicitly capturing stderr output, then capture it here instead.
|
||||
e := fmt.Sprintf("svm.runProcess: command %s failed with exit code %d", command, exitCode)
|
||||
if stderr == nil {
|
||||
e = fmt.Sprintf("%s. (%s)", e, errOut.String())
|
||||
}
|
||||
return fmt.Errorf(e)
|
||||
}
|
||||
return nil
|
||||
}
|
|
@@ -1,139 +0,0 @@
|
|||
// +build windows
|
||||
|
||||
package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/Microsoft/opengcs/service/gcsutils/remotefs"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type lcowfs struct {
|
||||
root string
|
||||
d *Driver
|
||||
mappedDisks []hcsshim.MappedVirtualDisk
|
||||
vmID string
|
||||
currentSVM *serviceVM
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
var _ containerfs.ContainerFS = &lcowfs{}
|
||||
|
||||
// ErrNotSupported is an error for unsupported operations in the remotefs
|
||||
var ErrNotSupported = fmt.Errorf("not supported")
|
||||
|
||||
// Functions to implement the ContainerFS interface
|
||||
func (l *lcowfs) Path() string {
|
||||
return l.root
|
||||
}
|
||||
|
||||
func (l *lcowfs) ResolveScopedPath(path string, rawPath bool) (string, error) {
|
||||
logrus.Debugf("remotefs.resolvescopedpath inputs: %s %s ", path, l.root)
|
||||
|
||||
arg1 := l.Join(l.root, path)
|
||||
if !rawPath {
|
||||
// The l.Join("/", path) will make path an absolute path and then clean it
|
||||
// so if path = ../../X, it will become /X.
|
||||
arg1 = l.Join(l.root, l.Join("/", path))
|
||||
}
|
||||
arg2 := l.root
|
||||
|
||||
output := &bytes.Buffer{}
|
||||
if err := l.runRemoteFSProcess(nil, output, remotefs.ResolvePathCmd, arg1, arg2); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
logrus.Debugf("remotefs.resolvescopedpath success. Output: %s\n", output.String())
|
||||
return output.String(), nil
|
||||
}
|
||||
|
||||
func (l *lcowfs) OS() string {
|
||||
return "linux"
|
||||
}
|
||||
|
||||
func (l *lcowfs) Architecture() string {
|
||||
return runtime.GOARCH
|
||||
}
|
||||
|
||||
// Other functions that are used by docker like the daemon Archiver/Extractor
|
||||
func (l *lcowfs) ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error {
|
||||
logrus.Debugf("remotefs.ExtractArchve inputs: %s %+v", dst, opts)
|
||||
|
||||
tarBuf := &bytes.Buffer{}
|
||||
if err := remotefs.WriteTarOptions(tarBuf, opts); err != nil {
|
||||
return fmt.Errorf("failed to marshall tar opts: %s", err)
|
||||
}
|
||||
|
||||
input := io.MultiReader(tarBuf, src)
|
||||
if err := l.runRemoteFSProcess(input, nil, remotefs.ExtractArchiveCmd, dst); err != nil {
|
||||
return fmt.Errorf("failed to extract archive to %s: %s", dst, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *lcowfs) ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error) {
|
||||
logrus.Debugf("remotefs.ArchivePath: %s %+v", src, opts)
|
||||
|
||||
tarBuf := &bytes.Buffer{}
|
||||
if err := remotefs.WriteTarOptions(tarBuf, opts); err != nil {
|
||||
return nil, fmt.Errorf("failed to marshall tar opts: %s", err)
|
||||
}
|
||||
|
||||
r, w := io.Pipe()
|
||||
go func() {
|
||||
defer w.Close()
|
||||
if err := l.runRemoteFSProcess(tarBuf, w, remotefs.ArchivePathCmd, src); err != nil {
|
||||
logrus.Debugf("REMOTEFS: Failed to extract archive: %s %+v %s", src, opts, err)
|
||||
}
|
||||
}()
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// Helper functions
|
||||
func (l *lcowfs) startVM() error {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
if l.currentSVM != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
svm, err := l.d.startServiceVMIfNotRunning(l.vmID, l.mappedDisks, fmt.Sprintf("lcowfs.startVM"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = svm.createUnionMount(l.root, l.mappedDisks...); err != nil {
|
||||
return err
|
||||
}
|
||||
l.currentSVM = svm
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *lcowfs) runRemoteFSProcess(stdin io.Reader, stdout io.Writer, args ...string) error {
|
||||
if err := l.startVM(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Append remotefs prefix and setup as a command line string
|
||||
cmd := fmt.Sprintf("%s %s", remotefs.RemotefsCmd, strings.Join(args, " "))
|
||||
stderr := &bytes.Buffer{}
|
||||
if err := l.currentSVM.runProcess(cmd, stdin, stdout, stderr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
eerr, err := remotefs.ReadError(stderr)
|
||||
if eerr != nil {
|
||||
// Process returned an error so return that.
|
||||
return remotefs.ExportedToError(eerr)
|
||||
}
|
||||
return err
|
||||
}
|
|
@@ -1,211 +0,0 @@
|
|||
// +build windows
|
||||
|
||||
package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/Microsoft/opengcs/service/gcsutils/remotefs"
|
||||
"github.com/containerd/continuity/driver"
|
||||
)
|
||||
|
||||
type lcowfile struct {
|
||||
process hcsshim.Process
|
||||
stdin io.WriteCloser
|
||||
stdout io.ReadCloser
|
||||
stderr io.ReadCloser
|
||||
fs *lcowfs
|
||||
guestPath string
|
||||
}
|
||||
|
||||
func (l *lcowfs) Open(path string) (driver.File, error) {
|
||||
return l.OpenFile(path, os.O_RDONLY, 0)
|
||||
}
|
||||
|
||||
func (l *lcowfs) OpenFile(path string, flag int, perm os.FileMode) (_ driver.File, err error) {
|
||||
flagStr := strconv.FormatInt(int64(flag), 10)
|
||||
permStr := strconv.FormatUint(uint64(perm), 8)
|
||||
|
||||
commandLine := fmt.Sprintf("%s %s %s %s %s", remotefs.RemotefsCmd, remotefs.OpenFileCmd, path, flagStr, permStr)
|
||||
env := make(map[string]string)
|
||||
env["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:"
|
||||
processConfig := &hcsshim.ProcessConfig{
|
||||
EmulateConsole: false,
|
||||
CreateStdInPipe: true,
|
||||
CreateStdOutPipe: true,
|
||||
CreateStdErrPipe: true,
|
||||
CreateInUtilityVm: true,
|
||||
WorkingDirectory: "/bin",
|
||||
Environment: env,
|
||||
CommandLine: commandLine,
|
||||
}
|
||||
|
||||
process, err := l.currentSVM.config.Uvm.CreateProcess(processConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open file %s: %s", path, err)
|
||||
}
|
||||
|
||||
stdin, stdout, stderr, err := process.Stdio()
|
||||
if err != nil {
|
||||
process.Kill()
|
||||
process.Close()
|
||||
return nil, fmt.Errorf("failed to open file pipes %s: %s", path, err)
|
||||
}
|
||||
|
||||
lf := &lcowfile{
|
||||
process: process,
|
||||
stdin: stdin,
|
||||
stdout: stdout,
|
||||
stderr: stderr,
|
||||
fs: l,
|
||||
guestPath: path,
|
||||
}
|
||||
|
||||
if _, err := lf.getResponse(); err != nil {
|
||||
return nil, fmt.Errorf("failed to open file %s: %s", path, err)
|
||||
}
|
||||
return lf, nil
|
||||
}
|
||||
|
||||
func (l *lcowfile) Read(b []byte) (int, error) {
|
||||
hdr := &remotefs.FileHeader{
|
||||
Cmd: remotefs.Read,
|
||||
Size: uint64(len(b)),
|
||||
}
|
||||
|
||||
if err := remotefs.WriteFileHeader(l.stdin, hdr, nil); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
buf, err := l.getResponse()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
n := copy(b, buf)
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (l *lcowfile) Write(b []byte) (int, error) {
|
||||
hdr := &remotefs.FileHeader{
|
||||
Cmd: remotefs.Write,
|
||||
Size: uint64(len(b)),
|
||||
}
|
||||
|
||||
if err := remotefs.WriteFileHeader(l.stdin, hdr, b); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
_, err := l.getResponse()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
func (l *lcowfile) Seek(offset int64, whence int) (int64, error) {
|
||||
seekHdr := &remotefs.SeekHeader{
|
||||
Offset: offset,
|
||||
Whence: int32(whence),
|
||||
}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
if err := binary.Write(buf, binary.BigEndian, seekHdr); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
hdr := &remotefs.FileHeader{
|
||||
Cmd: remotefs.Write,
|
||||
Size: uint64(buf.Len()),
|
||||
}
|
||||
if err := remotefs.WriteFileHeader(l.stdin, hdr, buf.Bytes()); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
resBuf, err := l.getResponse()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var res int64
|
||||
if err := binary.Read(bytes.NewBuffer(resBuf), binary.BigEndian, &res); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (l *lcowfile) Close() error {
|
||||
hdr := &remotefs.FileHeader{
|
||||
Cmd: remotefs.Close,
|
||||
Size: 0,
|
||||
}
|
||||
|
||||
if err := remotefs.WriteFileHeader(l.stdin, hdr, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err := l.getResponse()
|
||||
return err
|
||||
}
|
||||
|
||||
func (l *lcowfile) Readdir(n int) ([]os.FileInfo, error) {
|
||||
nStr := strconv.FormatInt(int64(n), 10)
|
||||
|
||||
// Unlike the other File functions, this one can just be run without maintaining state,
|
||||
// so just do the normal runRemoteFSProcess way.
|
||||
buf := &bytes.Buffer{}
|
||||
if err := l.fs.runRemoteFSProcess(nil, buf, remotefs.ReadDirCmd, l.guestPath, nStr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var info []remotefs.FileInfo
|
||||
if err := json.Unmarshal(buf.Bytes(), &info); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
osInfo := make([]os.FileInfo, len(info))
|
||||
for i := range info {
|
||||
osInfo[i] = &info[i]
|
||||
}
|
||||
return osInfo, nil
|
||||
}
|
||||
|
||||
func (l *lcowfile) getResponse() ([]byte, error) {
|
||||
hdr, err := remotefs.ReadFileHeader(l.stdout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if hdr.Cmd != remotefs.CmdOK {
|
||||
// Something went wrong during the openfile in the server.
|
||||
// Parse stderr and return that as an error
|
||||
eerr, err := remotefs.ReadError(l.stderr)
|
||||
if eerr != nil {
|
||||
return nil, remotefs.ExportedToError(eerr)
|
||||
}
|
||||
|
||||
// Maybe the parsing went wrong?
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// At this point, we know something went wrong in the remotefs program, but
|
||||
// we we don't know why.
|
||||
return nil, fmt.Errorf("unknown error")
|
||||
}
|
||||
|
||||
// Successful command, we might have some data to read (for Read + Seek)
|
||||
buf := make([]byte, hdr.Size, hdr.Size)
|
||||
if _, err := io.ReadFull(l.stdout, buf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf, nil
|
||||
}
|
|
@@ -1,123 +0,0 @@
|
|||
// +build windows
|
||||
|
||||
package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/Microsoft/opengcs/service/gcsutils/remotefs"
|
||||
|
||||
"github.com/containerd/continuity/driver"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var _ driver.Driver = &lcowfs{}
|
||||
|
||||
func (l *lcowfs) Readlink(p string) (string, error) {
|
||||
logrus.Debugf("removefs.readlink args: %s", p)
|
||||
|
||||
result := &bytes.Buffer{}
|
||||
if err := l.runRemoteFSProcess(nil, result, remotefs.ReadlinkCmd, p); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return result.String(), nil
|
||||
}
|
||||
|
||||
func (l *lcowfs) Mkdir(path string, mode os.FileMode) error {
|
||||
return l.mkdir(path, mode, remotefs.MkdirCmd)
|
||||
}
|
||||
|
||||
func (l *lcowfs) MkdirAll(path string, mode os.FileMode) error {
|
||||
return l.mkdir(path, mode, remotefs.MkdirAllCmd)
|
||||
}
|
||||
|
||||
func (l *lcowfs) mkdir(path string, mode os.FileMode, cmd string) error {
|
||||
modeStr := strconv.FormatUint(uint64(mode), 8)
|
||||
logrus.Debugf("remotefs.%s args: %s %s", cmd, path, modeStr)
|
||||
return l.runRemoteFSProcess(nil, nil, cmd, path, modeStr)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Remove(path string) error {
|
||||
return l.remove(path, remotefs.RemoveCmd)
|
||||
}
|
||||
|
||||
func (l *lcowfs) RemoveAll(path string) error {
|
||||
return l.remove(path, remotefs.RemoveAllCmd)
|
||||
}
|
||||
|
||||
func (l *lcowfs) remove(path string, cmd string) error {
|
||||
logrus.Debugf("remotefs.%s args: %s", cmd, path)
|
||||
return l.runRemoteFSProcess(nil, nil, cmd, path)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Link(oldname, newname string) error {
|
||||
return l.link(oldname, newname, remotefs.LinkCmd)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Symlink(oldname, newname string) error {
|
||||
return l.link(oldname, newname, remotefs.SymlinkCmd)
|
||||
}
|
||||
|
||||
func (l *lcowfs) link(oldname, newname, cmd string) error {
|
||||
logrus.Debugf("remotefs.%s args: %s %s", cmd, oldname, newname)
|
||||
return l.runRemoteFSProcess(nil, nil, cmd, oldname, newname)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Lchown(name string, uid, gid int64) error {
|
||||
uidStr := strconv.FormatInt(uid, 10)
|
||||
gidStr := strconv.FormatInt(gid, 10)
|
||||
|
||||
logrus.Debugf("remotefs.lchown args: %s %s %s", name, uidStr, gidStr)
|
||||
return l.runRemoteFSProcess(nil, nil, remotefs.LchownCmd, name, uidStr, gidStr)
|
||||
}
|
||||
|
||||
// Lchmod changes the mode of an file not following symlinks.
|
||||
func (l *lcowfs) Lchmod(path string, mode os.FileMode) error {
|
||||
modeStr := strconv.FormatUint(uint64(mode), 8)
|
||||
logrus.Debugf("remotefs.lchmod args: %s %s", path, modeStr)
|
||||
return l.runRemoteFSProcess(nil, nil, remotefs.LchmodCmd, path, modeStr)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Mknod(path string, mode os.FileMode, major, minor int) error {
|
||||
modeStr := strconv.FormatUint(uint64(mode), 8)
|
||||
majorStr := strconv.FormatUint(uint64(major), 10)
|
||||
minorStr := strconv.FormatUint(uint64(minor), 10)
|
||||
|
||||
logrus.Debugf("remotefs.mknod args: %s %s %s %s", path, modeStr, majorStr, minorStr)
|
||||
return l.runRemoteFSProcess(nil, nil, remotefs.MknodCmd, path, modeStr, majorStr, minorStr)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Mkfifo(path string, mode os.FileMode) error {
|
||||
modeStr := strconv.FormatUint(uint64(mode), 8)
|
||||
logrus.Debugf("remotefs.mkfifo args: %s %s", path, modeStr)
|
||||
return l.runRemoteFSProcess(nil, nil, remotefs.MkfifoCmd, path, modeStr)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Stat(p string) (os.FileInfo, error) {
|
||||
return l.stat(p, remotefs.StatCmd)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Lstat(p string) (os.FileInfo, error) {
|
||||
return l.stat(p, remotefs.LstatCmd)
|
||||
}
|
||||
|
||||
func (l *lcowfs) stat(path string, cmd string) (os.FileInfo, error) {
|
||||
logrus.Debugf("remotefs.stat inputs: %s %s", cmd, path)
|
||||
|
||||
output := &bytes.Buffer{}
|
||||
err := l.runRemoteFSProcess(nil, output, cmd, path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var fi remotefs.FileInfo
|
||||
if err := json.Unmarshal(output.Bytes(), &fi); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logrus.Debugf("remotefs.stat success. got: %v\n", fi)
|
||||
return &fi, nil
|
||||
}
|
|
@@ -1,212 +0,0 @@
|
|||
// +build windows
|
||||
|
||||
package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
pathpkg "path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/continuity/pathdriver"
|
||||
)
|
||||
|
||||
var _ pathdriver.PathDriver = &lcowfs{}
|
||||
|
||||
// Continuity Path functions can be done locally
|
||||
func (l *lcowfs) Join(path ...string) string {
|
||||
return pathpkg.Join(path...)
|
||||
}
|
||||
|
||||
func (l *lcowfs) IsAbs(path string) bool {
|
||||
return pathpkg.IsAbs(path)
|
||||
}
|
||||
|
||||
func sameWord(a, b string) bool {
|
||||
return a == b
|
||||
}
|
||||
|
||||
// Implementation taken from the Go standard library
|
||||
func (l *lcowfs) Rel(basepath, targpath string) (string, error) {
|
||||
baseVol := ""
|
||||
targVol := ""
|
||||
base := l.Clean(basepath)
|
||||
targ := l.Clean(targpath)
|
||||
if sameWord(targ, base) {
|
||||
return ".", nil
|
||||
}
|
||||
base = base[len(baseVol):]
|
||||
targ = targ[len(targVol):]
|
||||
if base == "." {
|
||||
base = ""
|
||||
}
|
||||
// Can't use IsAbs - `\a` and `a` are both relative in Windows.
|
||||
baseSlashed := len(base) > 0 && base[0] == l.Separator()
|
||||
targSlashed := len(targ) > 0 && targ[0] == l.Separator()
|
||||
if baseSlashed != targSlashed || !sameWord(baseVol, targVol) {
|
||||
return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath)
|
||||
}
|
||||
// Position base[b0:bi] and targ[t0:ti] at the first differing elements.
|
||||
bl := len(base)
|
||||
tl := len(targ)
|
||||
var b0, bi, t0, ti int
|
||||
for {
|
||||
for bi < bl && base[bi] != l.Separator() {
|
||||
bi++
|
||||
}
|
||||
for ti < tl && targ[ti] != l.Separator() {
|
||||
ti++
|
||||
}
|
||||
if !sameWord(targ[t0:ti], base[b0:bi]) {
|
||||
break
|
||||
}
|
||||
if bi < bl {
|
||||
bi++
|
||||
}
|
||||
if ti < tl {
|
||||
ti++
|
||||
}
|
||||
b0 = bi
|
||||
t0 = ti
|
||||
}
|
||||
if base[b0:bi] == ".." {
|
||||
return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath)
|
||||
}
|
||||
if b0 != bl {
|
||||
// Base elements left. Must go up before going down.
|
||||
seps := strings.Count(base[b0:bl], string(l.Separator()))
|
||||
size := 2 + seps*3
|
||||
if tl != t0 {
|
||||
size += 1 + tl - t0
|
||||
}
|
||||
buf := make([]byte, size)
|
||||
n := copy(buf, "..")
|
||||
for i := 0; i < seps; i++ {
|
||||
buf[n] = l.Separator()
|
||||
copy(buf[n+1:], "..")
|
||||
n += 3
|
||||
}
|
||||
if t0 != tl {
|
||||
buf[n] = l.Separator()
|
||||
copy(buf[n+1:], targ[t0:])
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
return targ[t0:], nil
|
||||
}
|
||||
|
||||
func (l *lcowfs) Base(path string) string {
|
||||
return pathpkg.Base(path)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Dir(path string) string {
|
||||
return pathpkg.Dir(path)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Clean(path string) string {
|
||||
return pathpkg.Clean(path)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Split(path string) (dir, file string) {
|
||||
return pathpkg.Split(path)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Separator() byte {
|
||||
return '/'
|
||||
}
|
||||
|
||||
func (l *lcowfs) Abs(path string) (string, error) {
|
||||
// Abs is supposed to add the current working directory, which is meaningless in lcow.
|
||||
// So, return an error.
|
||||
return "", ErrNotSupported
|
||||
}
|
||||
|
||||
// Implementation taken from the Go standard library
|
||||
func (l *lcowfs) Walk(root string, walkFn filepath.WalkFunc) error {
|
||||
info, err := l.Lstat(root)
|
||||
if err != nil {
|
||||
err = walkFn(root, nil, err)
|
||||
} else {
|
||||
err = l.walk(root, info, walkFn)
|
||||
}
|
||||
if err == filepath.SkipDir {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// walk recursively descends path, calling w.
|
||||
func (l *lcowfs) walk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
|
||||
err := walkFn(path, info, nil)
|
||||
if err != nil {
|
||||
if info.IsDir() && err == filepath.SkipDir {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if !info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
names, err := l.readDirNames(path)
|
||||
if err != nil {
|
||||
return walkFn(path, info, err)
|
||||
}
|
||||
|
||||
for _, name := range names {
|
||||
filename := l.Join(path, name)
|
||||
fileInfo, err := l.Lstat(filename)
|
||||
if err != nil {
|
||||
if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
err = l.walk(filename, fileInfo, walkFn)
|
||||
if err != nil {
|
||||
if !fileInfo.IsDir() || err != filepath.SkipDir {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// readDirNames reads the directory named by dirname and returns
|
||||
// a sorted list of directory entries.
|
||||
func (l *lcowfs) readDirNames(dirname string) ([]string, error) {
|
||||
f, err := l.Open(dirname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
files, err := f.Readdir(-1)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
names := make([]string, len(files), len(files))
|
||||
for i := range files {
|
||||
names[i] = files[i].Name()
|
||||
}
|
||||
|
||||
sort.Strings(names)
|
||||
return names, nil
|
||||
}
|
||||
|
||||
// Note that Go's filepath.FromSlash/ToSlash convert between OS paths and '/'. Since the path separator
|
||||
// for LCOW (and Unix) is '/', they are no-ops.
|
||||
func (l *lcowfs) FromSlash(path string) string {
|
||||
return path
|
||||
}
|
||||
|
||||
func (l *lcowfs) ToSlash(path string) string {
|
||||
return path
|
||||
}
|
||||
|
||||
func (l *lcowfs) Match(pattern, name string) (matched bool, err error) {
|
||||
return pathpkg.Match(pattern, name)
|
||||
}
|
|
@@ -2,6 +2,5 @@ package register // import "github.com/docker/docker/daemon/graphdriver/register
 
 import (
     // register the windows graph drivers
-    _ "github.com/docker/docker/daemon/graphdriver/lcow"
     _ "github.com/docker/docker/daemon/graphdriver/windows"
 )
@@ -9,12 +9,6 @@
 $ErrorActionPreference = 'Stop'
 $StartTime=Get-Date
 
-# Put up top to be blindingly obvious. The production jenkins.dockerproject.org Linux-container
-# CI job is "Docker-PRs-LoW-RS3". Force into LCOW mode for this run, or not.
-if ($env:BUILD_TAG -match "-LoW") { $env:LCOW_MODE=1 }
-if ($env:BUILD_TAG -match "-WoW") { $env:LCOW_MODE="" }
-
-
 Write-Host -ForegroundColor Red "DEBUG: print all environment variables to check how Jenkins runs this script"
 $allArgs = [Environment]::GetCommandLineArgs()
 Write-Host -ForegroundColor Red $allArgs
@@ -100,11 +94,6 @@ Write-Host -ForegroundColor Red "-----------------------------------------------
 # WINDOWS_BASE_IMAGE_TAG if defined, uses that as the tag name for the base image.
 # if no set, defaults to latest
 #
-# LCOW_BASIC_MODE if defined, does very basic LCOW verification. Ultimately we
-# want to run the entire CI suite from docker, but that's a way off.
-#
-# LCOW_MODE if defined, runs the entire CI suite
-#
 # -------------------------------------------------------------------------------------------
 #
 # Jenkins Integration. Add a Windows Powershell build step as follows:
@@ -628,11 +617,6 @@ Try {
     Write-Host -ForegroundColor Green "INFO: Args: $dutArgs"
     New-Item -ItemType Directory $env:TEMP\daemon -ErrorAction SilentlyContinue | Out-Null
 
-    # In LCOW mode, for now we need to set an environment variable before starting the daemon under test
-    if (($null -ne $env:LCOW_MODE) -or ($null -ne $env:LCOW_BASIC_MODE)) {
-        $env:LCOW_SUPPORTED=1
-    }
-
     # Cannot fathom why, but always writes to stderr....
     Start-Process "$env:TEMP\binary\dockerd-$COMMITHASH" `
         -ArgumentList $dutArgs `
@@ -641,12 +625,6 @@ Try {
     Write-Host -ForegroundColor Green "INFO: Process started successfully."
     $daemonStarted=1
 
-    # In LCOW mode, turn off that variable
-    if (($null -ne $env:LCOW_MODE) -or ($null -ne $env:LCOW_BASIC_MODE)) {
-        $env:LCOW_SUPPORTED=""
-    }
-
-
     # Start tailing the daemon under test if the command is installed
     if ($null -ne (Get-Command "tail" -ErrorAction SilentlyContinue)) {
         Write-Host -ForegroundColor green "INFO: Start tailing logs of the daemon under tests"
@@ -706,63 +684,59 @@ Try {
|
|||
}
|
||||
Write-Host
|
||||
|
||||
# Don't need Windows images when in LCOW mode.
|
||||
if (($null -eq $env:LCOW_MODE) -and ($null -eq $env:LCOW_BASIC_MODE)) {
|
||||
|
||||
# Default to windowsservercore for the base image used for the tests. The "docker" image
|
||||
# and the control daemon use microsoft/windowsservercore regardless. This is *JUST* for the tests.
|
||||
if ($null -eq $env:WINDOWS_BASE_IMAGE) {
|
||||
$env:WINDOWS_BASE_IMAGE="microsoft/windowsservercore"
|
||||
}
|
||||
if ($null -eq $env:WINDOWS_BASE_IMAGE_TAG) {
|
||||
$env:WINDOWS_BASE_IMAGE_TAG="latest"
|
||||
}
|
||||
|
||||
# Lowercase and make sure it has a microsoft/ prefix
|
||||
$env:WINDOWS_BASE_IMAGE = $env:WINDOWS_BASE_IMAGE.ToLower()
|
||||
if (! $($env:WINDOWS_BASE_IMAGE -Split "/")[0] -match "microsoft") {
|
||||
Throw "ERROR: WINDOWS_BASE_IMAGE should start microsoft/ or mcr.microsoft.com/"
|
||||
}
|
||||
|
||||
Write-Host -ForegroundColor Green "INFO: Base image for tests is $env:WINDOWS_BASE_IMAGE"
|
||||
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
if ($((& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" images --format "{{.Repository}}:{{.Tag}}" | Select-String "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" | Measure-Object -Line).Lines) -eq 0) {
|
||||
# Try the internal azure CI image version or Microsoft internal corpnet where the base image is already pre-prepared on the disk,
|
||||
# either through Invoke-DockerCI or, in the case of Azure CI servers, baked into the VHD at the same location.
|
||||
if (Test-Path $("c:\baseimages\"+$($env:WINDOWS_BASE_IMAGE -Split "/")[1]+".tar")) {
|
||||
Write-Host -ForegroundColor Green "INFO: Loading"$($env:WINDOWS_BASE_IMAGE -Split "/")[1]".tar from disk into the daemon under test. This may take some time..."
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" load -i $("$readBaseFrom`:\baseimages\"+$($env:WINDOWS_BASE_IMAGE -Split "/")[1]+".tar")
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not $LastExitCode -eq 0) {
|
||||
Throw $("ERROR: Failed to load $readBaseFrom`:\baseimages\"+$($env:WINDOWS_BASE_IMAGE -Split "/")[1]+".tar into daemon under test")
|
||||
}
|
||||
Write-Host -ForegroundColor Green "INFO: docker load of"$($env:WINDOWS_BASE_IMAGE -Split "/")[1]" into daemon under test completed successfully"
|
||||
} else {
|
||||
# We need to docker pull it instead. It will come in directly as microsoft/imagename:tagname
|
||||
Write-Host -ForegroundColor Green $("INFO: Pulling "+$env:WINDOWS_BASE_IMAGE+":"+$env:WINDOWS_BASE_IMAGE_TAG+" from docker hub into daemon under test. This may take some time...")
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" pull "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG"
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not $LastExitCode -eq 0) {
|
||||
Throw $("ERROR: Failed to docker pull $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG into daemon under test.")
|
||||
}
|
||||
Write-Host -ForegroundColor Green $("INFO: docker pull of $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG into daemon under test completed successfully")
|
||||
Write-Host -ForegroundColor Green $("INFO: Tagging $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG as microsoft/$ControlDaemonBaseImage in daemon under test")
|
||||
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" tag "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" microsoft/$ControlDaemonBaseImage
|
||||
}
|
||||
} else {
|
||||
Write-Host -ForegroundColor Green "INFO: Image $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG is already loaded in the daemon under test"
|
||||
}
|
||||
|
||||
|
||||
# Inspect the pulled or loaded image to get the version directly
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
$dutimgVersion = $(&"$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" inspect "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" --format "{{.OsVersion}}")
|
||||
$ErrorActionPreference = "Stop"
|
||||
Write-Host -ForegroundColor Green $("INFO: Version of $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG is '"+$dutimgVersion+"'")
|
||||
# Default to windowsservercore for the base image used for the tests. The "docker" image
|
||||
# and the control daemon use microsoft/windowsservercore regardless. This is *JUST* for the tests.
|
||||
if ($null -eq $env:WINDOWS_BASE_IMAGE) {
|
||||
$env:WINDOWS_BASE_IMAGE="microsoft/windowsservercore"
|
||||
}
|
||||
if ($null -eq $env:WINDOWS_BASE_IMAGE_TAG) {
|
||||
$env:WINDOWS_BASE_IMAGE_TAG="latest"
|
||||
}
|
||||
|
||||
# Lowercase and make sure it has a microsoft/ prefix
|
||||
$env:WINDOWS_BASE_IMAGE = $env:WINDOWS_BASE_IMAGE.ToLower()
|
||||
if (! $($env:WINDOWS_BASE_IMAGE -Split "/")[0] -match "microsoft") {
|
||||
Throw "ERROR: WINDOWS_BASE_IMAGE should start microsoft/ or mcr.microsoft.com/"
|
||||
}
|
||||
|
||||
Write-Host -ForegroundColor Green "INFO: Base image for tests is $env:WINDOWS_BASE_IMAGE"
|
||||
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
if ($((& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" images --format "{{.Repository}}:{{.Tag}}" | Select-String "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" | Measure-Object -Line).Lines) -eq 0) {
|
||||
# Try the internal azure CI image version or Microsoft internal corpnet where the base image is already pre-prepared on the disk,
|
||||
# either through Invoke-DockerCI or, in the case of Azure CI servers, baked into the VHD at the same location.
|
||||
if (Test-Path $("c:\baseimages\"+$($env:WINDOWS_BASE_IMAGE -Split "/")[1]+".tar")) {
|
||||
Write-Host -ForegroundColor Green "INFO: Loading"$($env:WINDOWS_BASE_IMAGE -Split "/")[1]".tar from disk into the daemon under test. This may take some time..."
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" load -i $("$readBaseFrom`:\baseimages\"+$($env:WINDOWS_BASE_IMAGE -Split "/")[1]+".tar")
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not $LastExitCode -eq 0) {
|
||||
Throw $("ERROR: Failed to load $readBaseFrom`:\baseimages\"+$($env:WINDOWS_BASE_IMAGE -Split "/")[1]+".tar into daemon under test")
|
||||
}
|
||||
Write-Host -ForegroundColor Green "INFO: docker load of"$($env:WINDOWS_BASE_IMAGE -Split "/")[1]" into daemon under test completed successfully"
|
||||
} else {
|
||||
# We need to docker pull it instead. It will come in directly as microsoft/imagename:tagname
|
||||
Write-Host -ForegroundColor Green $("INFO: Pulling "+$env:WINDOWS_BASE_IMAGE+":"+$env:WINDOWS_BASE_IMAGE_TAG+" from docker hub into daemon under test. This may take some time...")
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" pull "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG"
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not $LastExitCode -eq 0) {
|
||||
Throw $("ERROR: Failed to docker pull $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG into daemon under test.")
|
||||
}
|
||||
Write-Host -ForegroundColor Green $("INFO: docker pull of $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG into daemon under test completed successfully")
|
||||
Write-Host -ForegroundColor Green $("INFO: Tagging $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG as microsoft/$ControlDaemonBaseImage in daemon under test")
|
||||
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" tag "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" microsoft/$ControlDaemonBaseImage
|
||||
}
|
||||
} else {
|
||||
Write-Host -ForegroundColor Green "INFO: Image $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG is already loaded in the daemon under test"
|
||||
}
|
||||
|
||||
|
||||
# Inspect the pulled or loaded image to get the version directly
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
$dutimgVersion = $(&"$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" inspect "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" --format "{{.OsVersion}}")
|
||||
$ErrorActionPreference = "Stop"
|
||||
Write-Host -ForegroundColor Green $("INFO: Version of $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG is '"+$dutimgVersion+"'")
|
||||
|
||||
# Run the validation tests unless SKIP_VALIDATION_TESTS is defined.
|
||||
if ($null -eq $env:SKIP_VALIDATION_TESTS) {
|
||||
|
@@ -778,193 +752,122 @@ Try {
|
|||
Write-Host -ForegroundColor Magenta "WARN: Skipping validation tests"
|
||||
}
|
||||
|
||||
# Note the unit tests won't work in LCOW mode as I turned off loading the base images above.
|
||||
# Run the unit tests inside a container unless SKIP_UNIT_TESTS is defined
|
||||
if (($null -eq $env:LCOW_MODE) -and ($null -eq $env:LCOW_BASIC_MODE)) {
|
||||
if ($null -eq $env:SKIP_UNIT_TESTS) {
|
||||
$ContainerNameForUnitTests = $COMMITHASH + "_UnitTests"
|
||||
Write-Host -ForegroundColor Cyan "INFO: Running unit tests at $(Get-Date)..."
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
$Duration=$(Measure-Command {docker run --name $ContainerNameForUnitTests -e DOCKER_GITCOMMIT=$COMMITHASH$CommitUnsupported docker hack\make.ps1 -TestUnit | Out-Host })
|
||||
$TestRunExitCode = $LastExitCode
|
||||
$ErrorActionPreference = "Stop"
|
||||
if ($null -eq $env:SKIP_UNIT_TESTS) {
|
||||
$ContainerNameForUnitTests = $COMMITHASH + "_UnitTests"
|
||||
Write-Host -ForegroundColor Cyan "INFO: Running unit tests at $(Get-Date)..."
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
$Duration=$(Measure-Command {docker run --name $ContainerNameForUnitTests -e DOCKER_GITCOMMIT=$COMMITHASH$CommitUnsupported docker hack\make.ps1 -TestUnit | Out-Host })
|
||||
$TestRunExitCode = $LastExitCode
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
# Saving where jenkins will take a look at.....
|
||||
New-Item -Force -ItemType Directory bundles | Out-Null
|
||||
$unitTestsContPath="$ContainerNameForUnitTests`:c`:\gopath\src\github.com\docker\docker\bundles"
|
||||
$JunitExpectedContFilePath = "$unitTestsContPath\junit-report-unit-tests.xml"
|
||||
docker cp $JunitExpectedContFilePath "bundles"
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: Failed to docker cp the unit tests report ($JunitExpectedContFilePath) to bundles"
|
||||
}
|
||||
|
||||
if (Test-Path "bundles\junit-report-unit-tests.xml") {
|
||||
Write-Host -ForegroundColor Magenta "INFO: Unit tests results(bundles\junit-report-unit-tests.xml) exist. pwd=$pwd"
|
||||
} else {
|
||||
Write-Host -ForegroundColor Magenta "ERROR: Unit tests results(bundles\junit-report-unit-tests.xml) do not exist. pwd=$pwd"
|
||||
}
|
||||
|
||||
if (-not($TestRunExitCode -eq 0)) {
|
||||
Throw "ERROR: Unit tests failed"
|
||||
}
|
||||
Write-Host -ForegroundColor Green "INFO: Unit tests ended at $(Get-Date). Duration`:$Duration"
|
||||
} else {
|
||||
Write-Host -ForegroundColor Magenta "WARN: Skipping unit tests"
|
||||
# Saving where jenkins will take a look at.....
|
||||
New-Item -Force -ItemType Directory bundles | Out-Null
|
||||
$unitTestsContPath="$ContainerNameForUnitTests`:c`:\gopath\src\github.com\docker\docker\bundles"
|
||||
$JunitExpectedContFilePath = "$unitTestsContPath\junit-report-unit-tests.xml"
|
||||
docker cp $JunitExpectedContFilePath "bundles"
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: Failed to docker cp the unit tests report ($JunitExpectedContFilePath) to bundles"
|
||||
}
|
||||
|
||||
if (Test-Path "bundles\junit-report-unit-tests.xml") {
|
||||
Write-Host -ForegroundColor Magenta "INFO: Unit tests results(bundles\junit-report-unit-tests.xml) exist. pwd=$pwd"
|
||||
} else {
|
||||
Write-Host -ForegroundColor Magenta "ERROR: Unit tests results(bundles\junit-report-unit-tests.xml) do not exist. pwd=$pwd"
|
||||
}
|
||||
|
||||
if (-not($TestRunExitCode -eq 0)) {
|
||||
Throw "ERROR: Unit tests failed"
|
||||
}
|
||||
Write-Host -ForegroundColor Green "INFO: Unit tests ended at $(Get-Date). Duration`:$Duration"
|
||||
} else {
|
||||
Write-Host -ForegroundColor Magenta "WARN: Skipping unit tests"
|
||||
}
|
||||
|
||||
# Add the Windows busybox image. Needed for WCOW integration tests
|
||||
if (($null -eq $env:LCOW_MODE) -and ($null -eq $env:LCOW_BASIC_MODE)) {
|
||||
if ($null -eq $env:SKIP_INTEGRATION_TESTS) {
|
||||
Write-Host -ForegroundColor Green "INFO: Building busybox"
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
$(& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" build -t busybox --build-arg WINDOWS_BASE_IMAGE --build-arg WINDOWS_BASE_IMAGE_TAG "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker\contrib\busybox\" | Out-Host)
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: Failed to build busybox image"
|
||||
}
|
||||
|
||||
Write-Host -ForegroundColor Green "INFO: Docker images of the daemon under test"
|
||||
Write-Host
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" images
|
||||
$ErrorActionPreference = "Stop"
|
||||
if ($LastExitCode -ne 0) {
|
||||
Throw "ERROR: The daemon under test does not appear to be running."
|
||||
}
|
||||
Write-Host
|
||||
if ($null -eq $env:SKIP_INTEGRATION_TESTS) {
|
||||
Write-Host -ForegroundColor Green "INFO: Building busybox"
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
$(& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" build -t busybox --build-arg WINDOWS_BASE_IMAGE --build-arg WINDOWS_BASE_IMAGE_TAG "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker\contrib\busybox\" | Out-Host)
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: Failed to build busybox image"
|
||||
}
|
||||
|
||||
Write-Host -ForegroundColor Green "INFO: Docker images of the daemon under test"
|
||||
Write-Host
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" images
|
||||
$ErrorActionPreference = "Stop"
|
||||
if ($LastExitCode -ne 0) {
|
||||
Throw "ERROR: The daemon under test does not appear to be running."
|
||||
}
|
||||
Write-Host
|
||||
}
|
||||
|
||||
# Run the WCOW integration tests unless SKIP_INTEGRATION_TESTS is defined
|
||||
if (($null -eq $env:LCOW_MODE) -and ($null -eq $env:LCOW_BASIC_MODE)) {
|
||||
if ($null -eq $env:SKIP_INTEGRATION_TESTS) {
|
||||
Write-Host -ForegroundColor Cyan "INFO: Running integration tests at $(Get-Date)..."
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
|
||||
# Location of the daemon under test.
|
||||
$env:OrigDOCKER_HOST="$env:DOCKER_HOST"
|
||||
|
||||
#https://blogs.technet.microsoft.com/heyscriptingguy/2011/09/20/solve-problems-with-external-command-lines-in-powershell/ is useful to see tokenising
|
||||
$jsonFilePath = "..\\bundles\\go-test-report-intcli-tests.json"
|
||||
$xmlFilePath = "..\\bundles\\junit-report-intcli-tests.xml"
|
||||
$c = "gotestsum --format=standard-verbose --jsonfile=$jsonFilePath --junitfile=$xmlFilePath -- "
|
||||
if ($null -ne $env:INTEGRATION_TEST_NAME) { # Makes is quicker for debugging to be able to run only a subset of the integration tests
|
||||
$c += "`"-test.run`" "
|
||||
$c += "`"$env:INTEGRATION_TEST_NAME`" "
|
||||
Write-Host -ForegroundColor Magenta "WARN: Only running integration tests matching $env:INTEGRATION_TEST_NAME"
|
||||
}
|
||||
$c += "`"-tags`" " + "`"autogen`" "
|
||||
$c += "`"-test.timeout`" " + "`"200m`" "
|
||||
|
||||
if ($null -ne $env:INTEGRATION_IN_CONTAINER) {
|
||||
Write-Host -ForegroundColor Green "INFO: Integration tests being run inside a container"
|
||||
# Note we talk back through the containers gateway address
|
||||
# And the ridiculous lengths we have to go to get the default gateway address... (GetNetIPConfiguration doesn't work in nanoserver)
|
||||
# I just could not get the escaping to work in a single command, so output $c to a file and run that in the container instead...
|
||||
# Not the prettiest, but it works.
|
||||
$c | Out-File -Force "$env:TEMP\binary\runIntegrationCLI.ps1"
|
||||
$Duration= $(Measure-Command { & docker run `
|
||||
--rm `
|
||||
-e c=$c `
|
||||
--workdir "c`:\gopath\src\github.com\docker\docker\integration-cli" `
|
||||
-v "$env:TEMP\binary`:c:\target" `
|
||||
docker `
|
||||
"`$env`:PATH`='c`:\target;'+`$env:PATH`; `$env:DOCKER_HOST`='tcp`://'+(ipconfig | select -last 1).Substring(39)+'`:2357'; c:\target\runIntegrationCLI.ps1" | Out-Host } )
|
||||
} else {
|
||||
$env:DOCKER_HOST=$DASHH_CUT
|
||||
$env:PATH="$env:TEMP\binary;$env:PATH;" # Force to use the test binaries, not the host ones.
|
||||
$env:GO111MODULE="off"
|
||||
Write-Host -ForegroundColor Green "INFO: DOCKER_HOST at $DASHH_CUT"
|
||||
if ($null -eq $env:SKIP_INTEGRATION_TESTS) {
|
||||
Write-Host -ForegroundColor Cyan "INFO: Running integration tests at $(Get-Date)..."
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
Write-Host -ForegroundColor Cyan "INFO: Integration API tests being run from the host:"
|
||||
$start=(Get-Date); Invoke-Expression ".\hack\make.ps1 -TestIntegration"; $Duration=New-Timespan -Start $start -End (Get-Date)
|
||||
$IntTestsRunResult = $LastExitCode
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not($IntTestsRunResult -eq 0)) {
|
||||
Throw "ERROR: Integration API tests failed at $(Get-Date). Duration`:$Duration"
|
||||
}
|
||||
# Location of the daemon under test.
|
||||
$env:OrigDOCKER_HOST="$env:DOCKER_HOST"
|
||||
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
Write-Host -ForegroundColor Green "INFO: Integration CLI tests being run from the host:"
|
||||
Write-Host -ForegroundColor Green "INFO: $c"
|
||||
Set-Location "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker\integration-cli"
|
||||
# Explicit to not use measure-command otherwise don't get output as it goes
|
||||
$start=(Get-Date); Invoke-Expression $c; $Duration=New-Timespan -Start $start -End (Get-Date)
|
||||
}
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: Integration CLI tests failed at $(Get-Date). Duration`:$Duration"
|
||||
}
|
||||
Write-Host -ForegroundColor Green "INFO: Integration tests ended at $(Get-Date). Duration`:$Duration"
|
||||
} else {
|
||||
Write-Host -ForegroundColor Magenta "WARN: Skipping integration tests"
|
||||
#https://blogs.technet.microsoft.com/heyscriptingguy/2011/09/20/solve-problems-with-external-command-lines-in-powershell/ is useful to see tokenising
|
||||
$jsonFilePath = "..\\bundles\\go-test-report-intcli-tests.json"
|
||||
$xmlFilePath = "..\\bundles\\junit-report-intcli-tests.xml"
|
||||
$c = "gotestsum --format=standard-verbose --jsonfile=$jsonFilePath --junitfile=$xmlFilePath -- "
|
||||
if ($null -ne $env:INTEGRATION_TEST_NAME) { # Makes is quicker for debugging to be able to run only a subset of the integration tests
|
||||
$c += "`"-test.run`" "
|
||||
$c += "`"$env:INTEGRATION_TEST_NAME`" "
|
||||
Write-Host -ForegroundColor Magenta "WARN: Only running integration tests matching $env:INTEGRATION_TEST_NAME"
|
||||
}
|
||||
} else {
|
||||
# The LCOW version of the tests here
|
||||
if ($null -eq $env:SKIP_INTEGRATION_TESTS) {
|
||||
Write-Host -ForegroundColor Cyan "INFO: Running LCOW tests at $(Get-Date)..."
|
||||
$c += "`"-tags`" " + "`"autogen`" "
|
||||
$c += "`"-test.timeout`" " + "`"200m`" "
|
||||
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
|
||||
# Location of the daemon under test.
|
||||
$env:OrigDOCKER_HOST="$env:DOCKER_HOST"
|
||||
|
||||
# Make sure we are pointing at the DUT
|
||||
$env:DOCKER_HOST=$DASHH_CUT
|
||||
if ($null -ne $env:INTEGRATION_IN_CONTAINER) {
|
||||
Write-Host -ForegroundColor Green "INFO: Integration tests being run inside a container"
|
||||
# Note we talk back through the containers gateway address
|
||||
# And the ridiculous lengths we have to go to get the default gateway address... (GetNetIPConfiguration doesn't work in nanoserver)
|
||||
# I just could not get the escaping to work in a single command, so output $c to a file and run that in the container instead...
|
||||
# Not the prettiest, but it works.
|
||||
$c | Out-File -Force "$env:TEMP\binary\runIntegrationCLI.ps1"
|
||||
$Duration= $(Measure-Command { & docker run `
|
||||
--rm `
|
||||
-e c=$c `
|
||||
--workdir "c`:\gopath\src\github.com\docker\docker\integration-cli" `
|
||||
-v "$env:TEMP\binary`:c:\target" `
|
||||
docker `
|
||||
"`$env`:PATH`='c`:\target;'+`$env:PATH`; `$env:DOCKER_HOST`='tcp`://'+(ipconfig | select -last 1).Substring(39)+'`:2357'; c:\target\runIntegrationCLI.ps1" | Out-Host } )
|
||||
} else {
|
||||
$env:DOCKER_HOST=$DASHH_CUT
|
||||
$env:PATH="$env:TEMP\binary;$env:PATH;" # Force to use the test binaries, not the host ones.
|
||||
$env:GO111MODULE="off"
|
||||
Write-Host -ForegroundColor Green "INFO: DOCKER_HOST at $DASHH_CUT"
|
||||
|
||||
# Force to use the test binaries, not the host ones.
|
||||
$env:PATH="$env:TEMP\binary;$env:PATH;"
|
||||
|
||||
if ($null -ne $env:LCOW_BASIC_MODE) {
|
||||
$wc = New-Object net.webclient
|
||||
try {
|
||||
Write-Host -ForegroundColor green "INFO: Downloading latest execution script..."
|
||||
$wc.Downloadfile("https://raw.githubusercontent.com/kevpar/docker-w2wCIScripts/master/runCI/lcowbasicvalidation.ps1", "$env:TEMP\binary\lcowbasicvalidation.ps1")
|
||||
}
|
||||
catch [System.Net.WebException]
|
||||
{
|
||||
Throw ("Failed to download: $_")
|
||||
}
|
||||
|
||||
# Explicit to not use measure-command otherwise don't get output as it goes
|
||||
$ErrorActionPreference = "Stop"
|
||||
$start=(Get-Date); Invoke-Expression "powershell $env:TEMP\binary\lcowbasicvalidation.ps1"; $lec=$lastExitCode; $Duration=New-Timespan -Start $start -End (Get-Date)
|
||||
$Duration=New-Timespan -Start $start -End (Get-Date)
|
||||
Write-Host -ForegroundColor Green "INFO: LCOW tests ended at $(Get-Date). Duration`:$Duration"
|
||||
if ($lec -ne 0) {
|
||||
Throw "LCOW validation tests failed"
|
||||
}
|
||||
} else {
|
||||
#https://blogs.technet.microsoft.com/heyscriptingguy/2011/09/20/solve-problems-with-external-command-lines-in-powershell/ is useful to see tokenising
|
||||
$c = "go test "
|
||||
$c += "`"-test.v`" "
|
||||
if ($null -ne $env:INTEGRATION_TEST_NAME) { # Makes is quicker for debugging to be able to run only a subset of the integration tests
|
||||
$c += "`"-test.run`" "
|
||||
$c += "`"$env:INTEGRATION_TEST_NAME`" "
|
||||
Write-Host -ForegroundColor Magenta "WARN: Only running LCOW integration tests matching $env:INTEGRATION_TEST_NAME"
|
||||
}
|
||||
$c += "`"-tags`" " + "`"autogen`" "
|
||||
$c += "`"-test.timeout`" " + "`"200m`" "
|
||||
|
||||
Write-Host -ForegroundColor Green "INFO: LCOW Integration tests being run from the host:"
|
||||
Set-Location "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker\integration-cli"
|
||||
Write-Host -ForegroundColor Green "INFO: $c"
|
||||
Write-Host -ForegroundColor Green "INFO: DOCKER_HOST at $DASHH_CUT"
|
||||
# Explicit to not use measure-command otherwise don't get output as it goes
|
||||
$start=(Get-Date); Invoke-Expression $c; $Duration=New-Timespan -Start $start -End (Get-Date)
|
||||
|
||||
}
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
Write-Host -ForegroundColor Cyan "INFO: Integration API tests being run from the host:"
|
||||
$start=(Get-Date); Invoke-Expression ".\hack\make.ps1 -TestIntegration"; $Duration=New-Timespan -Start $start -End (Get-Date)
|
||||
$IntTestsRunResult = $LastExitCode
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: Integration tests failed at $(Get-Date). Duration`:$Duration"
|
||||
if (-not($IntTestsRunResult -eq 0)) {
|
||||
Throw "ERROR: Integration API tests failed at $(Get-Date). Duration`:$Duration"
|
||||
}
|
||||
Write-Host -ForegroundColor Green "INFO: Integration tests ended at $(Get-Date). Duration`:$Duration"
|
||||
} else {
|
||||
Write-Host -ForegroundColor Magenta "WARN: Skipping LCOW tests"
|
||||
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
Write-Host -ForegroundColor Green "INFO: Integration CLI tests being run from the host:"
|
||||
Write-Host -ForegroundColor Green "INFO: $c"
|
||||
Set-Location "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker\integration-cli"
|
||||
# Explicit to not use measure-command otherwise don't get output as it goes
|
||||
$start=(Get-Date); Invoke-Expression $c; $Duration=New-Timespan -Start $start -End (Get-Date)
|
||||
}
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: Integration CLI tests failed at $(Get-Date). Duration`:$Duration"
|
||||
}
|
||||
Write-Host -ForegroundColor Green "INFO: Integration tests ended at $(Get-Date). Duration`:$Duration"
|
||||
} else {
|
||||
Write-Host -ForegroundColor Magenta "WARN: Skipping integration tests"
|
||||
}
|
||||
|
||||
# Docker info now to get counts (after or if jjh/containercounts is merged)
|
||||
|
|
|
@ -426,12 +426,8 @@ func checkCompatibleOS(imageOS string) error {
|
|||
return fmt.Errorf("cannot load %s image on %s", imageOS, runtime.GOOS)
|
||||
}
|
||||
|
||||
p, err := platforms.Parse(imageOS)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return system.ValidatePlatform(p)
|
||||
_, err := platforms.Parse(imageOS)
|
||||
return err
|
||||
}
|
||||
|
||||
func validateManifest(manifest []manifestItem) error {
|
||||
|
|
|
@ -1,48 +0,0 @@
|
|||
// +build windows,!no_lcow
|
||||
|
||||
package system // import "github.com/docker/docker/pkg/system"
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/Microsoft/hcsshim/osversion"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
// lcowSupported determines if Linux Containers on Windows are supported.
|
||||
lcowSupported = false
|
||||
)
|
||||
|
||||
// InitLCOW sets whether LCOW is supported or not. Requires RS5+
|
||||
func InitLCOW(experimental bool) {
|
||||
if experimental && osversion.Build() >= osversion.RS5 {
|
||||
lcowSupported = true
|
||||
}
|
||||
}
|
||||
|
||||
func LCOWSupported() bool {
|
||||
return lcowSupported
|
||||
}
|
||||
|
||||
// ValidatePlatform determines if a platform structure is valid.
|
||||
// TODO This is a temporary windows-only function, should be replaced by
|
||||
// comparison of worker capabilities
|
||||
func ValidatePlatform(platform specs.Platform) error {
|
||||
if !IsOSSupported(platform.OS) {
|
||||
return errors.Errorf("unsupported os %s", platform.OS)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsOSSupported determines if an operating system is supported by the host
|
||||
func IsOSSupported(os string) bool {
|
||||
if strings.EqualFold("windows", os) {
|
||||
return true
|
||||
}
|
||||
if LCOWSupported() && strings.EqualFold(os, "linux") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
|
@ -1,27 +1,14 @@
|
|||
// +build !windows windows,no_lcow
|
||||
|
||||
package system // import "github.com/docker/docker/pkg/system"
|
||||
import (
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// InitLCOW does nothing since LCOW is a windows only feature
|
||||
func InitLCOW(_ bool) {}
|
||||
|
||||
// LCOWSupported returns true if Linux containers on Windows are supported.
|
||||
func LCOWSupported() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// ValidatePlatform determines if a platform structure is valid. This function
|
||||
// is used for LCOW, and is a no-op on non-windows platforms.
|
||||
func ValidatePlatform(_ specs.Platform) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsOSSupported determines if an operating system is supported by the host.
|
||||
func IsOSSupported(os string) bool {
|
||||
return strings.EqualFold(runtime.GOOS, os)
|
||||
|
|
|
@ -1,24 +1,15 @@
|
|||
package system // import "github.com/docker/docker/pkg/system"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
|
||||
// DefaultPathEnv is unix style list of directories to search for
|
||||
// executables. Each directory is separated from the next by a colon
|
||||
// ':' character .
|
||||
// For Windows containers, an empty string is returned as the default
|
||||
// path will be set by the container, and Docker has no context of what the
|
||||
// default path should be.
|
||||
func DefaultPathEnv(os string) string {
|
||||
if runtime.GOOS == "windows" {
|
||||
if os != runtime.GOOS {
|
||||
return defaultUnixPathEnv
|
||||
}
|
||||
// Deliberately empty on Windows containers on Windows as the default path will be set by
|
||||
// the container. Docker has no context of what the default path should be.
|
||||
if os == "windows" {
|
||||
return ""
|
||||
}
|
||||
return defaultUnixPathEnv
|
||||
|
@ -47,18 +38,5 @@ type PathVerifier interface {
|
|||
// /a --> \a
|
||||
// d:\ --> Fail
|
||||
func CheckSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) {
|
||||
if runtime.GOOS != "windows" || LCOWSupported() {
|
||||
return path, nil
|
||||
}
|
||||
|
||||
if len(path) == 2 && string(path[1]) == ":" {
|
||||
return "", fmt.Errorf("No relative path specified in %q", path)
|
||||
}
|
||||
if !driver.IsAbs(path) || len(path) < 2 {
|
||||
return filepath.FromSlash(path), nil
|
||||
}
|
||||
if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
|
||||
return "", fmt.Errorf("The specified path is not on the system drive (C:)")
|
||||
}
|
||||
return filepath.FromSlash(path[2:]), nil
|
||||
return checkSystemDriveAndRemoveDriveLetter(path, driver)
|
||||
}
|
||||
|
|
|
@ -8,3 +8,9 @@ package system // import "github.com/docker/docker/pkg/system"
|
|||
func GetLongPathName(path string) (string, error) {
|
||||
return path, nil
|
||||
}
|
||||
|
||||
// checkSystemDriveAndRemoveDriveLetter is the non-Windows implementation
|
||||
// of CheckSystemDriveAndRemoveDriveLetter
|
||||
func checkSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) {
|
||||
return path, nil
|
||||
}
|
||||
|
|
|
@ -1,6 +1,12 @@
|
|||
package system // import "github.com/docker/docker/pkg/system"
|
||||
|
||||
import "golang.org/x/sys/windows"
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// GetLongPathName converts Windows short pathnames to full pathnames.
|
||||
// For example C:\Users\ADMIN~1 --> C:\Users\Administrator.
|
||||
|
@ -25,3 +31,18 @@ func GetLongPathName(path string) (string, error) {
|
|||
}
|
||||
return windows.UTF16ToString(b), nil
|
||||
}
|
||||
|
||||
// checkSystemDriveAndRemoveDriveLetter is the Windows implementation
|
||||
// of CheckSystemDriveAndRemoveDriveLetter
|
||||
func checkSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) {
|
||||
if len(path) == 2 && string(path[1]) == ":" {
|
||||
return "", fmt.Errorf("No relative path specified in %q", path)
|
||||
}
|
||||
if !driver.IsAbs(path) || len(path) < 2 {
|
||||
return filepath.FromSlash(path), nil
|
||||
}
|
||||
if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
|
||||
return "", fmt.Errorf("The specified path is not on the system drive (C:)")
|
||||
}
|
||||
return filepath.FromSlash(path[2:]), nil
|
||||
}
|
||||
|
|
|
@ -185,12 +185,6 @@ NOTE: if you need to set more than one build tag, space separate them:
|
|||
export DOCKER_BUILDTAGS='apparmor exclude_graphdriver_aufs'
|
||||
```
|
||||
|
||||
### LCOW (Linux Containers On Windows)
|
||||
|
||||
LCOW is an experimental feature on Windows, and requires the daemon to run with
|
||||
experimental features enabled. Use the `no_lcow` build tag to disable the LCOW
|
||||
feature at compile time,
|
||||
|
||||
### Static Daemon
|
||||
|
||||
If it is feasible within the constraints of your distribution, you should
|
||||
|
|
1325
vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go
generated
vendored
1325
vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go
generated
vendored
File diff suppressed because it is too large
Load diff
411
vendor/github.com/Microsoft/hcsshim/ext4/internal/format/format.go
generated
vendored
411
vendor/github.com/Microsoft/hcsshim/ext4/internal/format/format.go
generated
vendored
|
@ -1,411 +0,0 @@
|
|||
package format
|
||||
|
||||
type SuperBlock struct {
|
||||
InodesCount uint32
|
||||
BlocksCountLow uint32
|
||||
RootBlocksCountLow uint32
|
||||
FreeBlocksCountLow uint32
|
||||
FreeInodesCount uint32
|
||||
FirstDataBlock uint32
|
||||
LogBlockSize uint32
|
||||
LogClusterSize uint32
|
||||
BlocksPerGroup uint32
|
||||
ClustersPerGroup uint32
|
||||
InodesPerGroup uint32
|
||||
Mtime uint32
|
||||
Wtime uint32
|
||||
MountCount uint16
|
||||
MaxMountCount uint16
|
||||
Magic uint16
|
||||
State uint16
|
||||
Errors uint16
|
||||
MinorRevisionLevel uint16
|
||||
LastCheck uint32
|
||||
CheckInterval uint32
|
||||
CreatorOS uint32
|
||||
RevisionLevel uint32
|
||||
DefaultReservedUid uint16
|
||||
DefaultReservedGid uint16
|
||||
FirstInode uint32
|
||||
InodeSize uint16
|
||||
BlockGroupNr uint16
|
||||
FeatureCompat CompatFeature
|
||||
FeatureIncompat IncompatFeature
|
||||
FeatureRoCompat RoCompatFeature
|
||||
UUID [16]uint8
|
||||
VolumeName [16]byte
|
||||
LastMounted [64]byte
|
||||
AlgorithmUsageBitmap uint32
|
||||
PreallocBlocks uint8
|
||||
PreallocDirBlocks uint8
|
||||
ReservedGdtBlocks uint16
|
||||
JournalUUID [16]uint8
|
||||
JournalInum uint32
|
||||
JournalDev uint32
|
||||
LastOrphan uint32
|
||||
HashSeed [4]uint32
|
||||
DefHashVersion uint8
|
||||
JournalBackupType uint8
|
||||
DescSize uint16
|
||||
DefaultMountOpts uint32
|
||||
FirstMetaBg uint32
|
||||
MkfsTime uint32
|
||||
JournalBlocks [17]uint32
|
||||
BlocksCountHigh uint32
|
||||
RBlocksCountHigh uint32
|
||||
FreeBlocksCountHigh uint32
|
||||
MinExtraIsize uint16
|
||||
WantExtraIsize uint16
|
||||
Flags uint32
|
||||
RaidStride uint16
|
||||
MmpInterval uint16
|
||||
MmpBlock uint64
|
||||
RaidStripeWidth uint32
|
||||
LogGroupsPerFlex uint8
|
||||
ChecksumType uint8
|
||||
ReservedPad uint16
|
||||
KbytesWritten uint64
|
||||
SnapshotInum uint32
|
||||
SnapshotID uint32
|
||||
SnapshotRBlocksCount uint64
|
||||
SnapshotList uint32
|
||||
ErrorCount uint32
|
||||
FirstErrorTime uint32
|
||||
FirstErrorInode uint32
|
||||
FirstErrorBlock uint64
|
||||
FirstErrorFunc [32]uint8
|
||||
FirstErrorLine uint32
|
||||
LastErrorTime uint32
|
||||
LastErrorInode uint32
|
||||
LastErrorLine uint32
|
||||
LastErrorBlock uint64
|
||||
LastErrorFunc [32]uint8
|
||||
MountOpts [64]uint8
|
||||
UserQuotaInum uint32
|
||||
GroupQuotaInum uint32
|
||||
OverheadBlocks uint32
|
||||
BackupBgs [2]uint32
|
||||
EncryptAlgos [4]uint8
|
||||
EncryptPwSalt [16]uint8
|
||||
LpfInode uint32
|
||||
ProjectQuotaInum uint32
|
||||
ChecksumSeed uint32
|
||||
WtimeHigh uint8
|
||||
MtimeHigh uint8
|
||||
MkfsTimeHigh uint8
|
||||
LastcheckHigh uint8
|
||||
FirstErrorTimeHigh uint8
|
||||
LastErrorTimeHigh uint8
|
||||
Pad [2]uint8
|
||||
Reserved [96]uint32
|
||||
Checksum uint32
|
||||
}
|
||||
|
||||
const SuperBlockMagic uint16 = 0xef53
|
||||
|
||||
type CompatFeature uint32
|
||||
type IncompatFeature uint32
|
||||
type RoCompatFeature uint32
|
||||
|
||||
const (
|
||||
CompatDirPrealloc CompatFeature = 0x1
|
||||
CompatImagicInodes CompatFeature = 0x2
|
||||
CompatHasJournal CompatFeature = 0x4
|
||||
CompatExtAttr CompatFeature = 0x8
|
||||
CompatResizeInode CompatFeature = 0x10
|
||||
CompatDirIndex CompatFeature = 0x20
|
||||
CompatLazyBg CompatFeature = 0x40
|
||||
CompatExcludeInode CompatFeature = 0x80
|
||||
CompatExcludeBitmap CompatFeature = 0x100
|
||||
CompatSparseSuper2 CompatFeature = 0x200
|
||||
|
||||
IncompatCompression IncompatFeature = 0x1
|
||||
IncompatFiletype IncompatFeature = 0x2
|
||||
IncompatRecover IncompatFeature = 0x4
|
||||
IncompatJournalDev IncompatFeature = 0x8
|
||||
IncompatMetaBg IncompatFeature = 0x10
|
||||
IncompatExtents IncompatFeature = 0x40
|
||||
Incompat_64Bit IncompatFeature = 0x80
|
||||
IncompatMmp IncompatFeature = 0x100
|
||||
IncompatFlexBg IncompatFeature = 0x200
|
||||
IncompatEaInode IncompatFeature = 0x400
|
||||
IncompatDirdata IncompatFeature = 0x1000
|
||||
IncompatCsumSeed IncompatFeature = 0x2000
|
||||
IncompatLargedir IncompatFeature = 0x4000
|
||||
IncompatInlineData IncompatFeature = 0x8000
|
||||
IncompatEncrypt IncompatFeature = 0x10000
|
||||
|
||||
RoCompatSparseSuper RoCompatFeature = 0x1
|
||||
RoCompatLargeFile RoCompatFeature = 0x2
|
||||
RoCompatBtreeDir RoCompatFeature = 0x4
|
||||
RoCompatHugeFile RoCompatFeature = 0x8
|
||||
RoCompatGdtCsum RoCompatFeature = 0x10
|
||||
RoCompatDirNlink RoCompatFeature = 0x20
|
||||
RoCompatExtraIsize RoCompatFeature = 0x40
|
||||
RoCompatHasSnapshot RoCompatFeature = 0x80
|
||||
RoCompatQuota RoCompatFeature = 0x100
|
||||
RoCompatBigalloc RoCompatFeature = 0x200
|
||||
RoCompatMetadataCsum RoCompatFeature = 0x400
|
||||
RoCompatReplica RoCompatFeature = 0x800
|
||||
RoCompatReadonly RoCompatFeature = 0x1000
|
||||
RoCompatProject RoCompatFeature = 0x2000
|
||||
)
|
||||
|
||||
type BlockGroupFlag uint16
|
||||
|
||||
const (
|
||||
BlockGroupInodeUninit BlockGroupFlag = 0x1
|
||||
BlockGroupBlockUninit BlockGroupFlag = 0x2
|
||||
BlockGroupInodeZeroed BlockGroupFlag = 0x4
|
||||
)
|
||||
|
||||
type GroupDescriptor struct {
|
||||
BlockBitmapLow uint32
|
||||
InodeBitmapLow uint32
|
||||
InodeTableLow uint32
|
||||
FreeBlocksCountLow uint16
|
||||
FreeInodesCountLow uint16
|
||||
UsedDirsCountLow uint16
|
||||
Flags BlockGroupFlag
|
||||
ExcludeBitmapLow uint32
|
||||
BlockBitmapCsumLow uint16
|
||||
InodeBitmapCsumLow uint16
|
||||
ItableUnusedLow uint16
|
||||
Checksum uint16
|
||||
}
|
||||
|
||||
type GroupDescriptor64 struct {
|
||||
GroupDescriptor
|
||||
BlockBitmapHigh uint32
|
||||
InodeBitmapHigh uint32
|
||||
InodeTableHigh uint32
|
||||
FreeBlocksCountHigh uint16
|
||||
FreeInodesCountHigh uint16
|
||||
UsedDirsCountHigh uint16
|
||||
ItableUnusedHigh uint16
|
||||
ExcludeBitmapHigh uint32
|
||||
BlockBitmapCsumHigh uint16
|
||||
InodeBitmapCsumHigh uint16
|
||||
Reserved uint32
|
||||
}
|
||||
|
||||
const (
|
||||
S_IXOTH = 0x1
|
||||
S_IWOTH = 0x2
|
||||
S_IROTH = 0x4
|
||||
S_IXGRP = 0x8
|
||||
S_IWGRP = 0x10
|
||||
S_IRGRP = 0x20
|
||||
S_IXUSR = 0x40
|
||||
S_IWUSR = 0x80
|
||||
S_IRUSR = 0x100
|
||||
S_ISVTX = 0x200
|
||||
S_ISGID = 0x400
|
||||
S_ISUID = 0x800
|
||||
S_IFIFO = 0x1000
|
||||
S_IFCHR = 0x2000
|
||||
S_IFDIR = 0x4000
|
||||
S_IFBLK = 0x6000
|
||||
S_IFREG = 0x8000
|
||||
S_IFLNK = 0xA000
|
||||
S_IFSOCK = 0xC000
|
||||
|
||||
TypeMask uint16 = 0xF000
|
||||
)
|
||||
|
||||
type InodeNumber uint32
|
||||
|
||||
const (
|
||||
InodeRoot = 2
|
||||
)
|
||||
|
||||
type Inode struct {
|
||||
Mode uint16
|
||||
Uid uint16
|
||||
SizeLow uint32
|
||||
Atime uint32
|
||||
Ctime uint32
|
||||
Mtime uint32
|
||||
Dtime uint32
|
||||
Gid uint16
|
||||
LinksCount uint16
|
||||
BlocksLow uint32
|
||||
Flags InodeFlag
|
||||
Version uint32
|
||||
Block [60]byte
|
||||
Generation uint32
|
||||
XattrBlockLow uint32
|
||||
SizeHigh uint32
|
||||
ObsoleteFragmentAddr uint32
|
||||
BlocksHigh uint16
|
||||
XattrBlockHigh uint16
|
||||
UidHigh uint16
|
||||
GidHigh uint16
|
||||
ChecksumLow uint16
|
||||
Reserved uint16
|
||||
ExtraIsize uint16
|
||||
ChecksumHigh uint16
|
||||
CtimeExtra uint32
|
||||
MtimeExtra uint32
|
||||
AtimeExtra uint32
|
||||
Crtime uint32
|
||||
CrtimeExtra uint32
|
||||
VersionHigh uint32
|
||||
Projid uint32
|
||||
}
|
||||
|
||||
type InodeFlag uint32
|
||||
|
||||
const (
|
||||
InodeFlagSecRm InodeFlag = 0x1
|
||||
InodeFlagUnRm InodeFlag = 0x2
|
||||
InodeFlagCompressed InodeFlag = 0x4
|
||||
InodeFlagSync InodeFlag = 0x8
|
||||
InodeFlagImmutable InodeFlag = 0x10
|
||||
InodeFlagAppend InodeFlag = 0x20
|
||||
InodeFlagNoDump InodeFlag = 0x40
|
||||
InodeFlagNoAtime InodeFlag = 0x80
|
||||
InodeFlagDirtyCompressed InodeFlag = 0x100
|
||||
InodeFlagCompressedClusters InodeFlag = 0x200
|
||||
InodeFlagNoCompress InodeFlag = 0x400
|
||||
InodeFlagEncrypted InodeFlag = 0x800
|
||||
InodeFlagHashedIndex InodeFlag = 0x1000
|
||||
InodeFlagMagic InodeFlag = 0x2000
|
||||
InodeFlagJournalData InodeFlag = 0x4000
|
||||
InodeFlagNoTail InodeFlag = 0x8000
|
||||
InodeFlagDirSync InodeFlag = 0x10000
|
||||
InodeFlagTopDir InodeFlag = 0x20000
|
||||
InodeFlagHugeFile InodeFlag = 0x40000
|
||||
InodeFlagExtents InodeFlag = 0x80000
|
||||
InodeFlagEaInode InodeFlag = 0x200000
|
||||
InodeFlagEOFBlocks InodeFlag = 0x400000
|
||||
InodeFlagSnapfile InodeFlag = 0x01000000
|
||||
InodeFlagSnapfileDeleted InodeFlag = 0x04000000
|
||||
InodeFlagSnapfileShrunk InodeFlag = 0x08000000
|
||||
InodeFlagInlineData InodeFlag = 0x10000000
|
||||
InodeFlagProjectIDInherit InodeFlag = 0x20000000
|
||||
InodeFlagReserved InodeFlag = 0x80000000
|
||||
)
|
||||
|
||||
const (
|
||||
MaxLinks = 65000
|
||||
)
|
||||
|
||||
type ExtentHeader struct {
|
||||
Magic uint16
|
||||
Entries uint16
|
||||
Max uint16
|
||||
Depth uint16
|
||||
Generation uint32
|
||||
}
|
||||
|
||||
const ExtentHeaderMagic uint16 = 0xf30a
|
||||
|
||||
type ExtentIndexNode struct {
|
||||
Block uint32
|
||||
LeafLow uint32
|
||||
LeafHigh uint16
|
||||
Unused uint16
|
||||
}
|
||||
|
||||
type ExtentLeafNode struct {
|
||||
Block uint32
|
||||
Length uint16
|
||||
StartHigh uint16
|
||||
StartLow uint32
|
||||
}
|
||||
|
||||
type ExtentTail struct {
|
||||
Checksum uint32
|
||||
}
|
||||
|
||||
type DirectoryEntry struct {
|
||||
Inode InodeNumber
|
||||
RecordLength uint16
|
||||
NameLength uint8
|
||||
FileType FileType
|
||||
//Name []byte
|
||||
}
|
||||
|
||||
type FileType uint8
|
||||
|
||||
const (
|
||||
FileTypeUnknown FileType = 0x0
|
||||
FileTypeRegular FileType = 0x1
|
||||
FileTypeDirectory FileType = 0x2
|
||||
FileTypeCharacter FileType = 0x3
|
||||
FileTypeBlock FileType = 0x4
|
||||
FileTypeFIFO FileType = 0x5
|
||||
FileTypeSocket FileType = 0x6
|
||||
FileTypeSymbolicLink FileType = 0x7
|
||||
)
|
||||
|
||||
type DirectoryEntryTail struct {
|
||||
ReservedZero1 uint32
|
||||
RecordLength uint16
|
||||
ReservedZero2 uint8
|
||||
FileType uint8
|
||||
Checksum uint32
|
||||
}
|
||||
|
||||
type DirectoryTreeRoot struct {
|
||||
Dot DirectoryEntry
|
||||
DotName [4]byte
|
||||
DotDot DirectoryEntry
|
||||
DotDotName [4]byte
|
||||
ReservedZero uint32
|
||||
HashVersion uint8
|
||||
InfoLength uint8
|
||||
IndirectLevels uint8
|
||||
UnusedFlags uint8
|
||||
Limit uint16
|
||||
Count uint16
|
||||
Block uint32
|
||||
//Entries []DirectoryTreeEntry
|
||||
}
|
||||
|
||||
type DirectoryTreeNode struct {
|
||||
FakeInode uint32
|
||||
FakeRecordLength uint16
|
||||
NameLength uint8
|
||||
FileType uint8
|
||||
Limit uint16
|
||||
Count uint16
|
||||
Block uint32
|
||||
//Entries []DirectoryTreeEntry
|
||||
}
|
||||
|
||||
type DirectoryTreeEntry struct {
|
||||
Hash uint32
|
||||
Block uint32
|
||||
}
|
||||
|
||||
type DirectoryTreeTail struct {
|
||||
Reserved uint32
|
||||
Checksum uint32
|
||||
}
|
||||
|
||||
type XAttrInodeBodyHeader struct {
|
||||
Magic uint32
|
||||
}
|
||||
|
||||
type XAttrHeader struct {
|
||||
Magic uint32
|
||||
ReferenceCount uint32
|
||||
Blocks uint32
|
||||
Hash uint32
|
||||
Checksum uint32
|
||||
Reserved [3]uint32
|
||||
}
|
||||
|
||||
const XAttrHeaderMagic uint32 = 0xea020000
|
||||
|
||||
type XAttrEntry struct {
|
||||
NameLength uint8
|
||||
NameIndex uint8
|
||||
ValueOffset uint16
|
||||
ValueInum uint32
|
||||
ValueSize uint32
|
||||
Hash uint32
|
||||
//Name []byte
|
||||
}
|
209
vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/tar2ext4.go
generated
vendored
209
vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/tar2ext4.go
generated
vendored
|
@ -1,209 +0,0 @@
|
|||
package tar2ext4
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/Microsoft/hcsshim/ext4/internal/compactext4"
|
||||
"github.com/Microsoft/hcsshim/ext4/internal/format"
|
||||
)
|
||||
|
||||
type params struct {
|
||||
convertWhiteout bool
|
||||
appendVhdFooter bool
|
||||
ext4opts []compactext4.Option
|
||||
}
|
||||
|
||||
// Option is the type for optional parameters to Convert.
|
||||
type Option func(*params)
|
||||
|
||||
// ConvertWhiteout instructs the converter to convert OCI-style whiteouts
|
||||
// (beginning with .wh.) to overlay-style whiteouts.
|
||||
func ConvertWhiteout(p *params) {
|
||||
p.convertWhiteout = true
|
||||
}
|
||||
|
||||
// AppendVhdFooter instructs the converter to add a fixed VHD footer to the
|
||||
// file.
|
||||
func AppendVhdFooter(p *params) {
|
||||
p.appendVhdFooter = true
|
||||
}
|
||||
|
||||
// InlineData instructs the converter to write small files into the inode
|
||||
// structures directly. This creates smaller images but currently is not
|
||||
// compatible with DAX.
|
||||
func InlineData(p *params) {
|
||||
p.ext4opts = append(p.ext4opts, compactext4.InlineData)
|
||||
}
|
||||
|
||||
// MaximumDiskSize instructs the writer to limit the disk size to the specified
|
||||
// value. This also reserves enough metadata space for the specified disk size.
|
||||
// If not provided, then 16GB is the default.
|
||||
func MaximumDiskSize(size int64) Option {
|
||||
return func(p *params) {
|
||||
p.ext4opts = append(p.ext4opts, compactext4.MaximumDiskSize(size))
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
whiteoutPrefix = ".wh."
|
||||
opaqueWhiteout = ".wh..wh..opq"
|
||||
)
|
||||
|
||||
// Convert writes a compact ext4 file system image that contains the files in the
|
||||
// input tar stream.
|
||||
func Convert(r io.Reader, w io.ReadWriteSeeker, options ...Option) error {
|
||||
var p params
|
||||
for _, opt := range options {
|
||||
opt(&p)
|
||||
}
|
||||
t := tar.NewReader(bufio.NewReader(r))
|
||||
fs := compactext4.NewWriter(w, p.ext4opts...)
|
||||
for {
|
||||
hdr, err := t.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if p.convertWhiteout {
|
||||
dir, name := path.Split(hdr.Name)
|
||||
if strings.HasPrefix(name, whiteoutPrefix) {
|
||||
if name == opaqueWhiteout {
|
||||
// Update the directory with the appropriate xattr.
|
||||
f, err := fs.Stat(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.Xattrs["trusted.overlay.opaque"] = []byte("y")
|
||||
err = fs.Create(dir, f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// Create an overlay-style whiteout.
|
||||
f := &compactext4.File{
|
||||
Mode: compactext4.S_IFCHR,
|
||||
Devmajor: 0,
|
||||
Devminor: 0,
|
||||
}
|
||||
err = fs.Create(path.Join(dir, name[len(whiteoutPrefix):]), f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if hdr.Typeflag == tar.TypeLink {
|
||||
err = fs.Link(hdr.Linkname, hdr.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
f := &compactext4.File{
|
||||
Mode: uint16(hdr.Mode),
|
||||
Atime: hdr.AccessTime,
|
||||
Mtime: hdr.ModTime,
|
||||
Ctime: hdr.ChangeTime,
|
||||
Crtime: hdr.ModTime,
|
||||
Size: hdr.Size,
|
||||
Uid: uint32(hdr.Uid),
|
||||
Gid: uint32(hdr.Gid),
|
||||
Linkname: hdr.Linkname,
|
||||
Devmajor: uint32(hdr.Devmajor),
|
||||
Devminor: uint32(hdr.Devminor),
|
||||
Xattrs: make(map[string][]byte),
|
||||
}
|
||||
for key, value := range hdr.PAXRecords {
|
||||
const xattrPrefix = "SCHILY.xattr."
|
||||
if strings.HasPrefix(key, xattrPrefix) {
|
||||
f.Xattrs[key[len(xattrPrefix):]] = []byte(value)
|
||||
}
|
||||
}
|
||||
|
||||
var typ uint16
|
||||
switch hdr.Typeflag {
|
||||
case tar.TypeReg, tar.TypeRegA:
|
||||
typ = compactext4.S_IFREG
|
||||
case tar.TypeSymlink:
|
||||
typ = compactext4.S_IFLNK
|
||||
case tar.TypeChar:
|
||||
typ = compactext4.S_IFCHR
|
||||
case tar.TypeBlock:
|
||||
typ = compactext4.S_IFBLK
|
||||
case tar.TypeDir:
|
||||
typ = compactext4.S_IFDIR
|
||||
case tar.TypeFifo:
|
||||
typ = compactext4.S_IFIFO
|
||||
}
|
||||
f.Mode &= ^compactext4.TypeMask
|
||||
f.Mode |= typ
|
||||
err = fs.CreateWithParents(hdr.Name, f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = io.Copy(fs, t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
err := fs.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if p.appendVhdFooter {
|
||||
size, err := w.Seek(0, io.SeekEnd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = binary.Write(w, binary.BigEndian, makeFixedVHDFooter(size))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadExt4SuperBlock reads and returns ext4 super block from VHD
|
||||
//
|
||||
// The layout on disk is as follows:
|
||||
// | Group 0 padding | - 1024 bytes
|
||||
// | ext4 SuperBlock | - 1 block
|
||||
// | Group Descriptors | - many blocks
|
||||
// | Reserved GDT Blocks | - many blocks
|
||||
// | Data Block Bitmap | - 1 block
|
||||
// | inode Bitmap | - 1 block
|
||||
// | inode Table | - many blocks
|
||||
// | Data Blocks | - many blocks
|
||||
//
|
||||
// More details can be found here https://ext4.wiki.kernel.org/index.php/Ext4_Disk_Layout
|
||||
//
|
||||
// Our goal is to skip the Group 0 padding, read and return the ext4 SuperBlock
|
||||
func ReadExt4SuperBlock(vhdPath string) (*format.SuperBlock, error) {
|
||||
vhd, err := os.OpenFile(vhdPath, os.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer vhd.Close()
|
||||
|
||||
// Skip padding at the start
|
||||
if _, err := vhd.Seek(1024, io.SeekStart); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var sb format.SuperBlock
|
||||
if err := binary.Read(vhd, binary.LittleEndian, &sb); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &sb, nil
|
||||
}
|
76
vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/vhdfooter.go
generated
vendored
76
vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/vhdfooter.go
generated
vendored
|
@ -1,76 +0,0 @@
|
|||
package tar2ext4
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// Constants for the VHD footer
|
||||
const (
|
||||
cookieMagic = "conectix"
|
||||
featureMask = 0x2
|
||||
fileFormatVersionMagic = 0x00010000
|
||||
fixedDataOffset = -1
|
||||
creatorVersionMagic = 0x000a0000
|
||||
diskTypeFixed = 2
|
||||
)
|
||||
|
||||
type vhdFooter struct {
|
||||
Cookie [8]byte
|
||||
Features uint32
|
||||
FileFormatVersion uint32
|
||||
DataOffset int64
|
||||
TimeStamp uint32
|
||||
CreatorApplication [4]byte
|
||||
CreatorVersion uint32
|
||||
CreatorHostOS [4]byte
|
||||
OriginalSize int64
|
||||
CurrentSize int64
|
||||
DiskGeometry uint32
|
||||
DiskType uint32
|
||||
Checksum uint32
|
||||
UniqueID [16]uint8
|
||||
SavedState uint8
|
||||
Reserved [427]uint8
|
||||
}
|
||||
|
||||
func makeFixedVHDFooter(size int64) *vhdFooter {
|
||||
footer := &vhdFooter{
|
||||
Features: featureMask,
|
||||
FileFormatVersion: fileFormatVersionMagic,
|
||||
DataOffset: fixedDataOffset,
|
||||
CreatorVersion: creatorVersionMagic,
|
||||
OriginalSize: size,
|
||||
CurrentSize: size,
|
||||
DiskType: diskTypeFixed,
|
||||
UniqueID: generateUUID(),
|
||||
}
|
||||
copy(footer.Cookie[:], cookieMagic)
|
||||
footer.Checksum = calculateCheckSum(footer)
|
||||
return footer
|
||||
}
|
||||
|
||||
func calculateCheckSum(footer *vhdFooter) uint32 {
|
||||
oldchk := footer.Checksum
|
||||
footer.Checksum = 0
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
_ = binary.Write(buf, binary.BigEndian, footer)
|
||||
|
||||
var chk uint32
|
||||
bufBytes := buf.Bytes()
|
||||
for i := 0; i < len(bufBytes); i++ {
|
||||
chk += uint32(bufBytes[i])
|
||||
}
|
||||
footer.Checksum = oldchk
|
||||
return uint32(^chk)
|
||||
}
|
||||
|
||||
func generateUUID() [16]byte {
|
||||
res := [16]byte{}
|
||||
if _, err := rand.Read(res[:]); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return res
|
||||
}
|
109
vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/defs.go
generated
vendored
109
vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/defs.go
generated
vendored
|
@ -1,109 +0,0 @@
|
|||
package remotefs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// RemotefsCmd is the name of the remotefs meta command
|
||||
const RemotefsCmd = "remotefs"
|
||||
|
||||
// Name of the commands when called from the cli context (remotefs <CMD> ...)
|
||||
const (
|
||||
StatCmd = "stat"
|
||||
LstatCmd = "lstat"
|
||||
ReadlinkCmd = "readlink"
|
||||
MkdirCmd = "mkdir"
|
||||
MkdirAllCmd = "mkdirall"
|
||||
RemoveCmd = "remove"
|
||||
RemoveAllCmd = "removeall"
|
||||
LinkCmd = "link"
|
||||
SymlinkCmd = "symlink"
|
||||
LchmodCmd = "lchmod"
|
||||
LchownCmd = "lchown"
|
||||
MknodCmd = "mknod"
|
||||
MkfifoCmd = "mkfifo"
|
||||
OpenFileCmd = "openfile"
|
||||
ReadFileCmd = "readfile"
|
||||
WriteFileCmd = "writefile"
|
||||
ReadDirCmd = "readdir"
|
||||
ResolvePathCmd = "resolvepath"
|
||||
ExtractArchiveCmd = "extractarchive"
|
||||
ArchivePathCmd = "archivepath"
|
||||
)
|
||||
|
||||
// ErrInvalid is returned if the parameters are invalid
|
||||
var ErrInvalid = errors.New("invalid arguments")
|
||||
|
||||
// ErrUnknown is returned for an unknown remotefs command
|
||||
var ErrUnknown = errors.New("unkown command")
|
||||
|
||||
// ExportedError is the serialized version of the a Go error.
|
||||
// It also provides a trivial implementation of the error interface.
|
||||
type ExportedError struct {
|
||||
ErrString string
|
||||
ErrNum int `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Error returns an error string
|
||||
func (ee *ExportedError) Error() string {
|
||||
return ee.ErrString
|
||||
}
|
||||
|
||||
// FileInfo is the stat struct returned by the remotefs system. It
|
||||
// fulfills the os.FileInfo interface.
|
||||
type FileInfo struct {
|
||||
NameVar string
|
||||
SizeVar int64
|
||||
ModeVar os.FileMode
|
||||
ModTimeVar int64 // Serialization of time.Time breaks in travis, so use an int
|
||||
IsDirVar bool
|
||||
}
|
||||
|
||||
var _ os.FileInfo = &FileInfo{}
|
||||
|
||||
// Name returns the filename from a FileInfo structure
|
||||
func (f *FileInfo) Name() string { return f.NameVar }
|
||||
|
||||
// Size returns the size from a FileInfo structure
|
||||
func (f *FileInfo) Size() int64 { return f.SizeVar }
|
||||
|
||||
// Mode returns the mode from a FileInfo structure
|
||||
func (f *FileInfo) Mode() os.FileMode { return f.ModeVar }
|
||||
|
||||
// ModTime returns the modification time from a FileInfo structure
|
||||
func (f *FileInfo) ModTime() time.Time { return time.Unix(0, f.ModTimeVar) }
|
||||
|
||||
// IsDir returns the is-directory indicator from a FileInfo structure
|
||||
func (f *FileInfo) IsDir() bool { return f.IsDirVar }
|
||||
|
||||
// Sys provides an interface to a FileInfo structure
|
||||
func (f *FileInfo) Sys() interface{} { return nil }
|
||||
|
||||
// FileHeader is a header for remote *os.File operations for remotefs.OpenFile
|
||||
type FileHeader struct {
|
||||
Cmd uint32
|
||||
Size uint64
|
||||
}
|
||||
|
||||
const (
|
||||
// Read request command.
|
||||
Read uint32 = iota
|
||||
// Write request command.
|
||||
Write
|
||||
// Seek request command.
|
||||
Seek
|
||||
// Close request command.
|
||||
Close
|
||||
// CmdOK is a response meaning request succeeded.
|
||||
CmdOK
|
||||
// CmdFailed is a response meaning request failed.
|
||||
CmdFailed
|
||||
)
|
||||
|
||||
// SeekHeader is header for the Seek operation for remotefs.OpenFile
|
||||
type SeekHeader struct {
|
||||
Offset int64
|
||||
Whence int32
|
||||
}
|
578
vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/remotefs.go
generated
vendored
578
vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/remotefs.go
generated
vendored
|
@ -1,578 +0,0 @@
|
|||
// +build !windows
|
||||
|
||||
package remotefs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/symlink"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Func is the function definition for a generic remote fs function
|
||||
// The input to the function is any serialized structs / data from in and the string slice
|
||||
// from args. The output of the function will be serialized and written to out.
|
||||
type Func func(stdin io.Reader, stdout io.Writer, args []string) error
|
||||
|
||||
// Commands provide a string -> remotefs function mapping.
|
||||
// This is useful for commandline programs that will receive a string
|
||||
// as the function to execute.
|
||||
var Commands = map[string]Func{
|
||||
StatCmd: Stat,
|
||||
LstatCmd: Lstat,
|
||||
ReadlinkCmd: Readlink,
|
||||
MkdirCmd: Mkdir,
|
||||
MkdirAllCmd: MkdirAll,
|
||||
RemoveCmd: Remove,
|
||||
RemoveAllCmd: RemoveAll,
|
||||
LinkCmd: Link,
|
||||
SymlinkCmd: Symlink,
|
||||
LchmodCmd: Lchmod,
|
||||
LchownCmd: Lchown,
|
||||
MknodCmd: Mknod,
|
||||
MkfifoCmd: Mkfifo,
|
||||
OpenFileCmd: OpenFile,
|
||||
ReadFileCmd: ReadFile,
|
||||
WriteFileCmd: WriteFile,
|
||||
ReadDirCmd: ReadDir,
|
||||
ResolvePathCmd: ResolvePath,
|
||||
ExtractArchiveCmd: ExtractArchive,
|
||||
ArchivePathCmd: ArchivePath,
|
||||
}
|
||||
|
||||
// Stat functions like os.Stat.
|
||||
// Args:
|
||||
// - args[0] is the path
|
||||
// Out:
|
||||
// - out = FileInfo object
|
||||
func Stat(in io.Reader, out io.Writer, args []string) error {
|
||||
return stat(in, out, args, os.Stat)
|
||||
}
|
||||
|
||||
// Lstat functions like os.Lstat.
|
||||
// Args:
|
||||
// - args[0] is the path
|
||||
// Out:
|
||||
// - out = FileInfo object
|
||||
func Lstat(in io.Reader, out io.Writer, args []string) error {
|
||||
return stat(in, out, args, os.Lstat)
|
||||
}
|
||||
|
||||
func stat(in io.Reader, out io.Writer, args []string, statfunc func(string) (os.FileInfo, error)) error {
|
||||
if len(args) < 1 {
|
||||
return ErrInvalid
|
||||
}
|
||||
|
||||
fi, err := statfunc(args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
info := FileInfo{
|
||||
NameVar: fi.Name(),
|
||||
SizeVar: fi.Size(),
|
||||
ModeVar: fi.Mode(),
|
||||
ModTimeVar: fi.ModTime().UnixNano(),
|
||||
IsDirVar: fi.IsDir(),
|
||||
}
|
||||
|
||||
buf, err := json.Marshal(info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := out.Write(buf); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Readlink works like os.Readlink
|
||||
// In:
|
||||
// - args[0] is path
|
||||
// Out:
|
||||
// - Write link result to out
|
||||
func Readlink(in io.Reader, out io.Writer, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return ErrInvalid
|
||||
}
|
||||
|
||||
l, err := os.Readlink(args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := out.Write([]byte(l)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Mkdir works like os.Mkdir
|
||||
// Args:
|
||||
// - args[0] is the path
|
||||
// - args[1] is the permissions in octal (like 0755)
|
||||
func Mkdir(in io.Reader, out io.Writer, args []string) error {
|
||||
return mkdir(in, out, args, os.Mkdir)
|
||||
}
|
||||
|
||||
// MkdirAll works like os.MkdirAll.
|
||||
// Args:
|
||||
// - args[0] is the path
|
||||
// - args[1] is the permissions in octal (like 0755)
|
||||
func MkdirAll(in io.Reader, out io.Writer, args []string) error {
|
||||
return mkdir(in, out, args, os.MkdirAll)
|
||||
}
|
||||
|
||||
func mkdir(in io.Reader, out io.Writer, args []string, mkdirFunc func(string, os.FileMode) error) error {
|
||||
if len(args) < 2 {
|
||||
return ErrInvalid
|
||||
}
|
||||
|
||||
perm, err := strconv.ParseUint(args[1], 8, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return mkdirFunc(args[0], os.FileMode(perm))
|
||||
}
|
||||
|
||||
// Remove works like os.Remove
|
||||
// Args:
|
||||
// - args[0] is the path
|
||||
func Remove(in io.Reader, out io.Writer, args []string) error {
|
||||
return remove(in, out, args, os.Remove)
|
||||
}
|
||||
|
||||
// RemoveAll works like os.RemoveAll
|
||||
// Args:
|
||||
// - args[0] is the path
|
||||
func RemoveAll(in io.Reader, out io.Writer, args []string) error {
|
||||
return remove(in, out, args, os.RemoveAll)
|
||||
}
|
||||
|
||||
func remove(in io.Reader, out io.Writer, args []string, removefunc func(string) error) error {
|
||||
if len(args) < 1 {
|
||||
return ErrInvalid
|
||||
}
|
||||
return removefunc(args[0])
|
||||
}
|
||||
|
||||
// Link works like os.Link
|
||||
// Args:
|
||||
// - args[0] = old path name (link source)
|
||||
// - args[1] = new path name (link dest)
|
||||
func Link(in io.Reader, out io.Writer, args []string) error {
|
||||
return link(in, out, args, os.Link)
|
||||
}
|
||||
|
||||
// Symlink works like os.Symlink
|
||||
// Args:
|
||||
// - args[0] = old path name (link source)
|
||||
// - args[1] = new path name (link dest)
|
||||
func Symlink(in io.Reader, out io.Writer, args []string) error {
|
||||
return link(in, out, args, os.Symlink)
|
||||
}
|
||||
|
||||
func link(in io.Reader, out io.Writer, args []string, linkfunc func(string, string) error) error {
|
||||
if len(args) < 2 {
|
||||
return ErrInvalid
|
||||
}
|
||||
return linkfunc(args[0], args[1])
|
||||
}
|
||||
|
||||
// Lchmod changes permission of the given file without following symlinks
|
||||
// Args:
|
||||
// - args[0] = path
|
||||
// - args[1] = permission mode in octal (like 0755)
|
||||
func Lchmod(in io.Reader, out io.Writer, args []string) error {
|
||||
if len(args) < 2 {
|
||||
return ErrInvalid
|
||||
}
|
||||
|
||||
perm, err := strconv.ParseUint(args[1], 8, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path := args[0]
|
||||
if !filepath.IsAbs(path) {
|
||||
path, err = filepath.Abs(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return unix.Fchmodat(0, path, uint32(perm), unix.AT_SYMLINK_NOFOLLOW)
|
||||
}
|
||||
|
||||
// Lchown works like os.Lchown
|
||||
// Args:
|
||||
// - args[0] = path
|
||||
// - args[1] = uid in base 10
|
||||
// - args[2] = gid in base 10
|
||||
func Lchown(in io.Reader, out io.Writer, args []string) error {
|
||||
if len(args) < 3 {
|
||||
return ErrInvalid
|
||||
}
|
||||
|
||||
uid, err := strconv.ParseInt(args[1], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gid, err := strconv.ParseInt(args[2], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Lchown(args[0], int(uid), int(gid))
|
||||
}
|
||||
|
||||
// Mknod works like syscall.Mknod
|
||||
// Args:
|
||||
// - args[0] = path
|
||||
// - args[1] = permission mode in octal (like 0755)
|
||||
// - args[2] = major device number in base 10
|
||||
// - args[3] = minor device number in base 10
|
||||
func Mknod(in io.Reader, out io.Writer, args []string) error {
|
||||
if len(args) < 4 {
|
||||
return ErrInvalid
|
||||
}
|
||||
|
||||
perm, err := strconv.ParseUint(args[1], 8, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
major, err := strconv.ParseInt(args[2], 10, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
minor, err := strconv.ParseInt(args[3], 10, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dev := unix.Mkdev(uint32(major), uint32(minor))
|
||||
return unix.Mknod(args[0], uint32(perm), int(dev))
|
||||
}
|
||||
|
||||
// Mkfifo creates a FIFO special file with the given path name and permissions
|
||||
// Args:
|
||||
// - args[0] = path
|
||||
// - args[1] = permission mode in octal (like 0755)
|
||||
func Mkfifo(in io.Reader, out io.Writer, args []string) error {
|
||||
if len(args) < 2 {
|
||||
return ErrInvalid
|
||||
}
|
||||
|
||||
perm, err := strconv.ParseUint(args[1], 8, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return unix.Mkfifo(args[0], uint32(perm))
|
||||
}
|
||||
|
||||

// OpenFile works like os.OpenFile. To manage the file pointer state,
// this function acts as a "file server" for a single open file:
// Read/Write/Seek/Close requests arrive as serialized control codes on in.
// Args:
// - args[0] = path
// - args[1] = flag in base 10
// - args[2] = permission mode in octal (like 0755)
func OpenFile(in io.Reader, out io.Writer, args []string) (err error) {
    logrus.Debugf("OpenFile: %v", args)

    defer func() {
        if err != nil {
            logrus.Errorf("OpenFile: return is non-nil, so writing cmdFailed back: %v", err)
            // error code will be serialized by the caller, so don't write it here
            WriteFileHeader(out, &FileHeader{Cmd: CmdFailed}, nil)
        }
    }()

    if len(args) < 3 {
        logrus.Errorf("OpenFile: Not enough parameters")
        return ErrInvalid
    }

    flag, err := strconv.ParseInt(args[1], 10, 32)
    if err != nil {
        logrus.Errorf("OpenFile: Invalid flag: %v", err)
        return err
    }

    perm, err := strconv.ParseUint(args[2], 8, 32)
    if err != nil {
        logrus.Errorf("OpenFile: Invalid permission: %v", err)
        return err
    }

    f, err := os.OpenFile(args[0], int(flag), os.FileMode(perm))
    if err != nil {
        logrus.Errorf("OpenFile: Failed to open: %v", err)
        return err
    }

    // Signal the client that OpenFile succeeded
    logrus.Debugf("OpenFile: Sending OK header")
    if err := WriteFileHeader(out, &FileHeader{Cmd: CmdOK}, nil); err != nil {
        return err
    }

    for {
        logrus.Debugf("OpenFile: reading header")
        hdr, err := ReadFileHeader(in)
        if err != nil {
            logrus.Errorf("OpenFile: Failed to ReadFileHeader: %v", err)
            return err
        }
        logrus.Debugf("OpenFile: Header: %+v", hdr)

        var buf []byte
        switch hdr.Cmd {
        case Read:
            logrus.Debugf("OpenFile: Read command")
            buf = make([]byte, hdr.Size)
            n, err := f.Read(buf)
            logrus.Debugf("OpenFile: Issued a read for %d, got %d bytes and error %v", hdr.Size, n, err)
            if err != nil {
                logrus.Errorf("OpenFile: Read failed: %v", err)
                return err
            }
            buf = buf[:n]
        case Write:
            logrus.Debugf("OpenFile: Write command")
            if _, err := io.CopyN(f, in, int64(hdr.Size)); err != nil {
                logrus.Errorf("OpenFile: Write CopyN() failed: %v", err)
                return err
            }
        case Seek:
            logrus.Debugf("OpenFile: Seek command")
            seekHdr := &SeekHeader{}
            if err := binary.Read(in, binary.BigEndian, seekHdr); err != nil {
                logrus.Errorf("OpenFile: Seek Read() failed: %v", err)
                return err
            }
            res, err := f.Seek(seekHdr.Offset, int(seekHdr.Whence))
            if err != nil {
                logrus.Errorf("OpenFile: Seek Seek() failed: %v", err)
                return err
            }
            buffer := &bytes.Buffer{}
            if err := binary.Write(buffer, binary.BigEndian, res); err != nil {
                logrus.Errorf("OpenFile: Seek Write() failed: %v", err)
                return err
            }
            buf = buffer.Bytes()
        case Close:
            logrus.Debugf("OpenFile: Close command")
            if err := f.Close(); err != nil {
                return err
            }
        default:
            logrus.Errorf("OpenFile: unknown command")
            return ErrUnknown
        }

        logrus.Debugf("OpenFile: Writing back OK header of size %d", len(buf))
        retHdr := &FileHeader{
            Cmd:  CmdOK,
            Size: uint64(len(buf)),
        }
        if err := WriteFileHeader(out, retHdr, buf); err != nil {
            logrus.Errorf("OpenFile: WriteFileHeader() failed: %v", err)
            return err
        }

        if hdr.Cmd == Close {
            break
        }
    }
    logrus.Debugf("OpenFile: Done, no error")
    return nil
}
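
Illustrative sketch (not part of the removed file): the loop above is a small request/response protocol, so a client can drive an open file purely through the in/out streams. This harness wires OpenFile to in-memory pipes; the temp-file path, the base-10 flag "0" (O_RDONLY) and mode "644" are assumptions made only for this demo, the import path assumes the pre-removal vendored tree, and error handling is trimmed for brevity.

package main

import (
    "fmt"
    "io"
    "io/ioutil"

    "github.com/Microsoft/opengcs/service/gcsutils/remotefs"
)

func main() {
    tmp, _ := ioutil.TempFile("", "remotefs-demo") // demo file to serve
    tmp.WriteString("hello remotefs")
    tmp.Close()

    inR, inW := io.Pipe()   // client -> server commands
    outR, outW := io.Pipe() // server -> client responses

    go func() {
        // args: path, flag in base 10 ("0" == O_RDONLY), mode in octal
        remotefs.OpenFile(inR, outW, []string{tmp.Name(), "0", "644"})
        outW.Close()
    }()

    // 1. OpenFile signals a successful open with a CmdOK header.
    if hdr, err := remotefs.ReadFileHeader(outR); err != nil || hdr.Cmd != remotefs.CmdOK {
        panic(fmt.Sprint("open failed: ", err))
    }

    // 2. Ask the server to read 5 bytes; the reply is a CmdOK header plus the data.
    remotefs.WriteFileHeader(inW, &remotefs.FileHeader{Cmd: remotefs.Read, Size: 5}, nil)
    hdr, err := remotefs.ReadFileHeader(outR)
    if err != nil {
        panic(err)
    }
    data := make([]byte, hdr.Size)
    io.ReadFull(outR, data)
    fmt.Printf("read %q\n", data)

    // 3. Close the served file; the server replies with a final CmdOK and returns.
    remotefs.WriteFileHeader(inW, &remotefs.FileHeader{Cmd: remotefs.Close, Size: 0}, nil)
    remotefs.ReadFileHeader(outR)
    io.Copy(ioutil.Discard, outR) // drain until the server closes its end
}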

// ReadFile works like ioutil.ReadFile but instead writes the file to a writer
// Args:
// - args[0] = path
// Out:
// - Write file contents to out
func ReadFile(in io.Reader, out io.Writer, args []string) error {
    if len(args) < 1 {
        return ErrInvalid
    }

    f, err := os.Open(args[0])
    if err != nil {
        return err
    }
    defer f.Close()

    if _, err := io.Copy(out, f); err != nil {
        return err
    }
    return nil
}

// WriteFile works like ioutil.WriteFile but instead reads the file from a reader
// Args:
// - args[0] = path
// - args[1] = permission mode in octal (like 0755)
// - input data stream from in
func WriteFile(in io.Reader, out io.Writer, args []string) error {
    if len(args) < 2 {
        return ErrInvalid
    }

    perm, err := strconv.ParseUint(args[1], 8, 32)
    if err != nil {
        return err
    }

    f, err := os.OpenFile(args[0], os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(perm))
    if err != nil {
        return err
    }
    defer f.Close()

    if _, err := io.Copy(f, in); err != nil {
        return err
    }
    return nil
}
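
Illustrative sketch (not part of the removed file): WriteFile takes its contents from in and ReadFile streams them to out, so a local round trip only needs a reader and a buffer. The /tmp path and mode are assumptions for the demo, and the unused stream argument is passed as nil.

package main

import (
    "bytes"
    "fmt"
    "strings"

    "github.com/Microsoft/opengcs/service/gcsutils/remotefs"
)

func main() {
    path := "/tmp/remotefs-demo.txt" // hypothetical target path

    // WriteFile: contents come from `in`; args carry the path and the octal mode.
    if err := remotefs.WriteFile(strings.NewReader("some payload"), nil, []string{path, "644"}); err != nil {
        panic(err)
    }

    // ReadFile: contents are streamed to `out`.
    out := &bytes.Buffer{}
    if err := remotefs.ReadFile(nil, out, []string{path}); err != nil {
        panic(err)
    }
    fmt.Println(out.String()) // "some payload"
}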

// ReadDir works like *os.File.Readdir but instead writes the result to a writer
// Args:
// - args[0] = path
// - args[1] = number of directory entries to return. If <= 0, return all entries in directory
func ReadDir(in io.Reader, out io.Writer, args []string) error {
    if len(args) < 2 {
        return ErrInvalid
    }

    n, err := strconv.ParseInt(args[1], 10, 32)
    if err != nil {
        return err
    }

    f, err := os.Open(args[0])
    if err != nil {
        return err
    }
    defer f.Close()

    infos, err := f.Readdir(int(n))
    if err != nil {
        return err
    }

    fileInfos := make([]FileInfo, len(infos))
    for i := range infos {
        fileInfos[i] = FileInfo{
            NameVar:    infos[i].Name(),
            SizeVar:    infos[i].Size(),
            ModeVar:    infos[i].Mode(),
            ModTimeVar: infos[i].ModTime().UnixNano(),
            IsDirVar:   infos[i].IsDir(),
        }
    }

    buf, err := json.Marshal(fileInfos)
    if err != nil {
        return err
    }

    if _, err := out.Write(buf); err != nil {
        return err
    }
    return nil
}
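
Illustrative sketch (not part of the removed file): ReadDir marshals a []FileInfo to JSON on out, so a caller can decode it back with the same struct. The directory path is an assumption for the demo; "0" asks for all entries.

package main

import (
    "bytes"
    "encoding/json"
    "fmt"

    "github.com/Microsoft/opengcs/service/gcsutils/remotefs"
)

func main() {
    out := &bytes.Buffer{}
    if err := remotefs.ReadDir(nil, out, []string{"/tmp", "0"}); err != nil {
        panic(err)
    }

    var infos []remotefs.FileInfo
    if err := json.Unmarshal(out.Bytes(), &infos); err != nil {
        panic(err)
    }
    for _, fi := range infos {
        fmt.Println(fi.NameVar, fi.SizeVar, fi.IsDirVar)
    }
}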

// ResolvePath works like docker's symlink.FollowSymlinkInScope.
// It takes in a `path` and a `root` and evaluates symlinks in `path`
// as if they were scoped in `root`. `path` must be a child path of `root`.
// In other words, `path` must have `root` as a prefix.
// Example:
// path=/foo/bar -> /baz
// root=/foo
// Expected result = /foo/baz
//
// Args:
// - args[0] is `path`
// - args[1] is `root`
// Out:
// - Write resolved path to out
func ResolvePath(in io.Reader, out io.Writer, args []string) error {
    if len(args) < 2 {
        return ErrInvalid
    }
    res, err := symlink.FollowSymlinkInScope(args[0], args[1])
    if err != nil {
        return err
    }
    if _, err = out.Write([]byte(res)); err != nil {
        return err
    }
    return nil
}
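
Illustrative sketch (not part of the removed file): ResolvePath writes the scoped resolution result to out. The paths below are hypothetical; /data/link is resolved as if /data were the filesystem root.

package main

import (
    "bytes"
    "fmt"

    "github.com/Microsoft/opengcs/service/gcsutils/remotefs"
)

func main() {
    out := &bytes.Buffer{}
    // args: path, then the root that scopes symlink evaluation.
    if err := remotefs.ResolvePath(nil, out, []string{"/data/link", "/data"}); err != nil {
        panic(err)
    }
    fmt.Println(out.String()) // e.g. /data/target if /data/link -> /target
}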

// ExtractArchive extracts the archive read from in.
// Args:
// - in = size of json | json of archive.TarOptions | input tar stream
// - args[0] = extract directory name
func ExtractArchive(in io.Reader, out io.Writer, args []string) error {
    logrus.Debugln("ExtractArchive:", args)
    if len(args) < 1 {
        logrus.Errorln("ExtractArchive: invalid args")
        return ErrInvalid
    }

    opts, err := ReadTarOptions(in)
    if err != nil {
        logrus.Errorf("ExtractArchive: Failed to read tar options: %v", err)
        return err
    }

    logrus.Debugf("ExtractArchive: Tar options: %+v", opts)
    if err := archive.Untar(in, args[0], opts); err != nil {
        logrus.Errorf("ExtractArchive: Failed to Untar: %v", err)
        return err
    }
    logrus.Debugf("ExtractArchive: Success")
    return nil
}

// ArchivePath archives the given directory and writes it to out.
// Args:
// - in = size of json | json of archive.TarOptions
// - args[0] = source directory name
// Out:
// - out = tar file of the archive
func ArchivePath(in io.Reader, out io.Writer, args []string) error {
    if len(args) < 1 {
        return ErrInvalid
    }

    opts, err := ReadTarOptions(in)
    if err != nil {
        return err
    }

    r, err := archive.TarWithOptions(args[0], opts)
    if err != nil {
        return err
    }

    if _, err := io.Copy(out, r); err != nil {
        return err
    }
    return nil
}
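
Illustrative sketch (not part of the removed file): ArchivePath and ExtractArchive both expect a length-prefixed archive.TarOptions on their input stream (framed with WriteTarOptions from utils.go, shown below), so the two can be chained through a pipe to copy a directory tree. The /tmp/src and /tmp/dst paths are assumptions for the demo, and error handling is trimmed.

package main

import (
    "bytes"
    "io"

    "github.com/Microsoft/opengcs/service/gcsutils/remotefs"
    "github.com/docker/docker/pkg/archive"
)

func main() {
    // Each side reads its own framed TarOptions before touching the tar stream.
    srcOpts, dstOpts := &bytes.Buffer{}, &bytes.Buffer{}
    remotefs.WriteTarOptions(srcOpts, &archive.TarOptions{})
    remotefs.WriteTarOptions(dstOpts, &archive.TarOptions{})

    pr, pw := io.Pipe()
    go func() {
        // Tar up /tmp/src and stream it through the pipe.
        err := remotefs.ArchivePath(srcOpts, pw, []string{"/tmp/src"})
        pw.CloseWithError(err)
    }()

    // Untar the framed options plus the streamed tar into /tmp/dst.
    in := io.MultiReader(dstOpts, pr)
    if err := remotefs.ExtractArchive(in, nil, []string{"/tmp/dst"}); err != nil {
        panic(err)
    }
}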

170
vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/utils.go
generated
vendored
@@ -1,170 +0,0 @@

package remotefs

import (
    "bytes"
    "encoding/binary"
    "encoding/json"
    "io"
    "io/ioutil"
    "os"
    "syscall"

    "github.com/docker/docker/pkg/archive"
)

// ReadError is a utility function that reads a serialized error from the given reader
// and deserializes it.
func ReadError(in io.Reader) (*ExportedError, error) {
    b, err := ioutil.ReadAll(in)
    if err != nil {
        return nil, err
    }

    // No error
    if len(b) == 0 {
        return nil, nil
    }

    var exportedErr ExportedError
    if err := json.Unmarshal(b, &exportedErr); err != nil {
        return nil, err
    }

    return &exportedErr, nil
}

// ExportedToError will convert an ExportedError to an error. It will try to match
// the error to any existing known error like os.ErrNotExist. Otherwise, it will just
// return an implementation of the error interface.
func ExportedToError(ee *ExportedError) error {
    if ee.Error() == os.ErrNotExist.Error() {
        return os.ErrNotExist
    } else if ee.Error() == os.ErrExist.Error() {
        return os.ErrExist
    } else if ee.Error() == os.ErrPermission.Error() {
        return os.ErrPermission
    } else if ee.Error() == io.EOF.Error() {
        return io.EOF
    }
    return ee
}

// WriteError is a utility function that serializes the error
// and writes it to the output writer.
func WriteError(err error, out io.Writer) error {
    if err == nil {
        return nil
    }
    err = fixOSError(err)

    var errno int
    switch typedError := err.(type) {
    case *os.PathError:
        if se, ok := typedError.Err.(syscall.Errno); ok {
            errno = int(se)
        }
    case *os.LinkError:
        if se, ok := typedError.Err.(syscall.Errno); ok {
            errno = int(se)
        }
    case *os.SyscallError:
        if se, ok := typedError.Err.(syscall.Errno); ok {
            errno = int(se)
        }
    }

    exportedError := &ExportedError{
        ErrString: err.Error(),
        ErrNum:    errno,
    }

    b, err1 := json.Marshal(exportedError)
    if err1 != nil {
        return err1
    }

    _, err1 = out.Write(b)
    if err1 != nil {
        return err1
    }
    return nil
}
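
Illustrative sketch (not part of the removed file): WriteError and ReadError carry an error across the wire as JSON, and ExportedToError maps it back onto a portable sentinel. The missing path below is an assumption for the demo and must not exist for the printed result to be true.

package main

import (
    "bytes"
    "fmt"
    "os"

    "github.com/Microsoft/opengcs/service/gcsutils/remotefs"
)

func main() {
    // Server side: serialize the *os.PathError from opening a missing file.
    _, openErr := os.Open("/no/such/file")
    wire := &bytes.Buffer{}
    if err := remotefs.WriteError(openErr, wire); err != nil {
        panic(err)
    }

    // Client side: deserialize it and map it back onto a portable error value.
    exported, err := remotefs.ReadError(wire)
    if err != nil {
        panic(err)
    }
    fmt.Println(remotefs.ExportedToError(exported) == os.ErrNotExist) // true
}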

// fixOSError converts a possibly platform-dependent error into one of the portable
// errors in the Go os package, if possible.
func fixOSError(err error) error {
    // The os.IsExist, os.IsNotExist, and os.IsPermission functions are platform
    // dependent, so sending the raw error might break those functions on a different OS.
    // Go defines portable errors for these.
    if os.IsExist(err) {
        return os.ErrExist
    } else if os.IsNotExist(err) {
        return os.ErrNotExist
    } else if os.IsPermission(err) {
        return os.ErrPermission
    }
    return err
}

// ReadTarOptions reads from the specified reader and deserializes an archive.TarOptions struct.
func ReadTarOptions(r io.Reader) (*archive.TarOptions, error) {
    var size uint64
    if err := binary.Read(r, binary.BigEndian, &size); err != nil {
        return nil, err
    }

    rawJSON := make([]byte, size)
    if _, err := io.ReadFull(r, rawJSON); err != nil {
        return nil, err
    }

    var opts archive.TarOptions
    if err := json.Unmarshal(rawJSON, &opts); err != nil {
        return nil, err
    }
    return &opts, nil
}

// WriteTarOptions serializes an archive.TarOptions struct and writes it to the writer.
func WriteTarOptions(w io.Writer, opts *archive.TarOptions) error {
    optsBuf, err := json.Marshal(opts)
    if err != nil {
        return err
    }

    optsSize := uint64(len(optsBuf))
    optsSizeBuf := &bytes.Buffer{}
    if err := binary.Write(optsSizeBuf, binary.BigEndian, optsSize); err != nil {
        return err
    }

    if _, err := optsSizeBuf.WriteTo(w); err != nil {
        return err
    }

    if _, err := w.Write(optsBuf); err != nil {
        return err
    }

    return nil
}
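
Illustrative sketch (not part of the removed file): this pair frames a TarOptions as an 8-byte big-endian length followed by its JSON encoding, which is exactly what ExtractArchive and ArchivePath read first. ExcludePatterns is just a convenient field to show the round trip.

package main

import (
    "bytes"
    "fmt"

    "github.com/Microsoft/opengcs/service/gcsutils/remotefs"
    "github.com/docker/docker/pkg/archive"
)

func main() {
    buf := &bytes.Buffer{}
    if err := remotefs.WriteTarOptions(buf, &archive.TarOptions{ExcludePatterns: []string{"*.tmp"}}); err != nil {
        panic(err)
    }

    opts, err := remotefs.ReadTarOptions(buf)
    if err != nil {
        panic(err)
    }
    fmt.Println(opts.ExcludePatterns) // [*.tmp]
}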

// ReadFileHeader reads from r and returns a deserialized FileHeader
func ReadFileHeader(r io.Reader) (*FileHeader, error) {
    hdr := &FileHeader{}
    if err := binary.Read(r, binary.BigEndian, hdr); err != nil {
        return nil, err
    }
    return hdr, nil
}

// WriteFileHeader serializes a FileHeader and writes it to w, along with any extra data
func WriteFileHeader(w io.Writer, hdr *FileHeader, extraData []byte) error {
    if err := binary.Write(w, binary.BigEndian, hdr); err != nil {
        return err
    }
    if _, err := w.Write(extraData); err != nil {
        return err
    }
    return nil
}
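
Illustrative sketch (not part of the removed file): FileHeader is a fixed-size struct written with encoding/binary in big-endian order, optionally followed by a payload. The field set used here (Cmd, Size) and the CmdOK constant are taken from the code above; error handling is trimmed for the trailing read.

package main

import (
    "bytes"
    "fmt"

    "github.com/Microsoft/opengcs/service/gcsutils/remotefs"
)

func main() {
    buf := &bytes.Buffer{}
    payload := []byte("hello")

    hdr := &remotefs.FileHeader{Cmd: remotefs.CmdOK, Size: uint64(len(payload))}
    if err := remotefs.WriteFileHeader(buf, hdr, payload); err != nil {
        panic(err)
    }

    got, err := remotefs.ReadFileHeader(buf)
    if err != nil {
        panic(err)
    }
    data := make([]byte, got.Size)
    buf.Read(data)
    fmt.Printf("%+v %q\n", got, data) // header fields, then "hello"
}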