Merge pull request #34252 from Microsoft/akagup/lcow-remotefs-sandbox
LCOW: Support for docker cp, ADD/COPY on build
This commit is contained in commit a5f9783c93.
143 changed files with 5862 additions and 960 deletions
@@ -2,7 +2,6 @@ package httputils
import (
"net/http"
"path/filepath"
"strconv"
"strings"
)

@@ -69,8 +68,7 @@ func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions,
if name == "" {
return ArchiveOptions{}, badParameterError{"name"}
}

path := filepath.FromSlash(r.Form.Get("path"))
path := r.Form.Get("path")
if path == "" {
return ArchiveOptions{}, badParameterError{"path"}
}

@@ -12,6 +12,7 @@ import (
"github.com/docker/docker/api/types/container"
containerpkg "github.com/docker/docker/container"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/containerfs"
"golang.org/x/net/context"
)

@@ -24,7 +25,7 @@ const (
// instructions in the builder.
type Source interface {
// Root returns root path for accessing source
Root() string
Root() containerfs.ContainerFS
// Close allows to signal that the filesystem tree won't be used anymore.
// For Context implementations using a temporary directory, it is recommended to
// delete the temporary directory in Close().

@@ -99,7 +100,7 @@ type Image interface {
// ReleaseableLayer is an image layer that can be mounted and released
type ReleaseableLayer interface {
Release() error
Mount() (string, error)
Mount() (containerfs.ContainerFS, error)
Commit(platform string) (ReleaseableLayer, error)
DiffID() layer.DiffID
}

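Note — illustrative addition, not part of the diff: Source.Root() and ReleaseableLayer.Mount() now return a containerfs.ContainerFS instead of a plain string path, so callers do path work through the driver rather than path/filepath. A minimal sketch using only methods that appear elsewhere in this diff (Path, Join, Stat); the helper name is hypothetical.

    package example

    import "github.com/docker/docker/builder"

    // sourceHasFile shows how a consumer adapts to the new Root() type:
    // joining and stat-ing go through the driver, which applies the source
    // filesystem's own separator and semantics (local or LCOW sandbox).
    func sourceHasFile(src builder.Source, name string) bool {
    	root := src.Root()
    	full := root.Join(root.Path(), name) // driver-aware join
    	_, err := root.Stat(full)            // driver-aware stat
    	return err == nil
    }
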
@@ -17,8 +17,6 @@ import (
"github.com/docker/docker/builder/dockerfile/parser"
"github.com/docker/docker/builder/fscache"
"github.com/docker/docker/builder/remotecontext"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/stringid"

@@ -50,21 +48,21 @@ type SessionGetter interface {

// BuildManager is shared across all Builder objects
type BuildManager struct {
archiver *archive.Archiver
backend builder.Backend
pathCache pathCache // TODO: make this persistent
sg SessionGetter
fsCache *fscache.FSCache
idMappings *idtools.IDMappings
backend builder.Backend
pathCache pathCache // TODO: make this persistent
sg SessionGetter
fsCache *fscache.FSCache
}

// NewBuildManager creates a BuildManager
func NewBuildManager(b builder.Backend, sg SessionGetter, fsCache *fscache.FSCache, idMappings *idtools.IDMappings) (*BuildManager, error) {
bm := &BuildManager{
backend: b,
pathCache: &syncmap.Map{},
sg: sg,
archiver: chrootarchive.NewArchiver(idMappings),
fsCache: fsCache,
backend: b,
pathCache: &syncmap.Map{},
sg: sg,
idMappings: idMappings,
fsCache: fsCache,
}
if err := fsCache.RegisterTransport(remotecontext.ClientSessionRemote, NewClientSessionTransport()); err != nil {
return nil, err

@@ -114,7 +112,7 @@ func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (
ProgressWriter: config.ProgressWriter,
Backend: bm.backend,
PathCache: bm.pathCache,
Archiver: bm.archiver,
IDMappings: bm.idMappings,
Platform: dockerfile.Platform,
}

@@ -160,7 +158,7 @@ type builderOptions struct {
Backend builder.Backend
ProgressWriter backend.ProgressWriter
PathCache pathCache
Archiver *archive.Archiver
IDMappings *idtools.IDMappings
Platform string
}

@@ -177,7 +175,7 @@ type Builder struct {
docker builder.Backend
clientCtx context.Context

archiver *archive.Archiver
idMappings *idtools.IDMappings
buildStages *buildStages
disableCommit bool
buildArgs *buildArgs

@@ -219,7 +217,7 @@ func newBuilder(clientCtx context.Context, options builderOptions) *Builder {
Aux: options.ProgressWriter.AuxFormatter,
Output: options.ProgressWriter.Output,
docker: options.Backend,
archiver: options.Archiver,
idMappings: options.IDMappings,
buildArgs: newBuildArgs(config.BuildArgs),
buildStages: newBuildStages(),
imageSources: newImageSources(clientCtx, options),

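Note — illustrative addition, not part of the diff: the process-wide *archive.Archiver is dropped from BuildManager and Builder; only the IDMappings are kept, and a per-copy Archiver is built later by getArchiver (see further down). A minimal sketch of the part that survives here:

    package dockerfile

    import "github.com/docker/docker/pkg/idtools"

    // rootChownPair is a hypothetical helper naming what the builder now does
    // directly: the root uid/gid pair used when chown-ing copied files comes
    // straight from the retained IDMappings rather than from a shared archiver.
    func rootChownPair(idMappings *idtools.IDMappings) idtools.IDPair {
    	return idMappings.RootPair()
    }
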
@@ -1,6 +1,7 @@
package dockerfile

import (
"archive/tar"
"fmt"
"io"
"mime"

@@ -8,6 +9,7 @@ import (
"net/url"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"time"

@@ -15,11 +17,11 @@ import (
"github.com/docker/docker/builder"
"github.com/docker/docker/builder/remotecontext"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/urlutil"
"github.com/pkg/errors"

@@ -35,14 +37,14 @@ type pathCache interface {
// copyInfo is a data object which stores the metadata about each source file in
// a copyInstruction
type copyInfo struct {
root string
root containerfs.ContainerFS
path string
hash string
noDecompress bool
}

func (c copyInfo) fullPath() (string, error) {
return symlink.FollowSymlinkInScope(filepath.Join(c.root, c.path), c.root)
return c.root.ResolveScopedPath(c.path, true)
}

func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo {
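Note — illustrative addition, not part of the diff: copyInfo.fullPath now delegates scoped resolution to the driver. The change relies on ResolveScopedPath keeping resolution inside the root, the same contract the old symlink.FollowSymlinkInScope call provided. A minimal comparison sketch for a local root:

    package example

    import (
    	"path/filepath"

    	"github.com/docker/docker/pkg/containerfs"
    	"github.com/docker/docker/pkg/symlink"
    )

    // resolveBoth contrasts the old and new resolution paths; for a local
    // driver the two results are expected to match, and neither can escape root.
    func resolveBoth(root containerfs.ContainerFS, rel string) (oldStyle, newStyle string, err error) {
    	oldStyle, err = symlink.FollowSymlinkInScope(filepath.Join(root.Path(), rel), root.Path())
    	if err != nil {
    		return "", "", err
    	}
    	newStyle, err = root.ResolveScopedPath(rel, true)
    	return oldStyle, newStyle, err
    }
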
@@ -71,6 +73,7 @@ type copier struct {
pathCache pathCache
download sourceDownloader
tmpPaths []string
platform string
}

func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier {

@@ -79,6 +82,7 @@ func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, i
pathCache: req.builder.pathCache,
download: download,
imageSource: imageSource,
platform: req.builder.platform,
}
}

@@ -86,13 +90,14 @@ func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstr
inst := copyInstruction{cmdName: cmdName}
last := len(args) - 1

// Work in daemon-specific filepath semantics
inst.dest = filepath.FromSlash(args[last])
// Work in platform-specific filepath semantics
inst.dest = fromSlash(args[last], o.platform)
separator := string(separator(o.platform))
infos, err := o.getCopyInfosForSourcePaths(args[0:last], inst.dest)
if err != nil {
return inst, errors.Wrapf(err, "%s failed", cmdName)
}
if len(infos) > 1 && !strings.HasSuffix(inst.dest, string(os.PathSeparator)) {
if len(infos) > 1 && !strings.HasSuffix(inst.dest, separator) {
return inst, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
}
inst.infos = infos

@@ -122,6 +127,7 @@ func (o *copier) getCopyInfoForSourcePath(orig, dest string) ([]copyInfo, error)
if !urlutil.IsURL(orig) {
return o.calcCopyInfo(orig, true)
}

remote, path, err := o.download(orig)
if err != nil {
return nil, err

@@ -134,7 +140,7 @@ func (o *copier) getCopyInfoForSourcePath(orig, dest string) ([]copyInfo, error)
}
path = unnamedFilename
}
o.tmpPaths = append(o.tmpPaths, remote.Root())
o.tmpPaths = append(o.tmpPaths, remote.Root().Path())

hash, err := remote.Hash(path)
ci := newCopyInfoFromSource(remote, path, hash)

@@ -154,14 +160,6 @@ func (o *copier) Cleanup() {
// TODO: allowWildcards can probably be removed by refactoring this function further.
func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, error) {
imageSource := o.imageSource
if err := validateCopySourcePath(imageSource, origPath); err != nil {
return nil, err
}

// Work in daemon-specific OS filepath semantics
origPath = filepath.FromSlash(origPath)
origPath = strings.TrimPrefix(origPath, string(os.PathSeparator))
origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))

// TODO: do this when creating copier. Requires validateCopySourcePath
// (and other below) to be aware of the difference sources. Why is it only

@@ -178,8 +176,20 @@ func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo,
return nil, errors.Errorf("missing build context")
}

root := o.source.Root()

if err := validateCopySourcePath(imageSource, origPath, root.OS()); err != nil {
return nil, err
}

// Work in source OS specific filepath semantics
// For LCOW, this is NOT the daemon OS.
origPath = root.FromSlash(origPath)
origPath = strings.TrimPrefix(origPath, string(root.Separator()))
origPath = strings.TrimPrefix(origPath, "."+string(root.Separator()))

// Deal with wildcards
if allowWildcards && containsWildcards(origPath) {
if allowWildcards && containsWildcards(origPath, root.OS()) {
return o.copyWithWildcards(origPath)
}
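Note — illustrative addition, not part of the diff: createCopyInstruction now normalizes the destination with the build's target platform (which, under LCOW, can differ from the daemon's GOOS) instead of the daemon's filepath package. A minimal check built from fromSlash and separator, both defined later in this diff:

    package dockerfile

    import "strings"

    // multiSourceDestOK mirrors the trailing-separator rule above: with more
    // than one source, the destination must end in the target platform's
    // separator, e.g. "dir1/dir2/" becomes `dir1\dir2\` when platform is "windows".
    func multiSourceDestOK(requested, platform string) bool {
    	dest := fromSlash(requested, platform)
    	return strings.HasSuffix(dest, string(separator(platform)))
    }
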
@@ -211,6 +221,19 @@ func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo,
return newCopyInfos(newCopyInfoFromSource(o.source, origPath, hash)), nil
}

func containsWildcards(name, platform string) bool {
isWindows := platform == "windows"
for i := 0; i < len(name); i++ {
ch := name[i]
if ch == '\\' && !isWindows {
i++
} else if ch == '*' || ch == '?' || ch == '[' {
return true
}
}
return false
}

func (o *copier) storeInPathCache(im *imageMount, path string, hash string) {
if im != nil {
o.pathCache.Store(im.ImageID()+path, hash)

@@ -218,12 +241,13 @@ func (o *copier) storeInPathCache(im *imageMount, path string, hash string) {
}

func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) {
root := o.source.Root()
var copyInfos []copyInfo
if err := filepath.Walk(o.source.Root(), func(path string, info os.FileInfo, err error) error {
if err := root.Walk(root.Path(), func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
rel, err := remotecontext.Rel(o.source.Root(), path)
rel, err := remotecontext.Rel(root, path)
if err != nil {
return err
}

@@ -231,7 +255,7 @@ func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) {
if rel == "." {
return nil
}
if match, _ := filepath.Match(origPath, rel); !match {
if match, _ := root.Match(origPath, rel); !match {
return nil
}

@@ -273,7 +297,7 @@ func walkSource(source builder.Source, origPath string) ([]string, error) {
}
// Must be a dir
var subfiles []string
err = filepath.Walk(fp, func(path string, info os.FileInfo, err error) error {
err = source.Root().Walk(fp, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
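Note — illustrative addition, not part of the diff: containsWildcards now takes the source platform, because a backslash escapes the next character on Linux but is just a separator on Windows. A few hand-checked cases against the implementation above:

    package dockerfile

    // The same pattern can classify differently per platform.
    var _ = []bool{
    	containsWildcards(`file*.txt`, "linux"),  // true  - '*' wildcard
    	containsWildcards(`dir\*`, "linux"),      // false - '\' escapes the '*'
    	containsWildcards(`dir\*`, "windows"),    // true  - '\' is a separator here
    	containsWildcards(`plain/path`, "linux"), // false - no wildcard characters
    }
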
@@ -398,14 +422,19 @@ func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote b
return
}

lc, err := remotecontext.NewLazySource(tmpDir)
lc, err := remotecontext.NewLazySource(containerfs.NewLocalContainerFS(tmpDir))
return lc, filename, err
}

type copyFileOptions struct {
decompress bool
archiver *archive.Archiver
chownPair idtools.IDPair
archiver Archiver
}

type copyEndpoint struct {
driver containerfs.Driver
path string
}

func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions) error {

@@ -413,6 +442,7 @@ func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions)
if err != nil {
return err
}

destPath, err := dest.fullPath()
if err != nil {
return err

@@ -420,59 +450,90 @@ func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions)

archiver := options.archiver

src, err := os.Stat(srcPath)
srcEndpoint := &copyEndpoint{driver: source.root, path: srcPath}
destEndpoint := &copyEndpoint{driver: dest.root, path: destPath}

src, err := source.root.Stat(srcPath)
if err != nil {
return errors.Wrapf(err, "source path not found")
}
if src.IsDir() {
return copyDirectory(archiver, srcPath, destPath, options.chownPair)
return copyDirectory(archiver, srcEndpoint, destEndpoint, options.chownPair)
}
if options.decompress && archive.IsArchivePath(srcPath) && !source.noDecompress {
if options.decompress && isArchivePath(source.root, srcPath) && !source.noDecompress {
return archiver.UntarPath(srcPath, destPath)
}

destExistsAsDir, err := isExistingDirectory(destPath)
destExistsAsDir, err := isExistingDirectory(destEndpoint)
if err != nil {
return err
}
// dest.path must be used because destPath has already been cleaned of any
// trailing slash
if endsInSlash(dest.path) || destExistsAsDir {
if endsInSlash(dest.root, dest.path) || destExistsAsDir {
// source.path must be used to get the correct filename when the source
// is a symlink
destPath = filepath.Join(destPath, filepath.Base(source.path))
destPath = dest.root.Join(destPath, source.root.Base(source.path))
destEndpoint = &copyEndpoint{driver: dest.root, path: destPath}
}
return copyFile(archiver, srcPath, destPath, options.chownPair)
return copyFile(archiver, srcEndpoint, destEndpoint, options.chownPair)
}

func copyDirectory(archiver *archive.Archiver, source, dest string, chownPair idtools.IDPair) error {
func isArchivePath(driver containerfs.ContainerFS, path string) bool {
file, err := driver.Open(path)
if err != nil {
return false
}
defer file.Close()
rdr, err := archive.DecompressStream(file)
if err != nil {
return false
}
r := tar.NewReader(rdr)
_, err = r.Next()
return err == nil
}

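Note — illustrative addition, not part of the diff: isArchivePath replaces archive.IsArchivePath so the check can run through any containerfs driver (including an LCOW sandbox). The same detection against the local filesystem, as a self-contained sketch:

    package example

    import (
    	"archive/tar"
    	"os"

    	"github.com/docker/docker/pkg/archive"
    )

    // isLocalArchive mirrors the logic above for a plain local path: open the
    // file, wrap it in DecompressStream, and see whether a first tar header
    // can be read. The real helper does the same through driver.Open.
    func isLocalArchive(path string) bool {
    	f, err := os.Open(path)
    	if err != nil {
    		return false
    	}
    	defer f.Close()
    	rdr, err := archive.DecompressStream(f) // gzip/bzip2/xz/uncompressed
    	if err != nil {
    		return false
    	}
    	_, err = tar.NewReader(rdr).Next()
    	return err == nil
    }
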
func copyDirectory(archiver Archiver, source, dest *copyEndpoint, chownPair idtools.IDPair) error {
destExists, err := isExistingDirectory(dest)
if err != nil {
return errors.Wrapf(err, "failed to query destination path")
}
if err := archiver.CopyWithTar(source, dest); err != nil {

if err := archiver.CopyWithTar(source.path, dest.path); err != nil {
return errors.Wrapf(err, "failed to copy directory")
}
return fixPermissions(source, dest, chownPair, !destExists)
// TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
return fixPermissions(source.path, dest.path, chownPair, !destExists)
}

func copyFile(archiver *archive.Archiver, source, dest string, chownPair idtools.IDPair) error {
if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest), 0755, chownPair); err != nil {
return errors.Wrapf(err, "failed to create new directory")
func copyFile(archiver Archiver, source, dest *copyEndpoint, chownPair idtools.IDPair) error {
if runtime.GOOS == "windows" && dest.driver.OS() == "linux" {
// LCOW
if err := dest.driver.MkdirAll(dest.driver.Dir(dest.path), 0755); err != nil {
return errors.Wrapf(err, "failed to create new directory")
}
} else {
if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest.path), 0755, chownPair); err != nil {
// Normal containers
return errors.Wrapf(err, "failed to create new directory")
}
}

if err := archiver.CopyFileWithTar(source, dest); err != nil {

if err := archiver.CopyFileWithTar(source.path, dest.path); err != nil {
return errors.Wrapf(err, "failed to copy file")
}
return fixPermissions(source, dest, chownPair, false)
// TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
return fixPermissions(source.path, dest.path, chownPair, false)
}

func endsInSlash(path string) bool {
return strings.HasSuffix(path, string(os.PathSeparator))
func endsInSlash(driver containerfs.Driver, path string) bool {
return strings.HasSuffix(path, string(driver.Separator()))
}

// isExistingDirectory returns true if the path exists and is a directory
func isExistingDirectory(path string) (bool, error) {
destStat, err := os.Stat(path)
func isExistingDirectory(point *copyEndpoint) (bool, error) {
destStat, err := point.driver.Stat(point.path)
switch {
case os.IsNotExist(err):
return false, nil

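Note — illustrative addition, not part of the diff: copyFile now distinguishes the LCOW case, where a Windows daemon writes into a Linux container filesystem and idtools-based chown does not apply, so directory creation goes through the destination driver instead. A hypothetical helper naming that predicate:

    package dockerfile

    import "runtime"

    // lcowDestination names the condition copyFile branches on above.
    func lcowDestination(dest *copyEndpoint) bool {
    	return runtime.GOOS == "windows" && dest.driver.OS() == "linux"
    }
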
@@ -4,6 +4,7 @@ import (
"net/http"
"testing"

"github.com/docker/docker/pkg/containerfs"
"github.com/gotestyourself/gotestyourself/fs"
"github.com/stretchr/testify/assert"
)

@@ -37,7 +38,7 @@ func TestIsExistingDirectory(t *testing.T) {
}

for _, testcase := range testcases {
result, err := isExistingDirectory(testcase.path)
result, err := isExistingDirectory(&copyEndpoint{driver: containerfs.NewLocalDriver(), path: testcase.path})
if !assert.NoError(t, err) {
continue
}

@@ -6,6 +6,7 @@ import (
"os"
"path/filepath"

"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/idtools"
)

@@ -15,7 +16,8 @@ func fixPermissions(source, destination string, rootIDs idtools.IDPair, override
err error
)
if !overrideSkip {
skipChownRoot, err = isExistingDirectory(destination)
destEndpoint := &copyEndpoint{driver: containerfs.NewLocalDriver(), path: destination}
skipChownRoot, err = isExistingDirectory(destEndpoint)
if err != nil {
return err
}

@@ -40,3 +42,7 @@ func fixPermissions(source, destination string, rootIDs idtools.IDPair, override
return os.Lchown(fullpath, rootIDs.UID, rootIDs.GID)
})
}

func validateCopySourcePath(imageSource *imageMount, origPath, platform string) error {
return nil
}

@@ -1,8 +1,38 @@
package dockerfile

import "github.com/docker/docker/pkg/idtools"
import (
"errors"
"path/filepath"
"strings"

"github.com/docker/docker/pkg/idtools"
)

func fixPermissions(source, destination string, rootIDs idtools.IDPair, overrideSkip bool) error {
// chown is not supported on Windows
return nil
}

func validateCopySourcePath(imageSource *imageMount, origPath, platform string) error {
// validate windows paths from other images + LCOW
if imageSource == nil || platform != "windows" {
return nil
}

origPath = filepath.FromSlash(origPath)
p := strings.ToLower(filepath.Clean(origPath))
if !filepath.IsAbs(p) {
if filepath.VolumeName(p) != "" {
if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths
p = p[:len(p)-1]
}
p += "\\"
} else {
p = filepath.Join("c:\\", p)
}
}
if _, blacklisted := pathBlacklist[p]; blacklisted {
return errors.New("copy from c:\\ or c:\\windows is not allowed on windows")
}
return nil
}

@@ -7,6 +7,9 @@ import (
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"os"
"path"
"path/filepath"
"strconv"
"strings"

@@ -15,13 +18,69 @@ import (
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/system"
lcUser "github.com/opencontainers/runc/libcontainer/user"
"github.com/pkg/errors"
)

// For Windows only
var pathBlacklist = map[string]bool{
"c:\\": true,
"c:\\windows": true,
}

// Archiver defines an interface for copying files from one destination to
// another using Tar/Untar.
type Archiver interface {
TarUntar(src, dst string) error
UntarPath(src, dst string) error
CopyWithTar(src, dst string) error
CopyFileWithTar(src, dst string) error
IDMappings() *idtools.IDMappings
}

// The builder will use the following interfaces if the container fs implements
// these for optimized copies to and from the container.
type extractor interface {
ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error
}

type archiver interface {
ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error)
}

// helper functions to get tar/untar func
func untarFunc(i interface{}) containerfs.UntarFunc {
if ea, ok := i.(extractor); ok {
return ea.ExtractArchive
}
return chrootarchive.Untar
}

func tarFunc(i interface{}) containerfs.TarFunc {
if ap, ok := i.(archiver); ok {
return ap.ArchivePath
}
return archive.TarWithOptions
}

func (b *Builder) getArchiver(src, dst containerfs.Driver) Archiver {
t, u := tarFunc(src), untarFunc(dst)
return &containerfs.Archiver{
SrcDriver: src,
DstDriver: dst,
Tar: t,
Untar: u,
IDMappingsVar: b.idMappings,
}
}

func (b *Builder) commit(dispatchState *dispatchState, comment string) error {
if b.disableCommit {
return nil

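Note — illustrative addition, not part of the diff: tarFunc/untarFunc are capability checks. A driver that implements the optional extractor/archiver interfaces (for example, an LCOW remote filesystem) gets its own implementation used; everything else falls back to chrootarchive.Untar and archive.TarWithOptions. A toy type showing the assertion, with the remote transport left out:

    package dockerfile

    import (
    	"io"

    	"github.com/docker/docker/pkg/archive"
    )

    // remoteFS is a toy driver that advertises the optional extractor capability.
    type remoteFS struct{}

    func (remoteFS) ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error {
    	// e.g. forward the tar stream to a utility VM; omitted in this sketch
    	return nil
    }

    var (
    	_ = untarFunc(remoteFS{}) // resolves to remoteFS.ExtractArchive
    	_ = untarFunc(struct{}{}) // resolves to the chrootarchive.Untar fallback
    )
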
@@ -131,28 +190,29 @@ func (b *Builder) performCopy(state *dispatchState, inst copyInstruction) error
if err != nil {
return errors.Wrapf(err, "failed to get destination image %q", state.imageID)
}
destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, imageMount)

destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, imageMount, b.platform)
if err != nil {
return err
}

chownPair := b.archiver.IDMappings.RootPair()
chownPair := b.idMappings.RootPair()
// if a chown was requested, perform the steps to get the uid, gid
// translated (if necessary because of user namespaces), and replace
// the root pair with the chown pair for copy operations
if inst.chownStr != "" {
chownPair, err = parseChownFlag(inst.chownStr, destInfo.root, b.archiver.IDMappings)
chownPair, err = parseChownFlag(inst.chownStr, destInfo.root.Path(), b.idMappings)
if err != nil {
return errors.Wrapf(err, "unable to convert uid/gid chown string to host mapping")
}
}

opts := copyFileOptions{
decompress: inst.allowLocalDecompression,
archiver: b.archiver,
chownPair: chownPair,
}
for _, info := range inst.infos {
opts := copyFileOptions{
decompress: inst.allowLocalDecompression,
archiver: b.getArchiver(info.root, destInfo.root),
chownPair: chownPair,
}
if err := performCopyForInfo(destInfo, info, opts); err != nil {
return errors.Wrapf(err, "failed to copy files")
}

@@ -236,10 +296,10 @@ func lookupGroup(groupStr, filepath string) (int, error) {
return groups[0].Gid, nil
}

func createDestInfo(workingDir string, inst copyInstruction, imageMount *imageMount) (copyInfo, error) {
func createDestInfo(workingDir string, inst copyInstruction, imageMount *imageMount, platform string) (copyInfo, error) {
// Twiddle the destination when it's a relative path - meaning, make it
// relative to the WORKINGDIR
dest, err := normalizeDest(workingDir, inst.dest)
dest, err := normalizeDest(workingDir, inst.dest, platform)
if err != nil {
return copyInfo{}, errors.Wrapf(err, "invalid %s", inst.cmdName)
}
@@ -252,6 +312,63 @@ func createDestInfo(workingDir string, inst copyInstruction, imageMount *imageMo
return newCopyInfoFromSource(destMount, dest, ""), nil
}

// normalizeDest normalises the destination of a COPY/ADD command in a
// platform semantically consistent way.
func normalizeDest(workingDir, requested string, platform string) (string, error) {
dest := fromSlash(requested, platform)
endsInSlash := strings.HasSuffix(dest, string(separator(platform)))

if platform != "windows" {
if !path.IsAbs(requested) {
dest = path.Join("/", filepath.ToSlash(workingDir), dest)
// Make sure we preserve any trailing slash
if endsInSlash {
dest += "/"
}
}
return dest, nil
}

// We are guaranteed that the working directory is already consistent,
// However, Windows also has, for now, the limitation that ADD/COPY can
// only be done to the system drive, not any drives that might be present
// as a result of a bind mount.
//
// So... if the path requested is Linux-style absolute (/foo or \\foo),
// we assume it is the system drive. If it is a Windows-style absolute
// (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we
// strip any configured working directories drive letter so that it
// can be subsequently legitimately converted to a Windows volume-style
// pathname.

// Not a typo - filepath.IsAbs, not system.IsAbs on this next check as
// we only want to validate where the DriveColon part has been supplied.
if filepath.IsAbs(dest) {
if strings.ToUpper(string(dest[0])) != "C" {
return "", fmt.Errorf("Windows does not support destinations not on the system drive (C:)")
}
dest = dest[2:] // Strip the drive letter
}

// Cannot handle relative where WorkingDir is not the system drive.
if len(workingDir) > 0 {
if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) {
return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir)
}
if !system.IsAbs(dest) {
if string(workingDir[0]) != "C" {
return "", fmt.Errorf("Windows does not support relative paths when WORKDIR is not the system drive")
}
dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest)
// Make sure we preserve any trailing slash
if endsInSlash {
dest += string(os.PathSeparator)
}
}
}
return dest, nil
}

// For backwards compat, if there's just one info then use it as the
// cache look-up string, otherwise hash 'em all into one
func getSourceHashFromInfos(infos []copyInfo) string {

@@ -397,3 +514,19 @@ func hostConfigFromOptions(options *types.ImageBuildOptions) *container.HostConf
ExtraHosts: options.ExtraHosts,
}
}

// fromSlash works like filepath.FromSlash but with a given OS platform field
func fromSlash(path, platform string) string {
if platform == "windows" {
return strings.Replace(path, "/", "\\", -1)
}
return path
}

// separator returns a OS path separator for the given OS platform
func separator(platform string) byte {
if platform == "windows" {
return '\\'
}
return '/'
}

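Note — illustrative addition, not part of the diff: hand-derived expectations for the merged, platform-aware normalizeDest above. The "windows" rows assume a Windows daemon, since the final join still uses the daemon's os.PathSeparator:

    package dockerfile

    var normalizeDestExamples = []struct {
    	workingDir, requested, platform, want string
    }{
    	{"/app", "subdir/", "linux", "/app/subdir/"}, // relative dest joined under WORKDIR, slash kept
    	{"/app", "/abs", "linux", "/abs"},            // absolute dest passes through
    	{`C:\app`, "sub", "windows", `\app\sub`},     // drive letter stripped from WORKDIR
    	// ("", `D:\x`, "windows") errors: destination must be on the system drive (C:)
    }
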
@@ -1,42 +0,0 @@
// +build !windows

package dockerfile

import (
"os"
"path/filepath"
"strings"

"github.com/docker/docker/pkg/system"
)

// normalizeDest normalizes the destination of a COPY/ADD command in a
// platform semantically consistent way.
func normalizeDest(workingDir, requested string) (string, error) {
dest := filepath.FromSlash(requested)
endsInSlash := strings.HasSuffix(requested, string(os.PathSeparator))
if !system.IsAbs(requested) {
dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(workingDir), dest)
// Make sure we preserve any trailing slash
if endsInSlash {
dest += string(os.PathSeparator)
}
}
return dest, nil
}

func containsWildcards(name string) bool {
for i := 0; i < len(name); i++ {
ch := name[i]
if ch == '\\' {
i++
} else if ch == '*' || ch == '?' || ch == '[' {
return true
}
}
return false
}

func validateCopySourcePath(imageSource *imageMount, origPath string) error {
return nil
}

@@ -1,95 +0,0 @@
package dockerfile

import (
"fmt"
"os"
"path/filepath"
"strings"

"github.com/docker/docker/pkg/system"
"github.com/pkg/errors"
)

// normalizeDest normalizes the destination of a COPY/ADD command in a
// platform semantically consistent way.
func normalizeDest(workingDir, requested string) (string, error) {
dest := filepath.FromSlash(requested)
endsInSlash := strings.HasSuffix(dest, string(os.PathSeparator))

// We are guaranteed that the working directory is already consistent,
// However, Windows also has, for now, the limitation that ADD/COPY can
// only be done to the system drive, not any drives that might be present
// as a result of a bind mount.
//
// So... if the path requested is Linux-style absolute (/foo or \\foo),
// we assume it is the system drive. If it is a Windows-style absolute
// (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we
// strip any configured working directories drive letter so that it
// can be subsequently legitimately converted to a Windows volume-style
// pathname.

// Not a typo - filepath.IsAbs, not system.IsAbs on this next check as
// we only want to validate where the DriveColon part has been supplied.
if filepath.IsAbs(dest) {
if strings.ToUpper(string(dest[0])) != "C" {
return "", fmt.Errorf("Windows does not support destinations not on the system drive (C:)")
}
dest = dest[2:] // Strip the drive letter
}

// Cannot handle relative where WorkingDir is not the system drive.
if len(workingDir) > 0 {
if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) {
return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir)
}
if !system.IsAbs(dest) {
if string(workingDir[0]) != "C" {
return "", fmt.Errorf("Windows does not support relative paths when WORKDIR is not the system drive")
}
dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest)
// Make sure we preserve any trailing slash
if endsInSlash {
dest += string(os.PathSeparator)
}
}
}
return dest, nil
}

func containsWildcards(name string) bool {
for i := 0; i < len(name); i++ {
ch := name[i]
if ch == '*' || ch == '?' || ch == '[' {
return true
}
}
return false
}

var pathBlacklist = map[string]bool{
"c:\\": true,
"c:\\windows": true,
}

func validateCopySourcePath(imageSource *imageMount, origPath string) error {
// validate windows paths from other images
if imageSource == nil {
return nil
}
origPath = filepath.FromSlash(origPath)
p := strings.ToLower(filepath.Clean(origPath))
if !filepath.IsAbs(p) {
if filepath.VolumeName(p) != "" {
if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths
p = p[:len(p)-1]
}
p += "\\"
} else {
p = filepath.Join("c:\\", p)
}
}
if _, blacklisted := pathBlacklist[p]; blacklisted {
return errors.New("copy from c:\\ or c:\\windows is not allowed on windows")
}
return nil
}

@@ -40,7 +40,7 @@ func TestNormalizeDest(t *testing.T) {
}
for _, testcase := range tests {
msg := fmt.Sprintf("Input: %s, %s", testcase.current, testcase.requested)
actual, err := normalizeDest(testcase.current, testcase.requested)
actual, err := normalizeDest(testcase.current, testcase.requested, "windows")
if testcase.etext == "" {
if !assert.NoError(t, err, msg) {
continue

@@ -10,6 +10,7 @@ import (
"github.com/docker/docker/builder"
containerpkg "github.com/docker/docker/container"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/containerfs"
"golang.org/x/net/context"
)

@@ -117,8 +118,8 @@ func (l *mockLayer) Release() error {
return nil
}

func (l *mockLayer) Mount() (string, error) {
return "mountPath", nil
func (l *mockLayer) Mount() (containerfs.ContainerFS, error) {
return containerfs.NewLocalContainerFS("mountPath"), nil
}

func (l *mockLayer) Commit(string) (builder.ReleaseableLayer, error) {

@@ -36,25 +36,25 @@ func TestFSCache(t *testing.T) {
src1, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data", "bar"})
assert.Nil(t, err)

dt, err := ioutil.ReadFile(filepath.Join(src1.Root(), "foo"))
dt, err := ioutil.ReadFile(filepath.Join(src1.Root().Path(), "foo"))
assert.Nil(t, err)
assert.Equal(t, string(dt), "data")

// same id doesn't recalculate anything
src2, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data2", "bar"})
assert.Nil(t, err)
assert.Equal(t, src1.Root(), src2.Root())
assert.Equal(t, src1.Root().Path(), src2.Root().Path())

dt, err = ioutil.ReadFile(filepath.Join(src1.Root(), "foo"))
dt, err = ioutil.ReadFile(filepath.Join(src1.Root().Path(), "foo"))
assert.Nil(t, err)
assert.Equal(t, string(dt), "data")
assert.Nil(t, src2.Close())

src3, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo2", "data2", "bar"})
assert.Nil(t, err)
assert.NotEqual(t, src1.Root(), src3.Root())
assert.NotEqual(t, src1.Root().Path(), src3.Root().Path())

dt, err = ioutil.ReadFile(filepath.Join(src3.Root(), "foo2"))
dt, err = ioutil.ReadFile(filepath.Join(src3.Root().Path(), "foo2"))
assert.Nil(t, err)
assert.Equal(t, string(dt), "data2")

@@ -71,12 +71,12 @@ func TestFSCache(t *testing.T) {
// new upload with the same shared key shoutl overwrite
src4, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo3", "data3", "bar"})
assert.Nil(t, err)
assert.NotEqual(t, src1.Root(), src3.Root())
assert.NotEqual(t, src1.Root().Path(), src3.Root().Path())

dt, err = ioutil.ReadFile(filepath.Join(src3.Root(), "foo3"))
dt, err = ioutil.ReadFile(filepath.Join(src3.Root().Path(), "foo3"))
assert.Nil(t, err)
assert.Equal(t, string(dt), "data3")
assert.Equal(t, src4.Root(), src3.Root())
assert.Equal(t, src4.Root().Path(), src3.Root().Path())
assert.Nil(t, src4.Close())

s, err = fscache.DiskUsage()

@@ -8,19 +8,19 @@ import (
"github.com/docker/docker/builder"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/tarsum"
"github.com/pkg/errors"
)

type archiveContext struct {
root string
root containerfs.ContainerFS
sums tarsum.FileInfoSums
}

func (c *archiveContext) Close() error {
return os.RemoveAll(c.root)
return c.root.RemoveAll(c.root.Path())
}

func convertPathError(err error, cleanpath string) error {

@@ -52,7 +52,8 @@ func FromArchive(tarStream io.Reader) (builder.Source, error) {
return nil, err
}

tsc := &archiveContext{root: root}
// Assume local file system. Since it's coming from a tar file.
tsc := &archiveContext{root: containerfs.NewLocalContainerFS(root)}

// Make sure we clean-up upon error. In the happy case the caller
// is expected to manage the clean-up

@@ -82,7 +83,7 @@ func FromArchive(tarStream io.Reader) (builder.Source, error) {
return tsc, nil
}

func (c *archiveContext) Root() string {
func (c *archiveContext) Root() containerfs.ContainerFS {
return c.root
}

@@ -91,7 +92,7 @@ func (c *archiveContext) Remove(path string) error {
if err != nil {
return err
}
return os.RemoveAll(fullpath)
return c.root.RemoveAll(fullpath)
}

func (c *archiveContext) Hash(path string) (string, error) {

@@ -100,7 +101,7 @@ func (c *archiveContext) Hash(path string) (string, error) {
return "", err
}

rel, err := filepath.Rel(c.root, fullpath)
rel, err := c.root.Rel(c.root.Path(), fullpath)
if err != nil {
return "", convertPathError(err, cleanpath)
}

@@ -115,13 +116,13 @@ func (c *archiveContext) Hash(path string) (string, error) {
return path, nil // backwards compat TODO: see if really needed
}

func normalize(path, root string) (cleanPath, fullPath string, err error) {
cleanPath = filepath.Clean(string(os.PathSeparator) + path)[1:]
fullPath, err = symlink.FollowSymlinkInScope(filepath.Join(root, path), root)
func normalize(path string, root containerfs.ContainerFS) (cleanPath, fullPath string, err error) {
cleanPath = root.Clean(string(root.Separator()) + path)[1:]
fullPath, err = root.ResolveScopedPath(path, true)
if err != nil {
return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, cleanPath)
}
if _, err := os.Lstat(fullPath); err != nil {
if _, err := root.Lstat(fullPath); err != nil {
return "", "", errors.WithStack(convertPathError(err, path))
}
return

@@ -5,15 +5,14 @@ import (
"fmt"
"io"
"os"
"path/filepath"
"strings"

"github.com/containerd/continuity/driver"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/builder"
"github.com/docker/docker/builder/dockerfile/parser"
"github.com/docker/docker/builder/dockerignore"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/urlutil"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"

@@ -157,12 +156,12 @@ func readAndParseDockerfile(name string, rc io.Reader) (*parser.Result, error) {
return parser.Parse(br)
}

func openAt(remote builder.Source, path string) (*os.File, error) {
func openAt(remote builder.Source, path string) (driver.File, error) {
fullPath, err := FullPath(remote, path)
if err != nil {
return nil, err
}
return os.Open(fullPath)
return remote.Root().Open(fullPath)
}

// StatAt is a helper for calling Stat on a path from a source

@@ -171,12 +170,12 @@ func StatAt(remote builder.Source, path string) (os.FileInfo, error) {
if err != nil {
return nil, err
}
return os.Stat(fullPath)
return remote.Root().Stat(fullPath)
}

// FullPath is a helper for getting a full path for a path from a source
func FullPath(remote builder.Source, path string) (string, error) {
fullPath, err := symlink.FollowSymlinkInScope(filepath.Join(remote.Root(), path), remote.Root())
fullPath, err := remote.Root().ResolveScopedPath(path, true)
if err != nil {
return "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullPath) // backwards compat with old error
}

@@ -5,11 +5,11 @@ import (
"io/ioutil"
"log"
"os"
"path/filepath"
"sort"
"testing"

"github.com/docker/docker/builder"
"github.com/docker/docker/pkg/containerfs"
)

const (

@@ -53,7 +53,7 @@ func checkDirectory(t *testing.T, dir string, expectedFiles []string) {
}

func executeProcess(t *testing.T, contextDir string) {
modifiableCtx := &stubRemote{root: contextDir}
modifiableCtx := &stubRemote{root: containerfs.NewLocalContainerFS(contextDir)}

err := removeDockerfile(modifiableCtx, builder.DefaultDockerfileName)

@@ -105,19 +105,19 @@ func TestProcessShouldLeaveAllFiles(t *testing.T) {

// TODO: remove after moving to a separate pkg
type stubRemote struct {
root string
root containerfs.ContainerFS
}

func (r *stubRemote) Hash(path string) (string, error) {
return "", errors.New("not implemented")
}

func (r *stubRemote) Root() string {
func (r *stubRemote) Root() containerfs.ContainerFS {
return r.root
}
func (r *stubRemote) Close() error {
return errors.New("not implemented")
}
func (r *stubRemote) Remove(p string) error {
return os.Remove(filepath.Join(r.root, p))
return r.root.Remove(r.root.Join(r.root.Path(), p))
}

@@ -3,11 +3,10 @@ package remotecontext
import (
"encoding/hex"
"os"
"path/filepath"
"runtime"
"strings"

"github.com/docker/docker/builder"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/pools"
"github.com/pkg/errors"
)

@@ -15,7 +14,7 @@ import (
// NewLazySource creates a new LazyContext. LazyContext defines a hashed build
// context based on a root directory. Individual files are hashed first time
// they are asked. It is not safe to call methods of LazyContext concurrently.
func NewLazySource(root string) (builder.Source, error) {
func NewLazySource(root containerfs.ContainerFS) (builder.Source, error) {
return &lazySource{
root: root,
sums: make(map[string]string),

@@ -23,11 +22,11 @@ func NewLazySource(root string) (builder.Source, error) {
}

type lazySource struct {
root string
root containerfs.ContainerFS
sums map[string]string
}

func (c *lazySource) Root() string {
func (c *lazySource) Root() containerfs.ContainerFS {
return c.root
}

@@ -41,7 +40,7 @@ func (c *lazySource) Hash(path string) (string, error) {
return "", err
}

fi, err := os.Lstat(fullPath)
fi, err := c.root.Lstat(fullPath)
if err != nil {
return "", errors.WithStack(err)
}

@@ -63,13 +62,13 @@ func (c *lazySource) Hash(path string) (string, error) {
}

func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error) {
p := filepath.Join(c.root, relPath)
p := c.root.Join(c.root.Path(), relPath)
h, err := NewFileHash(p, relPath, fi)
if err != nil {
return "", errors.Wrapf(err, "failed to create hash for %s", relPath)
}
if fi.Mode().IsRegular() && fi.Size() > 0 {
f, err := os.Open(p)
f, err := c.root.Open(p)
if err != nil {
return "", errors.Wrapf(err, "failed to open %s", relPath)
}

@@ -85,10 +84,10 @@ func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error)

// Rel makes a path relative to base path. Same as `filepath.Rel` but can also
// handle UUID paths in windows.
func Rel(basepath, targpath string) (string, error) {
func Rel(basepath containerfs.ContainerFS, targpath string) (string, error) {
// filepath.Rel can't handle UUID paths in windows
if runtime.GOOS == "windows" {
pfx := basepath + `\`
if basepath.OS() == "windows" {
pfx := basepath.Path() + `\`
if strings.HasPrefix(targpath, pfx) {
p := strings.TrimPrefix(targpath, pfx)
if p == "" {

@@ -97,5 +96,5 @@ func Rel(basepath, targpath string) (string, error) {
return p, nil
}
}
return filepath.Rel(basepath, targpath)
return basepath.Rel(basepath.Path(), targpath)
}

@@ -3,11 +3,11 @@ package remotecontext
import (
"fmt"
"os"
"path/filepath"
"sync"

"github.com/docker/docker/pkg/symlink"
iradix "github.com/hashicorp/go-immutable-radix"

"github.com/docker/docker/pkg/containerfs"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil"
)

@@ -19,7 +19,7 @@ type hashed interface {
// CachableSource is a source that contains cache records for its contents
type CachableSource struct {
mu sync.Mutex
root string
root containerfs.ContainerFS
tree *iradix.Tree
txn *iradix.Txn
}

@@ -28,7 +28,7 @@ type CachableSource struct {
func NewCachableSource(root string) *CachableSource {
ts := &CachableSource{
tree: iradix.New(),
root: root,
root: containerfs.NewLocalContainerFS(root),
}
return ts
}

@@ -67,7 +67,7 @@ func (cs *CachableSource) Scan() error {
return err
}
txn := iradix.New().Txn()
err = filepath.Walk(cs.root, func(path string, info os.FileInfo, err error) error {
err = cs.root.Walk(cs.root.Path(), func(path string, info os.FileInfo, err error) error {
if err != nil {
return errors.Wrapf(err, "failed to walk %s", path)
}

@@ -134,12 +134,12 @@ func (cs *CachableSource) Close() error {
}

func (cs *CachableSource) normalize(path string) (cleanpath, fullpath string, err error) {
cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:]
fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(cs.root, path), cs.root)
cleanpath = cs.root.Clean(string(cs.root.Separator()) + path)[1:]
fullpath, err = cs.root.ResolveScopedPath(path, true)
if err != nil {
return "", "", fmt.Errorf("Forbidden path outside the context: %s (%s)", path, fullpath)
}
_, err = os.Lstat(fullpath)
_, err = cs.root.Lstat(fullpath)
if err != nil {
return "", "", convertPathError(err, path)
}

@@ -158,7 +158,7 @@ func (cs *CachableSource) Hash(path string) (string, error) {
}

// Root returns a root directory for the source
func (cs *CachableSource) Root() string {
func (cs *CachableSource) Root() containerfs.ContainerFS {
return cs.root
}

@@ -94,7 +94,7 @@ func (this *TarsumBackup) GoString() string {
s := make([]string, 0, 5)
s = append(s, "&remotecontext.TarsumBackup{")
keysForHashes := make([]string, 0, len(this.Hashes))
for k, _ := range this.Hashes {
for k := range this.Hashes {
keysForHashes = append(keysForHashes, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForHashes)

@@ -133,7 +133,7 @@ func (m *TarsumBackup) MarshalTo(dAtA []byte) (int, error) {
var l int
_ = l
if len(m.Hashes) > 0 {
for k, _ := range m.Hashes {
for k := range m.Hashes {
dAtA[i] = 0xa
i++
v := m.Hashes[k]

@@ -211,7 +211,7 @@ func (this *TarsumBackup) String() string {
return "nil"
}
keysForHashes := make([]string, 0, len(this.Hashes))
for k, _ := range this.Hashes {
for k := range this.Hashes {
keysForHashes = append(keysForHashes, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForHashes)

@@ -35,7 +35,7 @@ func TestCloseRootDirectory(t *testing.T) {
t.Fatalf("Error while executing Close: %s", err)
}

_, err = os.Stat(src.Root())
_, err = os.Stat(src.Root().Path())

if !os.IsNotExist(err) {
t.Fatal("Directory should not exist at this point")

@@ -99,6 +99,8 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
FullTimestamp: true,
})

system.InitLCOW(cli.Config.Experimental)

if err := setDefaultUmask(); err != nil {
return fmt.Errorf("Failed to set umask: %v", err)
}

@@ -2,7 +2,6 @@ package container

import (
"os"
"path/filepath"

"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/archive"

@@ -15,17 +14,20 @@ import (
// an error if the path points to outside the container's rootfs.
func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) {
// Check if a drive letter supplied, it must be the system drive. No-op except on Windows
path, err = system.CheckSystemDriveAndRemoveDriveLetter(path)
path, err = system.CheckSystemDriveAndRemoveDriveLetter(path, container.BaseFS)
if err != nil {
return "", "", err
}

// Consider the given path as an absolute path in the container.
absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)
absPath = archive.PreserveTrailingDotOrSeparator(
container.BaseFS.Join(string(container.BaseFS.Separator()), path),
path,
container.BaseFS.Separator())

// Split the absPath into its Directory and Base components. We will
// resolve the dir in the scope of the container then append the base.
dirPath, basePath := filepath.Split(absPath)
dirPath, basePath := container.BaseFS.Split(absPath)

resolvedDirPath, err := container.GetResourcePath(dirPath)
if err != nil {

@@ -34,8 +36,7 @@ func (container *Container) ResolvePath(path string) (resolvedPath, absPath stri

// resolvedDirPath will have been cleaned (no trailing path separators) so
// we can manually join it with the base path element.
resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath

resolvedPath = resolvedDirPath + string(container.BaseFS.Separator()) + basePath
return resolvedPath, absPath, nil
}

@@ -44,7 +45,9 @@ func (container *Container) ResolvePath(path string) (resolvedPath, absPath stri
// resolved to a path on the host corresponding to the given absolute path
// inside the container.
func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) {
lstat, err := os.Lstat(resolvedPath)
driver := container.BaseFS

lstat, err := driver.Lstat(resolvedPath)
if err != nil {
return nil, err
}

@@ -57,17 +60,17 @@ func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.
return nil, err
}

linkTarget, err = filepath.Rel(container.BaseFS, hostPath)
linkTarget, err = driver.Rel(driver.Path(), hostPath)
if err != nil {
return nil, err
}

// Make it an absolute path.
linkTarget = filepath.Join(string(filepath.Separator), linkTarget)
linkTarget = driver.Join(string(driver.Separator()), linkTarget)
}

return &types.ContainerPathStat{
Name: filepath.Base(absPath),
Name: driver.Base(absPath),
Size: lstat.Size(),
Mode: lstat.Mode(),
Mtime: lstat.ModTime(),

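Note — illustrative addition, not part of the diff: with BaseFS now a containerfs.ContainerFS, docker cp path arithmetic in ResolvePath/StatPath uses the container's separator rather than the daemon's, which is what lets a Linux container on a Windows daemon (LCOW) keep "/"-style paths. A minimal sketch of that idea:

    package example

    import "github.com/docker/docker/container"

    // containerAbs joins a user-supplied path under the container root using
    // the container's own separator, e.g. "data" -> "/data" for a Linux BaseFS
    // even when the daemon itself runs on Windows.
    func containerAbs(c *container.Container, p string) string {
    	fs := c.BaseFS
    	return fs.Join(string(fs.Separator()), p)
    }
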
@@ -28,6 +28,7 @@ import (
"github.com/docker/docker/layer"
"github.com/docker/docker/libcontainerd"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/signal"

@@ -64,10 +65,10 @@ var (
type Container struct {
StreamConfig *stream.Config
// embed for Container to support states directly.
*State `json:"State"` // Needed for Engine API version <= 1.11
Root string `json:"-"` // Path to the "home" of the container, including metadata.
BaseFS string `json:"-"` // Path to the graphdriver mountpoint
RWLayer layer.RWLayer `json:"-"`
*State `json:"State"` // Needed for Engine API version <= 1.11
Root string `json:"-"` // Path to the "home" of the container, including metadata.
BaseFS containerfs.ContainerFS `json:"-"` // interface containing graphdriver mount
RWLayer layer.RWLayer `json:"-"`
ID string
Created time.Time
Managed bool

@@ -305,15 +306,13 @@ func (container *Container) SetupWorkingDirectory(rootIDs idtools.IDPair) error
func (container *Container) GetResourcePath(path string) (string, error) {
// IMPORTANT - These are paths on the OS where the daemon is running, hence
// any filepath operations must be done in an OS agnostic way.

cleanPath := cleanResourcePath(path)
r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanPath), container.BaseFS)
r, e := container.BaseFS.ResolveScopedPath(path, false)

// Log this here on the daemon side as there's otherwise no indication apart
// from the error being propagated all the way back to the client. This makes
// debugging significantly easier and clearly indicates the error comes from the daemon.
if e != nil {
logrus.Errorf("Failed to FollowSymlinkInScope BaseFS %s cleanPath %s path %s %s\n", container.BaseFS, cleanPath, path, e)
logrus.Errorf("Failed to ResolveScopedPath BaseFS %s path %s %s\n", container.BaseFS.Path(), path, e)
}
return r, e
}

@@ -5,7 +5,6 @@ package container
import (
"io/ioutil"
"os"
"path/filepath"

"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"

@@ -13,7 +12,6 @@ import (
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/volume"
"github.com/opencontainers/selinux/go-selinux/label"

@@ -131,7 +129,7 @@ func (container *Container) NetworkMounts() []Mount {

// CopyImagePathContent copies files in destination to the volume.
func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error {
rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, destination), container.BaseFS)
rootfs, err := container.GetResourcePath(destination)
if err != nil {
return err
}

@@ -456,11 +454,6 @@ func (container *Container) TmpfsMounts() ([]Mount, error) {
return mounts, nil
}

// cleanResourcePath cleans a resource path and prepares to combine with mnt path
func cleanResourcePath(path string) string {
return filepath.Join(string(os.PathSeparator), path)
}

// EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network
func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool {
return false

@ -172,18 +172,6 @@ func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfi
|
|||
return nil
|
||||
}
|
||||
|
||||
// cleanResourcePath cleans a resource path by removing C:\ syntax, and prepares
|
||||
// to combine with a volume path
|
||||
func cleanResourcePath(path string) string {
|
||||
if len(path) >= 2 {
|
||||
c := path[0]
|
||||
if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
|
||||
path = path[2:]
|
||||
}
|
||||
}
|
||||
return filepath.Join(string(os.PathSeparator), path)
|
||||
}
|
||||
|
||||
// BuildHostnameFile writes the container's hostname file.
|
||||
func (container *Container) BuildHostnameFile() error {
|
||||
return nil
|
||||
|
|
|
@ -3,7 +3,6 @@ package daemon
|
|||
import (
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
|
@ -20,6 +19,31 @@ import (
|
|||
// path does not refer to a directory.
|
||||
var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory")
|
||||
|
||||
// The daemon will use the following interfaces if the container fs implements
|
||||
// these for optimized copies to and from the container.
|
||||
type extractor interface {
|
||||
ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error
|
||||
}
|
||||
|
||||
type archiver interface {
|
||||
ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error)
|
||||
}
|
||||
|
||||
// helper functions to extract or archive
|
||||
func extractArchive(i interface{}, src io.Reader, dst string, opts *archive.TarOptions) error {
|
||||
if ea, ok := i.(extractor); ok {
|
||||
return ea.ExtractArchive(src, dst, opts)
|
||||
}
|
||||
return chrootarchive.Untar(src, dst, opts)
|
||||
}
|
||||
|
||||
func archivePath(i interface{}, src string, opts *archive.TarOptions) (io.ReadCloser, error) {
|
||||
if ap, ok := i.(archiver); ok {
|
||||
return ap.ArchivePath(src, opts)
|
||||
}
|
||||
return archive.TarWithOptions(src, opts)
|
||||
}
|
||||
|
||||
// ContainerCopy performs a deprecated operation of archiving the resource at
|
||||
// the specified path in the container identified by the given name.
|
||||
func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
|
||||
|
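The two unexported interfaces above let a filesystem implementation advertise optimized tar handling: the helpers type-assert for the capability and fall back to the local chrootarchive/archive routines when it is absent. The sketch below shows the same probe-and-fall-back shape with a hypothetical remoteFS type; the type name and its ExtractArchive body are invented for illustration and are not part of this PR.

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/docker/docker/pkg/archive"
)

// extractor mirrors the unexported capability interface used by the daemon.
type extractor interface {
	ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error
}

// remoteFS is a hypothetical filesystem that handles extraction itself,
// e.g. by streaming the tar to a utility VM. Invented for illustration.
type remoteFS struct{}

func (remoteFS) ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error {
	fmt.Println("optimized remote extraction to", dst)
	return nil
}

// extractTo probes for the capability and falls back to a local untar,
// mirroring the extractArchive helper in this hunk.
func extractTo(fs interface{}, src io.Reader, dst string, opts *archive.TarOptions) error {
	if ea, ok := fs.(extractor); ok {
		return ea.ExtractArchive(src, dst, opts)
	}
	return archive.Untar(src, dst, opts)
}

func main() {
	_ = extractTo(remoteFS{}, strings.NewReader(""), "/tmp/dst", nil)
}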
@ -138,6 +162,9 @@ func (daemon *Daemon) containerStatPath(container *container.Container, path str
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// Normalize path before sending to rootfs
|
||||
path = container.BaseFS.FromSlash(path)
|
||||
|
||||
resolvedPath, absPath, err := container.ResolvePath(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -178,6 +205,9 @@ func (daemon *Daemon) containerArchivePath(container *container.Container, path
|
|||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Normalize path before sending to rootfs
|
||||
path = container.BaseFS.FromSlash(path)
|
||||
|
||||
resolvedPath, absPath, err := container.ResolvePath(path)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
|
@ -196,7 +226,18 @@ func (daemon *Daemon) containerArchivePath(container *container.Container, path
|
|||
// also catches the case when the root directory of the container is
|
||||
// requested: we want the archive entries to start with "/" and not the
|
||||
// container ID.
|
||||
data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath))
|
||||
driver := container.BaseFS
|
||||
|
||||
// Get the source and the base paths of the container resolved path in order
|
||||
// to get the proper tar options for the rebase tar.
|
||||
resolvedPath = driver.Clean(resolvedPath)
|
||||
if driver.Base(resolvedPath) == "." {
|
||||
resolvedPath += string(driver.Separator()) + "."
|
||||
}
|
||||
sourceDir, sourceBase := driver.Dir(resolvedPath), driver.Base(resolvedPath)
|
||||
opts := archive.TarResourceRebaseOpts(sourceBase, driver.Base(absPath))
|
||||
|
||||
data, err := archivePath(driver, sourceDir, opts)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
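Splitting the resolved path into a directory and a base name is what lets archivePath run against any driver while TarResourceRebaseOpts rewrites the entries to start at the requested name. Below is a small worked example of the split, shown with plain path/filepath for a local daemon since the real code performs the same operations through the ContainerFS driver; the paths are hypothetical.

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	resolvedPath := "/var/lib/docker/overlay2/abc/merged/etc/hostname" // hypothetical host-side path
	absPath := "/etc/hostname"                                         // path as seen inside the container

	resolvedPath = filepath.Clean(resolvedPath)
	if filepath.Base(resolvedPath) == "." {
		// A request for the container root ends up as <root>/., which keeps
		// the split below well-defined (sourceBase becomes ".").
		resolvedPath += string(filepath.Separator) + "."
	}
	sourceDir, sourceBase := filepath.Dir(resolvedPath), filepath.Base(resolvedPath)

	fmt.Println(sourceDir)              // .../merged/etc -> directory handed to archivePath
	fmt.Println(sourceBase)             // hostname       -> source name for the rebase
	fmt.Println(filepath.Base(absPath)) // hostname       -> name the archive entries are rebased to
}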
@ -235,8 +276,12 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path
|
|||
return err
|
||||
}
|
||||
|
||||
// Normalize path before sending to rootfs

|
||||
path = container.BaseFS.FromSlash(path)
|
||||
driver := container.BaseFS
|
||||
|
||||
// Check if a drive letter is supplied; it must be the system drive. No-op except on Windows
|
||||
path, err = system.CheckSystemDriveAndRemoveDriveLetter(path)
|
||||
path, err = system.CheckSystemDriveAndRemoveDriveLetter(path, driver)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -248,7 +293,10 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path
|
|||
// that you can extract an archive to a symlink that points to a directory.
|
||||
|
||||
// Consider the given path as an absolute path in the container.
|
||||
absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)
|
||||
absPath := archive.PreserveTrailingDotOrSeparator(
|
||||
driver.Join(string(driver.Separator()), path),
|
||||
path,
|
||||
driver.Separator())
|
||||
|
||||
// This will evaluate the last path element if it is a symlink.
|
||||
resolvedPath, err := container.GetResourcePath(absPath)
|
||||
|
@ -256,7 +304,7 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path
|
|||
return err
|
||||
}
|
||||
|
||||
stat, err := os.Lstat(resolvedPath)
|
||||
stat, err := driver.Lstat(resolvedPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -279,21 +327,24 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path
|
|||
// a volume file path.
|
||||
var baseRel string
|
||||
if strings.HasPrefix(resolvedPath, `\\?\Volume{`) {
|
||||
if strings.HasPrefix(resolvedPath, container.BaseFS) {
|
||||
baseRel = resolvedPath[len(container.BaseFS):]
|
||||
if strings.HasPrefix(resolvedPath, driver.Path()) {
|
||||
baseRel = resolvedPath[len(driver.Path()):]
|
||||
if baseRel[:1] == `\` {
|
||||
baseRel = baseRel[1:]
|
||||
}
|
||||
}
|
||||
} else {
|
||||
baseRel, err = filepath.Rel(container.BaseFS, resolvedPath)
|
||||
baseRel, err = driver.Rel(driver.Path(), resolvedPath)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Make it an absolute path.
|
||||
absPath = filepath.Join(string(filepath.Separator), baseRel)
|
||||
absPath = driver.Join(string(driver.Separator()), baseRel)
|
||||
|
||||
// @ TODO: gupta-ak: Technically, this works since it no-ops
|
||||
// on Windows and the file system is local anyway on linux.
|
||||
// But eventually, it should be made driver aware.
|
||||
toVolume, err := checkIfPathIsInAVolume(container, absPath)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -315,7 +366,7 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path
|
|||
}
|
||||
}
|
||||
|
||||
if err := chrootarchive.Untar(content, resolvedPath, options); err != nil {
|
||||
if err := extractArchive(driver, content, resolvedPath, options); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -356,24 +407,28 @@ func (daemon *Daemon) containerCopy(container *container.Container, resource str
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// Normalize path before sending to rootfs
|
||||
resource = container.BaseFS.FromSlash(resource)
|
||||
driver := container.BaseFS
|
||||
|
||||
basePath, err := container.GetResourcePath(resource)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stat, err := os.Stat(basePath)
|
||||
stat, err := driver.Stat(basePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var filter []string
|
||||
if !stat.IsDir() {
|
||||
d, f := filepath.Split(basePath)
|
||||
d, f := driver.Split(basePath)
|
||||
basePath = d
|
||||
filter = []string{f}
|
||||
} else {
|
||||
filter = []string{filepath.Base(basePath)}
|
||||
basePath = filepath.Dir(basePath)
|
||||
filter = []string{driver.Base(basePath)}
|
||||
basePath = driver.Dir(basePath)
|
||||
}
|
||||
archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
|
||||
archive, err := archivePath(driver, basePath, &archive.TarOptions{
|
||||
Compression: archive.Uncompressed,
|
||||
IncludeFiles: filter,
|
||||
})
|
||||
|
|
|
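containerCopy archives a single entry by tarring its parent directory with an IncludeFiles filter, which is why the file and directory cases above compute (basePath, filter) differently. A compact restatement of that selection, using only the driver methods the hunk already relies on (Split, Base, Dir):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/containerfs"
)

// splitForArchive returns the directory to tar and the entries to include,
// mirroring the file vs. directory branches in containerCopy.
func splitForArchive(fs containerfs.ContainerFS, basePath string, isDir bool) (string, []string) {
	if !isDir {
		dir, file := fs.Split(basePath)
		return dir, []string{file}
	}
	return fs.Dir(basePath), []string{fs.Base(basePath)}
}

func main() {
	fs := containerfs.NewLocalContainerFS("/") // any local root works for the path math
	fmt.Println(splitForArchive(fs, "/etc/hostname", false)) // /etc/ [hostname]
	fmt.Println(splitForArchive(fs, "/etc", true))           // /     [etc]
}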
@ -10,6 +10,7 @@ import (
|
|||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/layer"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/docker/registry"
|
||||
|
@ -25,9 +26,9 @@ type releaseableLayer struct {
|
|||
rwLayer layer.RWLayer
|
||||
}
|
||||
|
||||
func (rl *releaseableLayer) Mount() (string, error) {
|
||||
func (rl *releaseableLayer) Mount() (containerfs.ContainerFS, error) {
|
||||
var err error
|
||||
var mountPath string
|
||||
var mountPath containerfs.ContainerFS
|
||||
var chainID layer.ChainID
|
||||
if rl.roLayer != nil {
|
||||
chainID = rl.roLayer.ChainID()
|
||||
|
@ -36,7 +37,7 @@ func (rl *releaseableLayer) Mount() (string, error) {
|
|||
mountID := stringid.GenerateRandomID()
|
||||
rl.rwLayer, err = rl.layerStore.CreateRWLayer(mountID, chainID, nil)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to create rwlayer")
|
||||
return nil, errors.Wrap(err, "failed to create rwlayer")
|
||||
}
|
||||
|
||||
mountPath, err = rl.rwLayer.Mount("")
|
||||
|
@ -48,7 +49,7 @@ func (rl *releaseableLayer) Mount() (string, error) {
|
|||
logrus.Errorf("Failed to release RWLayer: %s", err)
|
||||
}
|
||||
rl.rwLayer = nil
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return mountPath, nil
|
||||
|
|
|
@ -40,6 +40,7 @@ import (
|
|||
"github.com/docker/docker/layer"
|
||||
"github.com/docker/docker/libcontainerd"
|
||||
"github.com/docker/docker/migrate/v1"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/plugingetter"
|
||||
"github.com/docker/docker/pkg/sysinfo"
|
||||
|
@ -966,11 +967,11 @@ func (daemon *Daemon) Mount(container *container.Container) error {
|
|||
}
|
||||
logrus.Debugf("container mounted via layerStore: %v", dir)
|
||||
|
||||
if container.BaseFS != dir {
|
||||
if container.BaseFS != nil && container.BaseFS.Path() != dir.Path() {
|
||||
// The mount path reported by the graph driver should always be trusted on Windows, since the
|
||||
// volume path for a given mounted layer may change over time. This should only be an error
|
||||
// on non-Windows operating systems.
|
||||
if container.BaseFS != "" && runtime.GOOS != "windows" {
|
||||
if runtime.GOOS != "windows" {
|
||||
daemon.Unmount(container)
|
||||
return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
|
||||
daemon.GraphDriverName(container.Platform), container.ID, container.BaseFS, dir)
|
||||
|
@ -1045,7 +1046,7 @@ func prepareTempDir(rootDir string, rootIDs idtools.IDPair) (string, error) {
|
|||
return tmpDir, idtools.MkdirAllAndChown(tmpDir, 0700, rootIDs)
|
||||
}
|
||||
|
||||
func (daemon *Daemon) setupInitLayer(initPath string) error {
|
||||
func (daemon *Daemon) setupInitLayer(initPath containerfs.ContainerFS) error {
|
||||
rootIDs := daemon.idMappings.RootPair()
|
||||
return initlayer.Setup(initPath, rootIDs)
|
||||
}
|
||||
|
|
|
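setupInitLayer above (and the platform-specific getLayerInit hooks below) now takes a containerfs.ContainerFS instead of a string, which is what lets the init layer be populated on a remote LCOW filesystem too. A minimal sketch of a hook with the new shape; writeHosts and its body are invented here and only stand in for whatever the real init-layer setup does.

package main

import (
	"fmt"

	contdriver "github.com/containerd/continuity/driver"
	"github.com/docker/docker/pkg/containerfs"
)

// layerInit matches the new hook shape: func(containerfs.ContainerFS) error.
type layerInit func(containerfs.ContainerFS) error

// writeHosts is an invented hook that seeds a single file into the init layer.
func writeHosts(root containerfs.ContainerFS) error {
	etc := root.Join(root.Path(), "etc")
	if err := root.MkdirAll(etc, 0755); err != nil {
		return err
	}
	return contdriver.WriteFile(root, root.Join(etc, "hosts"),
		[]byte("127.0.0.1 localhost\n"), 0644)
}

// applyInit mirrors how a nil hook (the platform stubs below return nil) is a no-op.
func applyInit(hook layerInit, root containerfs.ContainerFS) error {
	if hook == nil {
		return nil
	}
	return hook(root)
}

func main() {
	root := containerfs.NewLocalContainerFS("/tmp/init-layer-demo")
	fmt.Println(applyInit(writeHosts, root))
}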
@ -12,6 +12,7 @@ import (
|
|||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/fileutils"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/parsers/kernel"
|
||||
|
@ -97,7 +98,7 @@ func setupDaemonRoot(config *config.Config, rootDir string, rootIDs idtools.IDPa
|
|||
return nil
|
||||
}
|
||||
|
||||
func (daemon *Daemon) getLayerInit() func(string) error {
|
||||
func (daemon *Daemon) getLayerInit() func(containerfs.ContainerFS) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -24,6 +24,7 @@ import (
|
|||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/opts"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/parsers"
|
||||
"github.com/docker/docker/pkg/parsers/kernel"
|
||||
|
@ -988,7 +989,7 @@ func removeDefaultBridgeInterface() {
|
|||
}
|
||||
}
|
||||
|
||||
func (daemon *Daemon) getLayerInit() func(string) error {
|
||||
func (daemon *Daemon) getLayerInit() func(containerfs.ContainerFS) error {
|
||||
return daemon.setupInitLayer
|
||||
}
|
||||
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/fileutils"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/parsers"
|
||||
|
@ -56,7 +57,7 @@ func parseSecurityOpt(container *container.Container, config *containertypes.Hos
|
|||
return nil
|
||||
}
|
||||
|
||||
func (daemon *Daemon) getLayerInit() func(string) error {
|
||||
func (daemon *Daemon) getLayerInit() func(containerfs.ContainerFS) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -40,7 +40,7 @@ func (daemon *Daemon) containerExport(container *container.Container) (io.ReadCl
|
|||
return nil, err
|
||||
}
|
||||
|
||||
archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{
|
||||
archive, err := archivePath(container.BaseFS, container.BaseFS.Path(), &archive.TarOptions{
|
||||
Compression: archive.Uncompressed,
|
||||
UIDMaps: daemon.idMappings.UIDs(),
|
||||
GIDMaps: daemon.idMappings.GIDs(),
|
||||
|
|
|
@ -38,6 +38,7 @@ import (
|
|||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/chrootarchive"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/directory"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/locker"
|
||||
|
@ -388,12 +389,12 @@ func atomicRemove(source string) error {
|
|||
|
||||
// Get returns the rootfs path for the id.
|
||||
// This will mount the dir at its given path
|
||||
func (a *Driver) Get(id, mountLabel string) (string, error) {
|
||||
func (a *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
|
||||
a.locker.Lock(id)
|
||||
defer a.locker.Unlock(id)
|
||||
parents, err := a.getParentLayerPaths(id)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
a.pathCacheLock.Lock()
|
||||
|
@ -407,21 +408,21 @@ func (a *Driver) Get(id, mountLabel string) (string, error) {
|
|||
}
|
||||
}
|
||||
if count := a.ctr.Increment(m); count > 1 {
|
||||
return m, nil
|
||||
return containerfs.NewLocalContainerFS(m), nil
|
||||
}
|
||||
|
||||
// If a dir does not have a parent (no layers), do not try to mount;
|
||||
// just return the diff path to the data
|
||||
if len(parents) > 0 {
|
||||
if err := a.mount(id, m, mountLabel, parents); err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
a.pathCacheLock.Lock()
|
||||
a.pathCache[id] = m
|
||||
a.pathCacheLock.Unlock()
|
||||
return m, nil
|
||||
return containerfs.NewLocalContainerFS(m), nil
|
||||
}
|
||||
|
||||
// Put unmounts and updates list of active mounts.
|
||||
|
|
|
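For graphdrivers whose layers live on the daemon host (aufs here, btrfs and devicemapper below), the only change Get needs is to wrap the mount path in containerfs.NewLocalContainerFS before returning it. A stripped-down sketch of that shape, with a made-up toyDriver standing in for a real ProtoDriver implementation:

package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/docker/pkg/containerfs"
)

// toyDriver is a hypothetical driver that keeps every layer mounted under home.
type toyDriver struct {
	home string
}

// Get wraps the local mount path so callers see a ContainerFS, not a string.
func (d *toyDriver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
	mnt := filepath.Join(d.home, "mnt", id)
	// ... mount work would happen here in a real driver ...
	return containerfs.NewLocalContainerFS(mnt), nil
}

func main() {
	d := &toyDriver{home: "/var/lib/docker/toy"}
	fs, _ := d.Get("abc123", "")
	fmt.Println(fs.Path()) // /var/lib/docker/toy/mnt/abc123
}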
@ -9,11 +9,10 @@ import (
|
|||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
|
@ -43,6 +42,14 @@ func testInit(dir string, t testing.TB) graphdriver.Driver {
|
|||
return d
|
||||
}
|
||||
|
||||
func driverGet(d *Driver, id string, mntLabel string) (string, error) {
|
||||
mnt, err := d.Get(id, mntLabel)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return mnt.Path(), nil
|
||||
}
|
||||
|
||||
func newDriver(t testing.TB) *Driver {
|
||||
if err := os.MkdirAll(tmp, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -172,7 +179,7 @@ func TestGetWithoutParent(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
expected := path.Join(tmp, "diff", "1")
|
||||
if diffPath != expected {
|
||||
if diffPath.Path() != expected {
|
||||
t.Fatalf("Expected path %s got %s", expected, diffPath)
|
||||
}
|
||||
}
|
||||
|
@ -249,13 +256,13 @@ func TestMountWithParent(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if mntPath == "" {
|
||||
t.Fatal("mntPath should not be empty string")
|
||||
if mntPath == nil {
|
||||
t.Fatal("mntPath should not be nil")
|
||||
}
|
||||
|
||||
expected := path.Join(tmp, "mnt", "2")
|
||||
if mntPath != expected {
|
||||
t.Fatalf("Expected %s got %s", expected, mntPath)
|
||||
if mntPath.Path() != expected {
|
||||
t.Fatalf("Expected %s got %s", expected, mntPath.Path())
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -280,8 +287,8 @@ func TestRemoveMountedDir(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if mntPath == "" {
|
||||
t.Fatal("mntPath should not be empty string")
|
||||
if mntPath == nil {
|
||||
t.Fatal("mntPath should not be nil")
|
||||
}
|
||||
|
||||
mounted, err := d.mounted(d.pathCache["2"])
|
||||
|
@ -315,7 +322,7 @@ func TestGetDiff(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
diffPath, err := d.Get("1", "")
|
||||
diffPath, err := driverGet(d, "1", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -359,7 +366,7 @@ func TestChanges(t *testing.T) {
|
|||
}
|
||||
}()
|
||||
|
||||
mntPoint, err := d.Get("2", "")
|
||||
mntPoint, err := driverGet(d, "2", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -398,7 +405,7 @@ func TestChanges(t *testing.T) {
|
|||
if err := d.CreateReadWrite("3", "2", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mntPoint, err = d.Get("3", "")
|
||||
mntPoint, err = driverGet(d, "3", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -444,7 +451,7 @@ func TestDiffSize(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
diffPath, err := d.Get("1", "")
|
||||
diffPath, err := driverGet(d, "1", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -486,7 +493,7 @@ func TestChildDiffSize(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
diffPath, err := d.Get("1", "")
|
||||
diffPath, err := driverGet(d, "1", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -587,7 +594,7 @@ func TestApplyDiff(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
diffPath, err := d.Get("1", "")
|
||||
diffPath, err := driverGet(d, "1", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -622,7 +629,7 @@ func TestApplyDiff(t *testing.T) {
|
|||
|
||||
// Ensure that the file is in the mount point for id 3
|
||||
|
||||
mountPoint, err := d.Get("3", "")
|
||||
mountPoint, err := driverGet(d, "3", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -665,7 +672,7 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) {
|
|||
err := d.CreateReadWrite(current, parent, nil)
|
||||
require.NoError(t, err, "current layer %d", i)
|
||||
|
||||
point, err := d.Get(current, "")
|
||||
point, err := driverGet(d, current, "")
|
||||
require.NoError(t, err, "current layer %d", i)
|
||||
|
||||
f, err := os.Create(path.Join(point, current))
|
||||
|
@ -681,7 +688,7 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) {
|
|||
}
|
||||
|
||||
// Perform the actual mount for the top most image
|
||||
point, err := d.Get(last, "")
|
||||
point, err := driverGet(d, last, "")
|
||||
require.NoError(t, err)
|
||||
files, err := ioutil.ReadDir(point)
|
||||
require.NoError(t, err)
|
||||
|
|
|
@ -27,6 +27,7 @@ import (
|
|||
"unsafe"
|
||||
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/mount"
|
||||
"github.com/docker/docker/pkg/parsers"
|
||||
|
@ -631,29 +632,29 @@ func (d *Driver) Remove(id string) error {
|
|||
}
|
||||
|
||||
// Get the requested filesystem id.
|
||||
func (d *Driver) Get(id, mountLabel string) (string, error) {
|
||||
func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
|
||||
dir := d.subvolumesDirID(id)
|
||||
st, err := os.Stat(dir)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !st.IsDir() {
|
||||
return "", fmt.Errorf("%s: not a directory", dir)
|
||||
return nil, fmt.Errorf("%s: not a directory", dir)
|
||||
}
|
||||
|
||||
if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil {
|
||||
if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace {
|
||||
if err := d.subvolEnableQuota(); err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
if err := subvolLimitQgroup(dir, size); err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return dir, nil
|
||||
return containerfs.NewLocalContainerFS(dir), nil
|
||||
}
|
||||
|
||||
// Put is not implemented for BTRFS as there is no cleanup required for the id.
|
||||
|
|
|
@ -35,12 +35,14 @@ func TestBtrfsSubvolDelete(t *testing.T) {
|
|||
}
|
||||
defer graphtest.PutDriver(t)
|
||||
|
||||
dir, err := d.Get("test", "")
|
||||
dirFS, err := d.Get("test", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer d.Put("test")
|
||||
|
||||
dir := dirFS.Path()
|
||||
|
||||
if err := subvolCreate(dir, "subvoltest"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/devicemapper"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/locker"
|
||||
|
@ -163,41 +164,41 @@ func (d *Driver) Remove(id string) error {
|
|||
}
|
||||
|
||||
// Get mounts a device with given id into the root filesystem
|
||||
func (d *Driver) Get(id, mountLabel string) (string, error) {
|
||||
func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
|
||||
d.locker.Lock(id)
|
||||
defer d.locker.Unlock(id)
|
||||
mp := path.Join(d.home, "mnt", id)
|
||||
rootFs := path.Join(mp, "rootfs")
|
||||
if count := d.ctr.Increment(mp); count > 1 {
|
||||
return rootFs, nil
|
||||
return containerfs.NewLocalContainerFS(rootFs), nil
|
||||
}
|
||||
|
||||
uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
|
||||
if err != nil {
|
||||
d.ctr.Decrement(mp)
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create the target directories if they don't exist
|
||||
if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) {
|
||||
d.ctr.Decrement(mp)
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) {
|
||||
d.ctr.Decrement(mp)
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Mount the device
|
||||
if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil {
|
||||
d.ctr.Decrement(mp)
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) {
|
||||
d.ctr.Decrement(mp)
|
||||
d.DeviceSet.UnmountDevice(id, mp)
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
idFile := path.Join(mp, "id")
|
||||
|
@ -207,11 +208,11 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
|
|||
if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {
|
||||
d.ctr.Decrement(mp)
|
||||
d.DeviceSet.UnmountDevice(id, mp)
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return rootFs, nil
|
||||
return containerfs.NewLocalContainerFS(rootFs), nil
|
||||
}
|
||||
|
||||
// Put unmounts a device and removes it.
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
"github.com/vbatts/tar-split/tar/storage"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/plugingetter"
|
||||
)
|
||||
|
@ -68,7 +69,7 @@ type ProtoDriver interface {
|
|||
// Get returns the mountpoint for the layered filesystem referred
|
||||
// to by this id. You can optionally specify a mountLabel or "".
|
||||
// Returns the absolute path to the mounted layered filesystem.
|
||||
Get(id, mountLabel string) (dir string, err error)
|
||||
Get(id, mountLabel string) (fs containerfs.ContainerFS, err error)
|
||||
// Put releases the system resources for the specified id,
|
||||
// e.g, unmounting layered filesystem.
|
||||
Put(id string) error
|
||||
|
|
|
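Because Get now hands back a containerfs.ContainerFS, callers that genuinely need a host path (local drivers only) call .Path(), while driver-agnostic callers go through the continuity driver helpers, as the updated graphtest code further down does. A caller-side sketch using only calls visible in this diff (Get/Put, Path, Join, and contdriver.ReadFile):

package main

import (
	"fmt"

	contdriver "github.com/containerd/continuity/driver"
	"github.com/docker/docker/daemon/graphdriver"
)

// readLayerFile mounts a layer, reads one file through the filesystem's own
// driver (so it also works for non-local filesystems), and unmounts it again.
func readLayerFile(d graphdriver.Driver, id, name string) ([]byte, error) {
	root, err := d.Get(id, "")
	if err != nil {
		return nil, err
	}
	defer d.Put(id)

	return contdriver.ReadFile(root, root.Join(root.Path(), name))
}

func main() {
	fmt.Println("wire up a real graphdriver.Driver to use readLayerFile")
}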
@ -18,9 +18,9 @@ var (
|
|||
)
|
||||
|
||||
// NaiveDiffDriver takes a ProtoDriver and adds the
|
||||
// capability of the Diffing methods which it may or may not
|
||||
// support on its own. See the comment on the exported
|
||||
// NewNaiveDiffDriver function below.
|
||||
// capability of the Diffing methods on the local file system,
|
||||
// which it may or may not support on its own. See the comment
|
||||
// on the exported NewNaiveDiffDriver function below.
|
||||
// Notably, the AUFS driver doesn't need to be wrapped like this.
|
||||
type NaiveDiffDriver struct {
|
||||
ProtoDriver
|
||||
|
@ -47,10 +47,11 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err
|
|||
startTime := time.Now()
|
||||
driver := gdw.ProtoDriver
|
||||
|
||||
layerFs, err := driver.Get(id, "")
|
||||
layerRootFs, err := driver.Get(id, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
layerFs := layerRootFs.Path()
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
|
@ -70,12 +71,14 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err
|
|||
}), nil
|
||||
}
|
||||
|
||||
parentFs, err := driver.Get(parent, "")
|
||||
parentRootFs, err := driver.Get(parent, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer driver.Put(parent)
|
||||
|
||||
parentFs := parentRootFs.Path()
|
||||
|
||||
changes, err := archive.ChangesDirs(layerFs, parentFs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -104,20 +107,22 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err
|
|||
func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) {
|
||||
driver := gdw.ProtoDriver
|
||||
|
||||
layerFs, err := driver.Get(id, "")
|
||||
layerRootFs, err := driver.Get(id, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer driver.Put(id)
|
||||
|
||||
layerFs := layerRootFs.Path()
|
||||
parentFs := ""
|
||||
|
||||
if parent != "" {
|
||||
parentFs, err = driver.Get(parent, "")
|
||||
parentRootFs, err := driver.Get(parent, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer driver.Put(parent)
|
||||
parentFs = parentRootFs.Path()
|
||||
}
|
||||
|
||||
return archive.ChangesDirs(layerFs, parentFs)
|
||||
|
@ -130,12 +135,13 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size i
|
|||
driver := gdw.ProtoDriver
|
||||
|
||||
// Mount the root filesystem so we can apply the diff/layer.
|
||||
layerFs, err := driver.Get(id, "")
|
||||
layerRootFs, err := driver.Get(id, "")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer driver.Put(id)
|
||||
|
||||
layerFs := layerRootFs.Path()
|
||||
options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
|
||||
GIDMaps: gdw.gidMaps}
|
||||
start := time.Now().UTC()
|
||||
|
@ -165,5 +171,5 @@ func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error)
|
|||
}
|
||||
defer driver.Put(id)
|
||||
|
||||
return archive.ChangesSize(layerFs, changes), nil
|
||||
return archive.ChangesSize(layerFs.Path(), changes), nil
|
||||
}
|
||||
|
|
|
@ -5,9 +5,9 @@ package graphtest
|
|||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
contdriver "github.com/containerd/continuity/driver"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
@ -245,7 +245,7 @@ func DriverBenchDeepLayerRead(b *testing.B, layerCount int, drivername string, d
|
|||
for i := 0; i < b.N; i++ {
|
||||
|
||||
// Read content
|
||||
c, err := ioutil.ReadFile(filepath.Join(root, "testfile.txt"))
|
||||
c, err := contdriver.ReadFile(root, root.Join(root.Path(), "testfile.txt"))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
|
|
@ -97,10 +97,10 @@ func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...str
|
|||
dir, err := driver.Get("empty", "")
|
||||
require.NoError(t, err)
|
||||
|
||||
verifyFile(t, dir, 0755|os.ModeDir, 0, 0)
|
||||
verifyFile(t, dir.Path(), 0755|os.ModeDir, 0, 0)
|
||||
|
||||
// Verify that the directory is empty
|
||||
fis, err := readDir(dir)
|
||||
fis, err := readDir(dir, dir.Path())
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, fis, 0)
|
||||
|
||||
|
@ -328,9 +328,9 @@ func DriverTestSetQuota(t *testing.T, drivername string) {
|
|||
}
|
||||
|
||||
quota := uint64(50 * units.MiB)
|
||||
err = writeRandomFile(path.Join(mountPath, "file"), quota*2)
|
||||
|
||||
err = writeRandomFile(path.Join(mountPath.Path(), "file"), quota*2)
|
||||
if pathError, ok := err.(*os.PathError); ok && pathError.Err != unix.EDQUOT {
|
||||
t.Fatalf("expect write() to fail with %v, got %v", unix.EDQUOT, err)
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -3,12 +3,11 @@ package graphtest
|
|||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
|
||||
"github.com/containerd/continuity/driver"
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
|
@ -36,17 +35,17 @@ func addFiles(drv graphdriver.Driver, layer string, seed int64) error {
|
|||
}
|
||||
defer drv.Put(layer)
|
||||
|
||||
if err := ioutil.WriteFile(path.Join(root, "file-a"), randomContent(64, seed), 0755); err != nil {
|
||||
if err := driver.WriteFile(root, root.Join(root.Path(), "file-a"), randomContent(64, seed), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.MkdirAll(path.Join(root, "dir-b"), 0755); err != nil {
|
||||
if err := root.MkdirAll(root.Join(root.Path(), "dir-b"), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ioutil.WriteFile(path.Join(root, "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil {
|
||||
if err := driver.WriteFile(root, root.Join(root.Path(), "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ioutil.WriteFile(path.Join(root, "file-c"), randomContent(128*128, seed+2), 0755)
|
||||
return driver.WriteFile(root, root.Join(root.Path(), "file-c"), randomContent(128*128, seed+2), 0755)
|
||||
}
|
||||
|
||||
func checkFile(drv graphdriver.Driver, layer, filename string, content []byte) error {
|
||||
|
@ -56,7 +55,7 @@ func checkFile(drv graphdriver.Driver, layer, filename string, content []byte) e
|
|||
}
|
||||
defer drv.Put(layer)
|
||||
|
||||
fileContent, err := ioutil.ReadFile(path.Join(root, filename))
|
||||
fileContent, err := driver.ReadFile(root, root.Join(root.Path(), filename))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -75,7 +74,7 @@ func addFile(drv graphdriver.Driver, layer, filename string, content []byte) err
|
|||
}
|
||||
defer drv.Put(layer)
|
||||
|
||||
return ioutil.WriteFile(path.Join(root, filename), content, 0755)
|
||||
return driver.WriteFile(root, root.Join(root.Path(), filename), content, 0755)
|
||||
}
|
||||
|
||||
func addDirectory(drv graphdriver.Driver, layer, dir string) error {
|
||||
|
@ -85,7 +84,7 @@ func addDirectory(drv graphdriver.Driver, layer, dir string) error {
|
|||
}
|
||||
defer drv.Put(layer)
|
||||
|
||||
return os.MkdirAll(path.Join(root, dir), 0755)
|
||||
return root.MkdirAll(root.Join(root.Path(), dir), 0755)
|
||||
}
|
||||
|
||||
func removeAll(drv graphdriver.Driver, layer string, names ...string) error {
|
||||
|
@ -96,7 +95,7 @@ func removeAll(drv graphdriver.Driver, layer string, names ...string) error {
|
|||
defer drv.Put(layer)
|
||||
|
||||
for _, filename := range names {
|
||||
if err := os.RemoveAll(path.Join(root, filename)); err != nil {
|
||||
if err := root.RemoveAll(root.Join(root.Path(), filename)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -110,8 +109,8 @@ func checkFileRemoved(drv graphdriver.Driver, layer, filename string) error {
|
|||
}
|
||||
defer drv.Put(layer)
|
||||
|
||||
if _, err := os.Stat(path.Join(root, filename)); err == nil {
|
||||
return fmt.Errorf("file still exists: %s", path.Join(root, filename))
|
||||
if _, err := root.Stat(root.Join(root.Path(), filename)); err == nil {
|
||||
return fmt.Errorf("file still exists: %s", root.Join(root.Path(), filename))
|
||||
} else if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
@ -127,13 +126,13 @@ func addManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) e
|
|||
defer drv.Put(layer)
|
||||
|
||||
for i := 0; i < count; i += 100 {
|
||||
dir := path.Join(root, fmt.Sprintf("directory-%d", i))
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
dir := root.Join(root.Path(), fmt.Sprintf("directory-%d", i))
|
||||
if err := root.MkdirAll(dir, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
for j := 0; i+j < count && j < 100; j++ {
|
||||
file := path.Join(dir, fmt.Sprintf("file-%d", i+j))
|
||||
if err := ioutil.WriteFile(file, randomContent(64, seed+int64(i+j)), 0755); err != nil {
|
||||
file := root.Join(dir, fmt.Sprintf("file-%d", i+j))
|
||||
if err := driver.WriteFile(root, file, randomContent(64, seed+int64(i+j)), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -152,7 +151,7 @@ func changeManyFiles(drv graphdriver.Driver, layer string, count int, seed int64
|
|||
changes := []archive.Change{}
|
||||
for i := 0; i < count; i += 100 {
|
||||
archiveRoot := fmt.Sprintf("/directory-%d", i)
|
||||
if err := os.MkdirAll(path.Join(root, archiveRoot), 0755); err != nil {
|
||||
if err := root.MkdirAll(root.Join(root.Path(), archiveRoot), 0755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for j := 0; i+j < count && j < 100; j++ {
|
||||
|
@ -166,23 +165,23 @@ func changeManyFiles(drv graphdriver.Driver, layer string, count int, seed int64
|
|||
switch j % 3 {
|
||||
// Update file
|
||||
case 0:
|
||||
change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j))
|
||||
change.Path = root.Join(archiveRoot, fmt.Sprintf("file-%d", i+j))
|
||||
change.Kind = archive.ChangeModify
|
||||
if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil {
|
||||
if err := driver.WriteFile(root, root.Join(root.Path(), change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Add file
|
||||
case 1:
|
||||
change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j))
|
||||
change.Path = root.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j))
|
||||
change.Kind = archive.ChangeAdd
|
||||
if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil {
|
||||
if err := driver.WriteFile(root, root.Join(root.Path(), change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Remove file
|
||||
case 2:
|
||||
change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j))
|
||||
change.Path = root.Join(archiveRoot, fmt.Sprintf("file-%d", i+j))
|
||||
change.Kind = archive.ChangeDelete
|
||||
if err := os.Remove(path.Join(root, change.Path)); err != nil {
|
||||
if err := root.Remove(root.Join(root.Path(), change.Path)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
@ -201,10 +200,10 @@ func checkManyFiles(drv graphdriver.Driver, layer string, count int, seed int64)
|
|||
defer drv.Put(layer)
|
||||
|
||||
for i := 0; i < count; i += 100 {
|
||||
dir := path.Join(root, fmt.Sprintf("directory-%d", i))
|
||||
dir := root.Join(root.Path(), fmt.Sprintf("directory-%d", i))
|
||||
for j := 0; i+j < count && j < 100; j++ {
|
||||
file := path.Join(dir, fmt.Sprintf("file-%d", i+j))
|
||||
fileContent, err := ioutil.ReadFile(file)
|
||||
file := root.Join(dir, fmt.Sprintf("file-%d", i+j))
|
||||
fileContent, err := driver.ReadFile(root, file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -254,17 +253,17 @@ func addLayerFiles(drv graphdriver.Driver, layer, parent string, i int) error {
|
|||
}
|
||||
defer drv.Put(layer)
|
||||
|
||||
if err := ioutil.WriteFile(path.Join(root, "top-id"), []byte(layer), 0755); err != nil {
|
||||
if err := driver.WriteFile(root, root.Join(root.Path(), "top-id"), []byte(layer), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
layerDir := path.Join(root, fmt.Sprintf("layer-%d", i))
|
||||
if err := os.MkdirAll(layerDir, 0755); err != nil {
|
||||
layerDir := root.Join(root.Path(), fmt.Sprintf("layer-%d", i))
|
||||
if err := root.MkdirAll(layerDir, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ioutil.WriteFile(path.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil {
|
||||
if err := driver.WriteFile(root, root.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ioutil.WriteFile(path.Join(layerDir, "parent-id"), []byte(parent), 0755); err != nil {
|
||||
if err := driver.WriteFile(root, root.Join(layerDir, "parent-id"), []byte(parent), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -295,7 +294,7 @@ func checkManyLayers(drv graphdriver.Driver, layer string, count int) error {
|
|||
}
|
||||
defer drv.Put(layer)
|
||||
|
||||
layerIDBytes, err := ioutil.ReadFile(path.Join(root, "top-id"))
|
||||
layerIDBytes, err := driver.ReadFile(root, root.Join(root.Path(), "top-id"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -305,16 +304,16 @@ func checkManyLayers(drv graphdriver.Driver, layer string, count int) error {
|
|||
}
|
||||
|
||||
for i := count; i > 0; i-- {
|
||||
layerDir := path.Join(root, fmt.Sprintf("layer-%d", i))
|
||||
layerDir := root.Join(root.Path(), fmt.Sprintf("layer-%d", i))
|
||||
|
||||
thisLayerIDBytes, err := ioutil.ReadFile(path.Join(layerDir, "layer-id"))
|
||||
thisLayerIDBytes, err := driver.ReadFile(root, root.Join(layerDir, "layer-id"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(thisLayerIDBytes, layerIDBytes) {
|
||||
return fmt.Errorf("mismatched file content %v, expecting %v", thisLayerIDBytes, layerIDBytes)
|
||||
}
|
||||
layerIDBytes, err = ioutil.ReadFile(path.Join(layerDir, "parent-id"))
|
||||
layerIDBytes, err = driver.ReadFile(root, root.Join(layerDir, "parent-id"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -322,11 +321,11 @@ func checkManyLayers(drv graphdriver.Driver, layer string, count int) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// readDir reads a directory just like ioutil.ReadDir()
|
||||
// readDir reads a directory just like driver.ReadDir()
|
||||
// then hides specific files (currently "lost+found")
|
||||
// so the tests don't "see" it
|
||||
func readDir(dir string) ([]os.FileInfo, error) {
|
||||
a, err := ioutil.ReadDir(dir)
|
||||
func readDir(r driver.Driver, dir string) ([]os.FileInfo, error) {
|
||||
a, err := driver.ReadDir(r, dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
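The rewritten helpers above never touch os or ioutil directly; everything goes through the ContainerFS itself (MkdirAll, Remove, Stat) or the continuity helpers (driver.WriteFile, driver.ReadFile, driver.ReadDir), so the same tests can exercise a remote LCOW filesystem. A condensed version of that pattern, assuming a ContainerFS such as one obtained from a graphdriver Get call; the demo path is invented.

package main

import (
	"fmt"

	contdriver "github.com/containerd/continuity/driver"
	"github.com/docker/docker/pkg/containerfs"
)

// populate writes a tiny tree into root using only driver-routed operations.
func populate(root containerfs.ContainerFS) error {
	dir := root.Join(root.Path(), "dir-a")
	if err := root.MkdirAll(dir, 0755); err != nil {
		return err
	}
	if err := contdriver.WriteFile(root, root.Join(dir, "file-a"), []byte("hello"), 0644); err != nil {
		return err
	}
	content, err := contdriver.ReadFile(root, root.Join(dir, "file-a"))
	if err != nil {
		return err
	}
	fmt.Printf("read back %q\n", content)
	return nil
}

func main() {
	// Illustrative local root; in the tests this comes from drv.Get(layer, "").
	root := containerfs.NewLocalContainerFS("/tmp/containerfs-demo")
	if err := populate(root); err != nil {
		fmt.Println("populate failed:", err)
	}
}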
@ -3,12 +3,11 @@
|
|||
package graphtest
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
contdriver "github.com/containerd/continuity/driver"
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
@ -40,31 +39,31 @@ func createBase(t testing.TB, driver graphdriver.Driver, name string) {
|
|||
err := driver.CreateReadWrite(name, "", nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
dir, err := driver.Get(name, "")
|
||||
dirFS, err := driver.Get(name, "")
|
||||
require.NoError(t, err)
|
||||
defer driver.Put(name)
|
||||
|
||||
subdir := path.Join(dir, "a subdir")
|
||||
require.NoError(t, os.Mkdir(subdir, 0705|os.ModeSticky))
|
||||
require.NoError(t, os.Chown(subdir, 1, 2))
|
||||
subdir := dirFS.Join(dirFS.Path(), "a subdir")
|
||||
require.NoError(t, dirFS.Mkdir(subdir, 0705|os.ModeSticky))
|
||||
require.NoError(t, dirFS.Lchown(subdir, 1, 2))
|
||||
|
||||
file := path.Join(dir, "a file")
|
||||
err = ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid)
|
||||
file := dirFS.Join(dirFS.Path(), "a file")
|
||||
err = contdriver.WriteFile(dirFS, file, []byte("Some data"), 0222|os.ModeSetuid)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func verifyBase(t testing.TB, driver graphdriver.Driver, name string) {
|
||||
dir, err := driver.Get(name, "")
|
||||
dirFS, err := driver.Get(name, "")
|
||||
require.NoError(t, err)
|
||||
defer driver.Put(name)
|
||||
|
||||
subdir := path.Join(dir, "a subdir")
|
||||
subdir := dirFS.Join(dirFS.Path(), "a subdir")
|
||||
verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2)
|
||||
|
||||
file := path.Join(dir, "a file")
|
||||
file := dirFS.Join(dirFS.Path(), "a file")
|
||||
verifyFile(t, file, 0222|os.ModeSetuid, 0, 0)
|
||||
|
||||
files, err := readDir(dir)
|
||||
files, err := readDir(dirFS, dirFS.Path())
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, files, 2)
|
||||
}
|
||||
|
|
|
@ -65,12 +65,14 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/Microsoft/opengcs/client"
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
|
@ -106,72 +108,24 @@ const (
|
|||
|
||||
// scratchDirectory is the sub-folder under the driver's data-root used for scratch VHDs in service VMs
|
||||
scratchDirectory = "scratch"
|
||||
|
||||
// errOperationPending is the HRESULT returned by the HCS when the VM termination operation is still pending.
|
||||
errOperationPending syscall.Errno = 0xc0370103
|
||||
)
|
||||
|
||||
// cacheItem is our internal structure representing an item in our local cache
|
||||
// of things that have been mounted.
|
||||
type cacheItem struct {
|
||||
sync.Mutex // Protects operations performed on this item
|
||||
uvmPath string // Path in utility VM
|
||||
hostPath string // Path on host
|
||||
refCount int // How many times its been mounted
|
||||
isSandbox bool // True if a sandbox
|
||||
isMounted bool // True when mounted in a service VM
|
||||
}
|
||||
|
||||
// setIsMounted is a helper function for a cacheItem which does exactly what it says
|
||||
func (ci *cacheItem) setIsMounted() {
|
||||
logrus.Debugf("locking cache item for set isMounted")
|
||||
ci.Lock()
|
||||
defer ci.Unlock()
|
||||
ci.isMounted = true
|
||||
logrus.Debugf("set isMounted on cache item")
|
||||
}
|
||||
|
||||
// incrementRefCount is a helper function for a cacheItem which does exactly what it says
|
||||
func (ci *cacheItem) incrementRefCount() {
|
||||
logrus.Debugf("locking cache item for increment")
|
||||
ci.Lock()
|
||||
defer ci.Unlock()
|
||||
ci.refCount++
|
||||
logrus.Debugf("incremented refcount on cache item %+v", ci)
|
||||
}
|
||||
|
||||
// decrementRefCount is a helper function for a cacheItem which does exactly what it says
|
||||
func (ci *cacheItem) decrementRefCount() int {
|
||||
logrus.Debugf("locking cache item for decrement")
|
||||
ci.Lock()
|
||||
defer ci.Unlock()
|
||||
ci.refCount--
|
||||
logrus.Debugf("decremented refcount on cache item %+v", ci)
|
||||
return ci.refCount
|
||||
}
|
||||
|
||||
// serviceVMItem is our internal structure representing an item in our
|
||||
// map of service VMs we are maintaining.
|
||||
type serviceVMItem struct {
|
||||
sync.Mutex // Serialises operations being performed in this service VM.
|
||||
scratchAttached bool // Has a scratch been attached?
|
||||
config *client.Config // Represents the service VM item.
|
||||
}
|
||||
|
||||
// Driver represents an LCOW graph driver.
|
||||
type Driver struct {
|
||||
dataRoot string // Root path on the host where we are storing everything.
|
||||
cachedSandboxFile string // Location of the local default-sized cached sandbox.
|
||||
cachedSandboxMutex sync.Mutex // Protects race conditions from multiple threads creating the cached sandbox.
|
||||
cachedScratchFile string // Location of the local cached empty scratch space.
|
||||
cachedScratchMutex sync.Mutex // Protects race conditions from multiple threads creating the cached scratch.
|
||||
options []string // Graphdriver options we are initialised with.
|
||||
serviceVmsMutex sync.Mutex // Protects add/updates/delete to the serviceVMs map.
|
||||
serviceVms map[string]*serviceVMItem // Map of the configs representing the service VM(s) we are running.
|
||||
globalMode bool // Indicates if running in an unsafe/global service VM mode.
|
||||
dataRoot string // Root path on the host where we are storing everything.
|
||||
cachedSandboxFile string // Location of the local default-sized cached sandbox.
|
||||
cachedSandboxMutex sync.Mutex // Protects race conditions from multiple threads creating the cached sandbox.
|
||||
cachedScratchFile string // Location of the local cached empty scratch space.
|
||||
cachedScratchMutex sync.Mutex // Protects race conditions from multiple threads creating the cached scratch.
|
||||
options []string // Graphdriver options we are initialised with.
|
||||
globalMode bool // Indicates if running in an unsafe/global service VM mode.
|
||||
|
||||
// NOTE: It is OK to use a cache here because Windows does not support
|
||||
// restoring containers when the daemon dies.
|
||||
|
||||
cacheMutex sync.Mutex // Protects add/update/deletes to cache.
|
||||
cache map[string]*cacheItem // Map holding a cache of all the IDs we've mounted/unmounted.
|
||||
serviceVms *serviceVMMap // Map of the configs representing the service VM(s) we are running.
|
||||
}
|
||||
|
||||
// layerDetails is the structure returned by a helper function `getLayerDetails`
|
||||
|
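The old per-driver cache map plus serviceVms map is collapsed into a single serviceVMMap whose entries carry their own refcount, so concurrent Get/Put/terminate calls can coordinate on one lock. The serviceVMMap type itself lives in another file of this PR; the sketch below is a generic version of that add/decrement bookkeeping, with all names invented here, to show the shape of the pattern.

package main

import (
	"fmt"
	"sync"
)

// vmEntry is a hypothetical refcounted entry, standing in for serviceVMMapItem.
type vmEntry struct {
	refCount int
	// the real entry also carries the service VM handle itself
}

type vmMap struct {
	sync.Mutex
	entries map[string]*vmEntry
}

// add registers a user of the VM for id and reports whether it already existed.
func (m *vmMap) add(id string) (exists bool) {
	m.Lock()
	defer m.Unlock()
	e, ok := m.entries[id]
	if !ok {
		e = &vmEntry{}
		m.entries[id] = e
	}
	e.refCount++
	return ok
}

// decrement drops one user and reports whether the VM can now be terminated.
func (m *vmMap) decrement(id string) (lastUser bool) {
	m.Lock()
	defer m.Unlock()
	e, ok := m.entries[id]
	if !ok {
		return false
	}
	e.refCount--
	if e.refCount <= 0 {
		delete(m.entries, id)
		return true
	}
	return false
}

func main() {
	m := &vmMap{entries: map[string]*vmEntry{}}
	fmt.Println(m.add("svm-1"))       // false: first user starts the VM
	fmt.Println(m.add("svm-1"))       // true: already running, just hot-add disks
	fmt.Println(m.decrement("svm-1")) // false: still in use
	fmt.Println(m.decrement("svm-1")) // true: last user, safe to terminate
}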
@ -204,9 +158,10 @@ func InitDriver(dataRoot string, options []string, _, _ []idtools.IDMap) (graphd
|
|||
options: options,
|
||||
cachedSandboxFile: filepath.Join(cd, sandboxFilename),
|
||||
cachedScratchFile: filepath.Join(cd, scratchFilename),
|
||||
cache: make(map[string]*cacheItem),
|
||||
serviceVms: make(map[string]*serviceVMItem),
|
||||
globalMode: false,
|
||||
serviceVms: &serviceVMMap{
|
||||
svms: make(map[string]*serviceVMMapItem),
|
||||
},
|
||||
globalMode: false,
|
||||
}
|
||||
|
||||
// Looks for relevant options
|
||||
|
@ -248,53 +203,59 @@ func InitDriver(dataRoot string, options []string, _, _ []idtools.IDMap) (graphd
|
|||
return d, nil
|
||||
}
|
||||
|
||||
func (d *Driver) getVMID(id string) string {
|
||||
if d.globalMode {
|
||||
return svmGlobalID
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// startServiceVMIfNotRunning starts a service utility VM if it is not currently running.
|
||||
// It can optionally be started with a mapped virtual disk. Returns an opengcs config structure
|
||||
// representing the VM.
|
||||
func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd *hcsshim.MappedVirtualDisk, context string) (*serviceVMItem, error) {
|
||||
func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd []hcsshim.MappedVirtualDisk, context string) (_ *serviceVM, err error) {
|
||||
// Use the global ID if in global mode
|
||||
if d.globalMode {
|
||||
id = svmGlobalID
|
||||
}
|
||||
id = d.getVMID(id)
|
||||
|
||||
title := fmt.Sprintf("lcowdriver: startservicevmifnotrunning %s:", id)
|
||||
|
||||
// Make sure thread-safe when interrogating the map
|
||||
logrus.Debugf("%s taking serviceVmsMutex", title)
|
||||
d.serviceVmsMutex.Lock()
|
||||
// Attempt to add ID to the service vm map
|
||||
logrus.Debugf("%s: Adding entry to service vm map", title)
|
||||
svm, exists, err := d.serviceVms.add(id)
|
||||
if err != nil && err == errVMisTerminating {
|
||||
// VM is in the process of terminating. Wait until it's done and then try again
|
||||
logrus.Debugf("%s: VM with current ID still in the process of terminating: %s", title, id)
|
||||
if err := svm.getStopError(); err != nil {
|
||||
logrus.Debugf("%s: VM %s did not stop succesfully: %s", title, id, err)
|
||||
return nil, err
|
||||
}
|
||||
return d.startServiceVMIfNotRunning(id, mvdToAdd, context)
|
||||
} else if err != nil {
|
||||
logrus.Debugf("%s: failed to add service vm to map: %s", err)
|
||||
return nil, fmt.Errorf("%s: failed to add to service vm map: %s", title, err)
|
||||
}
|
||||
|
||||
// Nothing to do if it's already running except add the mapped drive if supplied.
|
||||
if svm, ok := d.serviceVms[id]; ok {
|
||||
logrus.Debugf("%s exists, releasing serviceVmsMutex", title)
|
||||
d.serviceVmsMutex.Unlock()
|
||||
|
||||
if mvdToAdd != nil {
|
||||
logrus.Debugf("hot-adding %s to %s", mvdToAdd.HostPath, mvdToAdd.ContainerPath)
|
||||
|
||||
// Ensure the item is locked while doing this
|
||||
logrus.Debugf("%s locking serviceVmItem %s", title, svm.config.Name)
|
||||
svm.Lock()
|
||||
|
||||
if err := svm.config.HotAddVhd(mvdToAdd.HostPath, mvdToAdd.ContainerPath, false, true); err != nil {
|
||||
logrus.Debugf("%s releasing serviceVmItem %s on hot-add failure %s", title, svm.config.Name, err)
|
||||
svm.Unlock()
|
||||
return nil, fmt.Errorf("%s hot add %s to %s failed: %s", title, mvdToAdd.HostPath, mvdToAdd.ContainerPath, err)
|
||||
}
|
||||
|
||||
logrus.Debugf("%s releasing serviceVmItem %s", title, svm.config.Name)
|
||||
svm.Unlock()
|
||||
if exists {
|
||||
// Service VM is already up and running. In this case, just hot add the vhds.
|
||||
logrus.Debugf("%s: service vm already exists. Just hot adding: %+v", title, mvdToAdd)
|
||||
if err := svm.hotAddVHDs(mvdToAdd...); err != nil {
|
||||
logrus.Debugf("%s: failed to hot add vhds on service vm creation: %s", title, err)
|
||||
return nil, fmt.Errorf("%s: failed to hot add vhds on service vm: %s", title, err)
|
||||
}
|
||||
return svm, nil
|
||||
}
|
||||
|
||||
// Release the lock early
|
||||
logrus.Debugf("%s releasing serviceVmsMutex", title)
|
||||
d.serviceVmsMutex.Unlock()
|
||||
// We are the first service for this id, so we need to start it
|
||||
logrus.Debugf("%s: service vm doesn't exist. Now starting it up: %s", title, id)
|
||||
|
||||
// So we are starting one. First need an empty structure.
|
||||
svm := &serviceVMItem{
|
||||
config: &client.Config{},
|
||||
}
|
||||
defer func() {
|
||||
// Signal that start has finished, passing in the error if any.
|
||||
svm.signalStartFinished(err)
|
||||
if err != nil {
|
||||
// We added a ref to the VM, since we failed, we should delete the ref.
|
||||
d.terminateServiceVM(id, "error path on startServiceVMIfNotRunning", false)
|
||||
}
|
||||
}()
|
||||
|
||||
// Generate a default configuration
|
||||
if err := svm.config.GenerateDefault(d.options); err != nil {
|
||||
|
@ -335,12 +296,14 @@ func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd *hcsshim.MappedV
|
|||
svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvd)
|
||||
svm.scratchAttached = true
|
||||
}
|
||||
|
||||
logrus.Debugf("%s releasing cachedScratchMutex", title)
|
||||
d.cachedScratchMutex.Unlock()
|
||||
|
||||
// If requested to start it with a mapped virtual disk, add it now.
|
||||
if mvdToAdd != nil {
|
||||
svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, *mvdToAdd)
|
||||
svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvdToAdd...)
|
||||
for _, mvd := range svm.config.MappedVirtualDisks {
|
||||
svm.attachedVHDs[mvd.HostPath] = 1
|
||||
}
|
||||
|
||||
// Start it.
|
||||
|
@ -349,108 +312,80 @@ func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd *hcsshim.MappedV
|
|||
return nil, fmt.Errorf("failed to start service utility VM (%s): %s", context, err)
|
||||
}
|
||||
|
||||
// As it's now running, add it to the map, checking for a race where another
|
||||
// thread has simultaneously tried to start it.
|
||||
logrus.Debugf("%s locking serviceVmsMutex for insertion", title)
|
||||
d.serviceVmsMutex.Lock()
|
||||
if svm, ok := d.serviceVms[id]; ok {
|
||||
logrus.Debugf("%s releasing serviceVmsMutex after insertion but exists", title)
|
||||
d.serviceVmsMutex.Unlock()
|
||||
return svm, nil
|
||||
}
|
||||
d.serviceVms[id] = svm
|
||||
logrus.Debugf("%s releasing serviceVmsMutex after insertion", title)
|
||||
d.serviceVmsMutex.Unlock()
|
||||
// defer function to terminate the VM if the next steps fail
|
||||
defer func() {
|
||||
if err != nil {
|
||||
waitTerminate(svm, fmt.Sprintf("startServiceVmIfNotRunning: %s (%s)", id, context))
|
||||
}
|
||||
}()
|
||||
|
||||
// Now we have a running service VM, we can create the cached scratch file if it doesn't exist.
|
||||
logrus.Debugf("%s locking cachedScratchMutex", title)
|
||||
d.cachedScratchMutex.Lock()
|
||||
if _, err := os.Stat(d.cachedScratchFile); err != nil {
|
||||
logrus.Debugf("%s (%s): creating an SVM scratch - locking serviceVM", title, context)
|
||||
svm.Lock()
|
||||
logrus.Debugf("%s (%s): creating an SVM scratch", title, context)
|
||||
|
||||
// Don't use svm.CreateExt4Vhdx since that only works when the service vm is setup,
|
||||
// but we're still in that process right now.
|
||||
if err := svm.config.CreateExt4Vhdx(scratchTargetFile, client.DefaultVhdxSizeGB, d.cachedScratchFile); err != nil {
|
||||
logrus.Debugf("%s (%s): releasing serviceVM on error path from CreateExt4Vhdx: %s", title, context, err)
|
||||
svm.Unlock()
|
||||
logrus.Debugf("%s (%s): releasing cachedScratchMutex on error path", title, context)
|
||||
d.cachedScratchMutex.Unlock()
|
||||
|
||||
// Do a force terminate and remove it from the map on failure, ignoring any errors
|
||||
if err2 := d.terminateServiceVM(id, "error path from CreateExt4Vhdx", true); err2 != nil {
|
||||
logrus.Warnf("failed to terminate service VM on error path from CreateExt4Vhdx: %s", err2)
|
||||
}
|
||||
|
||||
logrus.Debugf("%s: failed to create vm scratch %s: %s", title, scratchTargetFile, err)
|
||||
return nil, fmt.Errorf("failed to create SVM scratch VHDX (%s): %s", context, err)
|
||||
}
|
||||
logrus.Debugf("%s (%s): releasing serviceVM after %s created and cached to %s", title, context, scratchTargetFile, d.cachedScratchFile)
|
||||
svm.Unlock()
|
||||
}
|
||||
logrus.Debugf("%s (%s): releasing cachedScratchMutex", title, context)
|
||||
d.cachedScratchMutex.Unlock()
|
||||
|
||||
// Hot-add the scratch-space if not already attached
|
||||
if !svm.scratchAttached {
|
||||
logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) hot-adding scratch %s - locking serviceVM", context, scratchTargetFile)
|
||||
svm.Lock()
|
||||
if err := svm.config.HotAddVhd(scratchTargetFile, toolsScratchPath, false, true); err != nil {
|
||||
logrus.Debugf("%s (%s): releasing serviceVM on error path of HotAddVhd: %s", title, context, err)
|
||||
svm.Unlock()
|
||||
|
||||
// Do a force terminate and remove it from the map on failure, ignoring any errors
|
||||
if err2 := d.terminateServiceVM(id, "error path from HotAddVhd", true); err2 != nil {
|
||||
logrus.Warnf("failed to terminate service VM on error path from HotAddVhd: %s", err2)
|
||||
}
|
||||
|
||||
logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) hot-adding scratch %s", context, scratchTargetFile)
|
||||
if err := svm.hotAddVHDsAtStart(hcsshim.MappedVirtualDisk{
|
||||
HostPath: scratchTargetFile,
|
||||
ContainerPath: toolsScratchPath,
|
||||
CreateInUtilityVM: true,
|
||||
}); err != nil {
|
||||
logrus.Debugf("%s: failed to hot-add scratch %s: %s", title, scratchTargetFile, err)
|
||||
return nil, fmt.Errorf("failed to hot-add %s failed: %s", scratchTargetFile, err)
|
||||
}
|
||||
logrus.Debugf("%s (%s): releasing serviceVM", title, context)
|
||||
svm.Unlock()
|
||||
svm.scratchAttached = true
|
||||
}
|
||||
|
||||
logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) success", context)
|
||||
return svm, nil
|
||||
}
|
||||
|
||||
// getServiceVM returns the appropriate service utility VM instance, optionally
|
||||
// deleting it from the map (but not the global one)
|
||||
func (d *Driver) getServiceVM(id string, deleteFromMap bool) (*serviceVMItem, error) {
|
||||
logrus.Debugf("lcowdriver: getservicevm:locking serviceVmsMutex")
|
||||
d.serviceVmsMutex.Lock()
|
||||
defer func() {
|
||||
logrus.Debugf("lcowdriver: getservicevm:releasing serviceVmsMutex")
|
||||
d.serviceVmsMutex.Unlock()
|
||||
}()
|
||||
if d.globalMode {
|
||||
id = svmGlobalID
|
||||
}
|
||||
if _, ok := d.serviceVms[id]; !ok {
|
||||
return nil, fmt.Errorf("getservicevm for %s failed as not found", id)
|
||||
}
|
||||
svm := d.serviceVms[id]
|
||||
if deleteFromMap && id != svmGlobalID {
|
||||
logrus.Debugf("lcowdriver: getservicevm: removing %s from map", id)
|
||||
delete(d.serviceVms, id)
|
||||
}
|
||||
return svm, nil
|
||||
}
|
||||
|
||||
// terminateServiceVM terminates a service utility VM if it's running, but does nothing
|
||||
// when in global mode as its lifetime is limited to that of the daemon.
|
||||
func (d *Driver) terminateServiceVM(id, context string, force bool) error {
|
||||
|
||||
// terminateServiceVM terminates a service utility VM if it's running and is
|
||||
// not being used by any goroutine, but does nothing when in global mode as its
|
||||
// lifetime is limited to that of the daemon. If the force flag is set, then
|
||||
// the VM will be killed regardless of the ref count or if it's global.
|
||||
func (d *Driver) terminateServiceVM(id, context string, force bool) (err error) {
|
||||
// We don't do anything in safe mode unless the force flag has been passed, which
|
||||
// is only the case for cleanup at driver termination.
|
||||
if d.globalMode {
|
||||
if !force {
|
||||
logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - doing nothing as in global mode", id, context)
|
||||
return nil
|
||||
}
|
||||
id = svmGlobalID
|
||||
if d.globalMode && !force {
|
||||
logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - doing nothing as in global mode", id, context)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get the service VM and delete it from the map
|
||||
svm, err := d.getServiceVM(id, true)
|
||||
if err != nil {
|
||||
return err
|
||||
id = d.getVMID(id)
|
||||
|
||||
var svm *serviceVM
|
||||
var lastRef bool
|
||||
if !force {
|
||||
// In the non-force case, we just decrement the ref count
|
||||
svm, lastRef, err = d.serviceVms.decrementRefCount(id)
|
||||
} else {
|
||||
// In the force case, we ignore the ref count and just set it to 0
|
||||
svm, err = d.serviceVms.setRefCountZero(id)
|
||||
lastRef = true
|
||||
}
|
||||
|
||||
if err == errVMUnknown {
|
||||
return nil
|
||||
} else if err == errVMisTerminating {
|
||||
return svm.getStopError()
|
||||
} else if !lastRef {
|
||||
return nil
|
||||
}
|
||||
|
||||
// We run the deletion of the scratch as a deferred function to at least attempt
|
||||
|
@ -459,29 +394,67 @@ func (d *Driver) terminateServiceVM(id, context string, force bool) error {
|
|||
if svm.scratchAttached {
|
||||
scratchTargetFile := filepath.Join(d.dataRoot, scratchDirectory, fmt.Sprintf("%s.vhdx", id))
|
||||
logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - deleting scratch %s", id, context, scratchTargetFile)
|
||||
if err := os.Remove(scratchTargetFile); err != nil {
|
||||
logrus.Warnf("failed to remove scratch file %s (%s): %s", scratchTargetFile, context, err)
|
||||
if errRemove := os.Remove(scratchTargetFile); errRemove != nil {
|
||||
logrus.Warnf("failed to remove scratch file %s (%s): %s", scratchTargetFile, context, errRemove)
|
||||
err = errRemove
|
||||
}
|
||||
}
|
||||
|
||||
// This function shouldn't actually return error unless there is a bug
|
||||
if errDelete := d.serviceVms.deleteID(id); errDelete != nil {
|
||||
logrus.Warnf("failed to service vm from svm map %s (%s): %s", id, context, errDelete)
|
||||
}
|
||||
|
||||
// Signal that this VM has stopped
|
||||
svm.signalStopFinished(err)
|
||||
}()
|
||||
|
||||
// Nothing to do if it's not running
|
||||
if svm.config.Uvm != nil {
|
||||
logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - calling terminate", id, context)
|
||||
if err := svm.config.Uvm.Terminate(); err != nil {
|
||||
return fmt.Errorf("failed to terminate utility VM (%s): %s", context, err)
|
||||
}
|
||||
// It's possible that the service VM failed to start and we are now trying to terminate it.
|
||||
// In this case, we will relay the error to the goroutines waiting for this vm to stop.
|
||||
if err := svm.getStartError(); err != nil {
|
||||
logrus.Debugf("lcowdriver: terminateservicevm: %s had failed to start up: %s", id, err)
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - waiting for utility VM to terminate", id, context)
|
||||
if err := svm.config.Uvm.WaitTimeout(time.Duration(svm.config.UvmTimeoutSeconds) * time.Second); err != nil {
|
||||
return fmt.Errorf("failed waiting for utility VM to terminate (%s): %s", context, err)
|
||||
}
|
||||
if err := waitTerminate(svm, fmt.Sprintf("terminateservicevm: %s (%s)", id, context)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - success", id, context)
|
||||
return nil
|
||||
}
|
||||
|
||||
func waitTerminate(svm *serviceVM, context string) error {
|
||||
if svm.config == nil {
|
||||
return fmt.Errorf("lcowdriver: waitTermiante: Nil utility VM. %s", context)
|
||||
}
|
||||
|
||||
logrus.Debugf("lcowdriver: waitTerminate: Calling terminate: %s", context)
|
||||
if err := svm.config.Uvm.Terminate(); err != nil {
|
||||
// We might get operation still pending from the HCS. In that case, we shouldn't return
|
||||
// an error since we call wait right after.
|
||||
underlyingError := err
|
||||
if conterr, ok := err.(*hcsshim.ContainerError); ok {
|
||||
underlyingError = conterr.Err
|
||||
}
|
||||
|
||||
if syscallErr, ok := underlyingError.(syscall.Errno); ok {
|
||||
underlyingError = syscallErr
|
||||
}
|
||||
|
||||
if underlyingError != errOperationPending {
|
||||
return fmt.Errorf("failed to terminate utility VM (%s): %s", context, err)
|
||||
}
|
||||
logrus.Debugf("lcowdriver: waitTerminate: uvm.Terminate() returned operation pending (%s)", context)
|
||||
}
|
||||
|
||||
logrus.Debugf("lcowdriver: waitTerminate: (%s) - waiting for utility VM to terminate", context)
|
||||
if err := svm.config.Uvm.WaitTimeout(time.Duration(svm.config.UvmTimeoutSeconds) * time.Second); err != nil {
|
||||
return fmt.Errorf("failed waiting for utility VM to terminate (%s): %s", context, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns the string representation of a driver. This should match
|
||||
// the name the graph driver has been registered with.
|
||||
func (d *Driver) String() string {
|
||||
|
@ -571,25 +544,18 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
|
|||
}()
|
||||
}
|
||||
|
||||
// Synchronise the operation in the service VM.
|
||||
logrus.Debugf("%s: locking svm for sandbox creation", title)
|
||||
svm.Lock()
|
||||
defer func() {
|
||||
logrus.Debugf("%s: releasing svm for sandbox creation", title)
|
||||
svm.Unlock()
|
||||
}()
|
||||
|
||||
// Make sure we don't write to our local cached copy if this is for a non-default size request.
|
||||
targetCacheFile := d.cachedSandboxFile
|
||||
if sandboxSize != client.DefaultVhdxSizeGB {
|
||||
targetCacheFile = ""
|
||||
}
|
||||
|
||||
// Actually do the creation.
|
||||
if err := svm.config.CreateExt4Vhdx(filepath.Join(d.dir(id), sandboxFilename), uint32(sandboxSize), targetCacheFile); err != nil {
|
||||
// Create the ext4 vhdx
|
||||
logrus.Debugf("%s: creating sandbox ext4 vhdx", title)
|
||||
if err := svm.createExt4VHDX(filepath.Join(d.dir(id), sandboxFilename), uint32(sandboxSize), targetCacheFile); err != nil {
|
||||
logrus.Debugf("%s: failed to create sandbox vhdx for %s: %s", title, id, err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -638,6 +604,21 @@ func (d *Driver) Remove(id string) error {
|
|||
layerPath := d.dir(id)
|
||||
|
||||
logrus.Debugf("lcowdriver: remove: id %s: layerPath %s", id, layerPath)
|
||||
|
||||
// Unmount all the layers
|
||||
err := d.Put(id)
|
||||
if err != nil {
|
||||
logrus.Debugf("lcowdriver: remove id %s: failed to unmount: %s", id, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// for non-global case just kill the vm
|
||||
if !d.globalMode {
|
||||
if err := d.terminateServiceVM(id, fmt.Sprintf("Remove %s", id), true); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
@ -659,43 +640,24 @@ func (d *Driver) Remove(id string) error {
|
|||
// For optimisation, we don't actually mount the filesystem (which in our
|
||||
// case means [hot-]adding it to a service VM. But we track that and defer
|
||||
// the actual adding to the point we need to access it.
|
||||
func (d *Driver) Get(id, mountLabel string) (string, error) {
|
||||
func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
|
||||
title := fmt.Sprintf("lcowdriver: get: %s", id)
|
||||
logrus.Debugf(title)
|
||||
|
||||
// Work out what we are working on
|
||||
ld, err := getLayerDetails(d.dir(id))
|
||||
// Generate the mounts needed for the deferred operation.
|
||||
disks, err := d.getAllMounts(id)
|
||||
if err != nil {
|
||||
logrus.Debugf("%s failed to get layer details from %s: %s", title, d.dir(id), err)
|
||||
return "", fmt.Errorf("%s failed to open layer or sandbox VHD to open in %s: %s", title, d.dir(id), err)
|
||||
logrus.Debugf("%s failed to get all layer details for %s: %s", title, d.dir(id), err)
|
||||
return nil, fmt.Errorf("%s failed to get layer details for %s: %s", title, d.dir(id), err)
|
||||
}
|
||||
logrus.Debugf("%s %s, size %d, isSandbox %t", title, ld.filename, ld.size, ld.isSandbox)
|
||||
|
||||
// Add item to cache, or update existing item, but ensure we have the
|
||||
// lock while updating items.
|
||||
logrus.Debugf("%s: locking cacheMutex", title)
|
||||
d.cacheMutex.Lock()
|
||||
var ci *cacheItem
|
||||
if item, ok := d.cache[id]; !ok {
|
||||
// The item is not currently in the cache.
|
||||
ci = &cacheItem{
|
||||
refCount: 1,
|
||||
isSandbox: ld.isSandbox,
|
||||
hostPath: ld.filename,
|
||||
uvmPath: fmt.Sprintf("/mnt/%s", id),
|
||||
isMounted: false, // we defer this as an optimisation
|
||||
}
|
||||
d.cache[id] = ci
|
||||
logrus.Debugf("%s: added cache item %+v", title, ci)
|
||||
} else {
|
||||
// Increment the reference counter in the cache.
|
||||
item.incrementRefCount()
|
||||
}
|
||||
logrus.Debugf("%s: releasing cacheMutex", title)
|
||||
d.cacheMutex.Unlock()
|
||||
|
||||
logrus.Debugf("%s %s success. %s: %+v: size %d", title, id, d.dir(id), ci, ld.size)
|
||||
return d.dir(id), nil
|
||||
logrus.Debugf("%s: got layer mounts: %+v", title, disks)
|
||||
return &lcowfs{
|
||||
root: unionMountName(disks),
|
||||
d: d,
|
||||
mappedDisks: disks,
|
||||
vmID: d.getVMID(id),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Put does the reverse of get. If there are no more references to
|
||||
|
@ -703,56 +665,45 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
|
|||
func (d *Driver) Put(id string) error {
|
||||
title := fmt.Sprintf("lcowdriver: put: %s", id)
|
||||
|
||||
logrus.Debugf("%s: locking cacheMutex", title)
|
||||
d.cacheMutex.Lock()
|
||||
item, ok := d.cache[id]
|
||||
if !ok {
|
||||
logrus.Debugf("%s: releasing cacheMutex on error path", title)
|
||||
d.cacheMutex.Unlock()
|
||||
return fmt.Errorf("%s possible ref-count error, or invalid id was passed to the graphdriver. Cannot handle id %s as it's not in the cache", title, id)
|
||||
}
|
||||
|
||||
// Decrement the ref-count, and nothing more to do if still in use.
|
||||
if item.decrementRefCount() > 0 {
|
||||
logrus.Debugf("%s: releasing cacheMutex. Cache item is still in use", title)
|
||||
d.cacheMutex.Unlock()
|
||||
// Get the service VM that we need to remove from
|
||||
svm, err := d.serviceVms.get(d.getVMID(id))
|
||||
if err == errVMUnknown {
|
||||
return nil
|
||||
} else if err == errVMisTerminating {
|
||||
return svm.getStopError()
|
||||
}
|
||||
|
||||
// Remove from the cache map.
|
||||
delete(d.cache, id)
|
||||
logrus.Debugf("%s: releasing cacheMutex. Ref count on cache item has dropped to zero, removed from cache", title)
|
||||
d.cacheMutex.Unlock()
|
||||
// Generate the mounts that Get() might have mounted
|
||||
disks, err := d.getAllMounts(id)
|
||||
if err != nil {
|
||||
logrus.Debugf("%s failed to get all layer details for %s: %s", title, d.dir(id), err)
|
||||
return fmt.Errorf("%s failed to get layer details for %s: %s", title, d.dir(id), err)
|
||||
}
|
||||
|
||||
// If we have done a mount and we are in global mode, then remove it. We don't
|
||||
// need to remove in safe mode as the service VM is going to be torn down anyway.
|
||||
if d.globalMode {
|
||||
logrus.Debugf("%s: locking cache item at zero ref-count", title)
|
||||
item.Lock()
|
||||
defer func() {
|
||||
logrus.Debugf("%s: releasing cache item at zero ref-count", title)
|
||||
item.Unlock()
|
||||
}()
|
||||
if item.isMounted {
|
||||
svm, err := d.getServiceVM(id, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Now, we want to perform the unmounts, hot-remove and stop the service vm.
|
||||
// We want to go through all the steps even if we have an error, to clean up properly
|
||||
err = svm.deleteUnionMount(unionMountName(disks), disks...)
|
||||
if err != nil {
|
||||
logrus.Debugf("%s failed to delete union mount %s: %s", title, id, err)
|
||||
}
|
||||
|
||||
logrus.Debugf("%s: Hot-Removing %s. Locking svm", title, item.hostPath)
|
||||
svm.Lock()
|
||||
if err := svm.config.HotRemoveVhd(item.hostPath); err != nil {
|
||||
logrus.Debugf("%s: releasing svm on error path", title)
|
||||
svm.Unlock()
|
||||
return fmt.Errorf("%s failed to hot-remove %s from global service utility VM: %s", title, item.hostPath, err)
|
||||
}
|
||||
logrus.Debugf("%s: releasing svm", title)
|
||||
svm.Unlock()
|
||||
err1 := svm.hotRemoveVHDs(disks...)
|
||||
if err1 != nil {
|
||||
logrus.Debugf("%s failed to hot remove vhds %s: %s", title, id, err)
|
||||
if err == nil {
|
||||
err = err1
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("%s %s: refCount 0. %s (%s) completed successfully", title, id, item.hostPath, item.uvmPath)
|
||||
return nil
|
||||
err1 = d.terminateServiceVM(id, fmt.Sprintf("Put %s", id), false)
|
||||
if err1 != nil {
|
||||
logrus.Debugf("%s failed to terminate service vm %s: %s", title, id, err1)
|
||||
if err == nil {
|
||||
err = err1
|
||||
}
|
||||
}
|
||||
logrus.Debugf("Put succeeded on id %s", id)
|
||||
return err
|
||||
}
|
||||
|
||||
// Cleanup ensures the information the driver stores is properly removed.
|
||||
|
@ -761,15 +712,6 @@ func (d *Driver) Put(id string) error {
|
|||
func (d *Driver) Cleanup() error {
|
||||
title := "lcowdriver: cleanup"
|
||||
|
||||
d.cacheMutex.Lock()
|
||||
for k, v := range d.cache {
|
||||
logrus.Debugf("%s cache item: %s: %+v", title, k, v)
|
||||
if v.refCount > 0 {
|
||||
logrus.Warnf("%s leaked %s: %+v", title, k, v)
|
||||
}
|
||||
}
|
||||
d.cacheMutex.Unlock()
|
||||
|
||||
items, err := ioutil.ReadDir(d.dataRoot)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
|
@ -794,8 +736,8 @@ func (d *Driver) Cleanup() error {
|
|||
|
||||
// Cleanup any service VMs we have running, along with their scratch spaces.
|
||||
// We don't take the lock for this as it's taken in terminateServiceVm.
|
||||
for k, v := range d.serviceVms {
|
||||
logrus.Debugf("%s svm: %s: %+v", title, k, v)
|
||||
for k, v := range d.serviceVms.svms {
|
||||
logrus.Debugf("%s svm entry: %s: %+v", title, k, v)
|
||||
d.terminateServiceVM(k, "cleanup", true)
|
||||
}
|
||||
|
||||
|
@ -812,65 +754,41 @@ func (d *Driver) Cleanup() error {
|
|||
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
|
||||
title := fmt.Sprintf("lcowdriver: diff: %s", id)
|
||||
|
||||
logrus.Debugf("%s: locking cacheMutex", title)
|
||||
d.cacheMutex.Lock()
|
||||
if _, ok := d.cache[id]; !ok {
|
||||
logrus.Debugf("%s: releasing cacheMutex on error path", title)
|
||||
d.cacheMutex.Unlock()
|
||||
return nil, fmt.Errorf("%s fail as %s is not in the cache", title, id)
|
||||
}
|
||||
ci := d.cache[id]
|
||||
logrus.Debugf("%s: releasing cacheMutex", title)
|
||||
d.cacheMutex.Unlock()
|
||||
|
||||
// Stat to get size
|
||||
logrus.Debugf("%s: locking cacheItem", title)
|
||||
ci.Lock()
|
||||
fileInfo, err := os.Stat(ci.hostPath)
|
||||
// Get VHDX info
|
||||
ld, err := getLayerDetails(d.dir(id))
|
||||
if err != nil {
|
||||
logrus.Debugf("%s: releasing cacheItem on error path", title)
|
||||
ci.Unlock()
|
||||
return nil, fmt.Errorf("%s failed to stat %s: %s", title, ci.hostPath, err)
|
||||
logrus.Debugf("%s: failed to get vhdx information of %s: %s", title, d.dir(id), err)
|
||||
return nil, err
|
||||
}
|
||||
logrus.Debugf("%s: releasing cacheItem", title)
|
||||
ci.Unlock()
|
||||
|
||||
// Start the SVM with a mapped virtual disk. Note that if the SVM is
|
||||
// already running and we are in global mode, this will be
|
||||
// hot-added.
|
||||
mvd := &hcsshim.MappedVirtualDisk{
|
||||
HostPath: ci.hostPath,
|
||||
ContainerPath: ci.uvmPath,
|
||||
mvd := hcsshim.MappedVirtualDisk{
|
||||
HostPath: ld.filename,
|
||||
ContainerPath: hostToGuest(ld.filename),
|
||||
CreateInUtilityVM: true,
|
||||
ReadOnly: true,
|
||||
}
|
||||
|
||||
logrus.Debugf("%s: starting service VM", title)
|
||||
svm, err := d.startServiceVMIfNotRunning(id, mvd, fmt.Sprintf("diff %s", id))
|
||||
svm, err := d.startServiceVMIfNotRunning(id, []hcsshim.MappedVirtualDisk{mvd}, fmt.Sprintf("diff %s", id))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set `isMounted` for the cache item. Note that we re-scan the cache
|
||||
// at this point as it's possible the cacheItem changed during the long-
|
||||
// running operation above when we weren't holding the cacheMutex lock.
|
||||
logrus.Debugf("%s: locking cacheMutex for updating isMounted", title)
|
||||
d.cacheMutex.Lock()
|
||||
if _, ok := d.cache[id]; !ok {
|
||||
logrus.Debugf("%s: releasing cacheMutex on error path of isMounted", title)
|
||||
d.cacheMutex.Unlock()
|
||||
logrus.Debugf("lcowdriver: diff: waiting for svm to finish booting")
|
||||
err = svm.getStartError()
|
||||
if err != nil {
|
||||
d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
|
||||
return nil, fmt.Errorf("%s fail as %s is not in the cache when updating isMounted", title, id)
|
||||
return nil, fmt.Errorf("lcowdriver: diff: svm failed to boot: %s", err)
|
||||
}
|
||||
ci = d.cache[id]
|
||||
ci.setIsMounted()
|
||||
logrus.Debugf("%s: releasing cacheMutex for updating isMounted", title)
|
||||
d.cacheMutex.Unlock()
|
||||
|
||||
// Obtain the tar stream for it
|
||||
logrus.Debugf("%s %s, size %d, isSandbox %t", title, ci.hostPath, fileInfo.Size(), ci.isSandbox)
|
||||
tarReadCloser, err := svm.config.VhdToTar(ci.hostPath, ci.uvmPath, ci.isSandbox, fileInfo.Size())
|
||||
logrus.Debugf("%s: %s %s, size %d, ReadOnly %t", title, ld.filename, mvd.ContainerPath, ld.size, ld.isSandbox)
|
||||
tarReadCloser, err := svm.config.VhdToTar(mvd.HostPath, mvd.ContainerPath, ld.isSandbox, ld.size)
|
||||
if err != nil {
|
||||
svm.hotRemoveVHDs(mvd)
|
||||
d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
|
||||
return nil, fmt.Errorf("%s failed to export layer to tar stream for id: %s, parent: %s : %s", title, id, parent, err)
|
||||
}
|
||||
|
@ -878,14 +796,12 @@ func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
|
|||
logrus.Debugf("%s id %s parent %s completed successfully", title, id, parent)
|
||||
|
||||
// In safe/non-global mode, we can't tear down the service VM until things have been read.
|
||||
if !d.globalMode {
|
||||
return ioutils.NewReadCloserWrapper(tarReadCloser, func() error {
|
||||
tarReadCloser.Close()
|
||||
d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
|
||||
return nil
|
||||
}), nil
|
||||
}
|
||||
return tarReadCloser, nil
|
||||
return ioutils.NewReadCloserWrapper(tarReadCloser, func() error {
|
||||
tarReadCloser.Close()
|
||||
svm.hotRemoveVHDs(mvd)
|
||||
d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
|
||||
return nil
|
||||
}), nil
|
||||
}
|
||||
|
||||
// ApplyDiff extracts the changeset from the given diff into the
|
||||
|
@ -902,6 +818,12 @@ func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
|
|||
}
|
||||
defer d.terminateServiceVM(id, fmt.Sprintf("applydiff %s", id), false)
|
||||
|
||||
logrus.Debugf("lcowdriver: applydiff: waiting for svm to finish booting")
|
||||
err = svm.getStartError()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("lcowdriver: applydiff: svm failed to boot: %s", err)
|
||||
}
|
||||
|
||||
// TODO @jhowardmsft - the retries are temporary to overcome platform reliability issues.
|
||||
// Obviously this will be removed as platform bugs are fixed.
|
||||
retries := 0
|
||||
|
@ -944,6 +866,11 @@ func (d *Driver) GetMetadata(id string) (map[string]string, error) {
|
|||
return m, nil
|
||||
}
|
||||
|
||||
// GetLayerPath gets the layer path on host (path to VHD/VHDX)
|
||||
func (d *Driver) GetLayerPath(id string) (string, error) {
|
||||
return d.dir(id), nil
|
||||
}
|
||||
|
||||
// dir returns the absolute path to the layer.
|
||||
func (d *Driver) dir(id string) string {
|
||||
return filepath.Join(d.dataRoot, filepath.Base(id))
|
||||
|
@ -1006,3 +933,34 @@ func getLayerDetails(folder string) (*layerDetails, error) {
|
|||
|
||||
return ld, nil
|
||||
}
|
||||
|
||||
func (d *Driver) getAllMounts(id string) ([]hcsshim.MappedVirtualDisk, error) {
|
||||
layerChain, err := d.getLayerChain(id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
layerChain = append([]string{d.dir(id)}, layerChain...)
|
||||
|
||||
logrus.Debugf("getting all layers: %v", layerChain)
|
||||
disks := make([]hcsshim.MappedVirtualDisk, len(layerChain), len(layerChain))
|
||||
for i := range layerChain {
|
||||
ld, err := getLayerDetails(layerChain[i])
|
||||
if err != nil {
|
||||
logrus.Debugf("Failed to get LayerVhdDetails from %s: %s", layerChain[i], err)
|
||||
return nil, err
|
||||
}
|
||||
disks[i].HostPath = ld.filename
|
||||
disks[i].ContainerPath = hostToGuest(ld.filename)
|
||||
disks[i].CreateInUtilityVM = true
|
||||
disks[i].ReadOnly = !ld.isSandbox
|
||||
}
|
||||
return disks, nil
|
||||
}
|
||||
|
||||
func hostToGuest(hostpath string) string {
|
||||
return fmt.Sprintf("/tmp/%s", filepath.Base(filepath.Dir(hostpath)))
|
||||
}
|
||||
|
||||
func unionMountName(disks []hcsshim.MappedVirtualDisk) string {
|
||||
return fmt.Sprintf("%s-mount", disks[0].ContainerPath)
|
||||
}
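The two helpers above only rearrange strings, so their effect is easy to show in isolation. Below is a minimal sketch, assuming a hypothetical layer VHDX path on the Windows host (the driver is Windows-only, so filepath splits on backslashes); it is not part of the commit, just an illustration of the host-to-guest path mapping and the derived union mount name.

package main

import (
	"fmt"
	"path/filepath"
)

// Same shape as hostToGuest above: the guest path is /tmp/<layer directory name>.
func hostToGuest(hostpath string) string {
	return fmt.Sprintf("/tmp/%s", filepath.Base(filepath.Dir(hostpath)))
}

func main() {
	// Hypothetical sandbox layer on the host.
	layer := `C:\ProgramData\docker\lcow\d0583c81ba9e\sandbox.vhdx`
	guest := hostToGuest(layer)
	fmt.Println(guest)            // /tmp/d0583c81ba9e
	fmt.Println(guest + "-mount") // what unionMountName returns for this disk
}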
|
||||
|
|
373  daemon/graphdriver/lcow/lcow_svm.go  Normal file
|
@ -0,0 +1,373 @@
|
|||
// +build windows
|
||||
|
||||
package lcow
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/Microsoft/opengcs/client"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Code for all the service VM management for the LCOW graphdriver
|
||||
|
||||
var errVMisTerminating = errors.New("service VM is shutting down")
|
||||
var errVMUnknown = errors.New("service vm id is unknown")
|
||||
var errVMStillHasReference = errors.New("Attemping to delete a VM that is still being used")
|
||||
|
||||
// serviceVMMap is the struct representing the id -> service VM mapping.
|
||||
type serviceVMMap struct {
|
||||
sync.Mutex
|
||||
svms map[string]*serviceVMMapItem
|
||||
}
|
||||
|
||||
// serviceVMMapItem is our internal structure representing an item in our
|
||||
// map of service VMs we are maintaining.
|
||||
type serviceVMMapItem struct {
|
||||
svm *serviceVM // actual service vm object
|
||||
refCount int // refcount for VM
|
||||
}
|
||||
|
||||
type serviceVM struct {
|
||||
sync.Mutex // Serialises operations being performed in this service VM.
|
||||
scratchAttached bool // Has a scratch been attached?
|
||||
config *client.Config // Represents the service VM item.
|
||||
|
||||
// Indicates that the vm is started
|
||||
startStatus chan interface{}
|
||||
startError error
|
||||
|
||||
// Indicates that the vm is stopped
|
||||
stopStatus chan interface{}
|
||||
stopError error
|
||||
|
||||
attachedVHDs map[string]int // Map ref counting all the VHDS we've hot-added/hot-removed.
|
||||
unionMounts map[string]int // Map ref counting all the union filesystems we mounted.
|
||||
}
|
||||
|
||||
// add will add an id to the service vm map. There are three cases:
|
||||
// - entry doesn't exist:
|
||||
// - add id to map and return a new vm that the caller can manually configure+start
|
||||
// - entry does exist
|
||||
// - return vm in map and increment ref count
|
||||
// - entry does exist but the ref count is 0
|
||||
// - return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
|
||||
func (svmMap *serviceVMMap) add(id string) (svm *serviceVM, alreadyExists bool, err error) {
|
||||
svmMap.Lock()
|
||||
defer svmMap.Unlock()
|
||||
if svm, ok := svmMap.svms[id]; ok {
|
||||
if svm.refCount == 0 {
|
||||
return svm.svm, true, errVMisTerminating
|
||||
}
|
||||
svm.refCount++
|
||||
return svm.svm, true, nil
|
||||
}
|
||||
|
||||
// Doesn't exist, so create an empty svm to put into map and return
|
||||
newSVM := &serviceVM{
|
||||
startStatus: make(chan interface{}),
|
||||
stopStatus: make(chan interface{}),
|
||||
attachedVHDs: make(map[string]int),
|
||||
unionMounts: make(map[string]int),
|
||||
config: &client.Config{},
|
||||
}
|
||||
svmMap.svms[id] = &serviceVMMapItem{
|
||||
svm: newSVM,
|
||||
refCount: 1,
|
||||
}
|
||||
return newSVM, false, nil
|
||||
}
|
||||
|
||||
// get will get the service vm from the map. There are three cases:
|
||||
// - entry doesn't exist:
|
||||
// - return errVMUnknown
|
||||
// - entry does exist
|
||||
// - return vm with no error
|
||||
// - entry does exist but the ref count is 0
|
||||
// - return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
|
||||
func (svmMap *serviceVMMap) get(id string) (*serviceVM, error) {
|
||||
svmMap.Lock()
|
||||
defer svmMap.Unlock()
|
||||
svm, ok := svmMap.svms[id]
|
||||
if !ok {
|
||||
return nil, errVMUnknown
|
||||
}
|
||||
if svm.refCount == 0 {
|
||||
return svm.svm, errVMisTerminating
|
||||
}
|
||||
return svm.svm, nil
|
||||
}
|
||||
|
||||
// decrementRefCount decrements the ref count of the given ID from the map. There are four cases:
|
||||
// - entry doesn't exist:
|
||||
// - return errVMUnknown
|
||||
// - entry does exist but the ref count is 0
|
||||
// - return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
|
||||
// - entry does exist but ref count is 1
|
||||
// - return vm and set lastRef to true. The caller can then stop the vm, delete the id from this map
|
||||
// - and execute svm.signalStopFinished to signal the threads that the svm has been terminated.
|
||||
// - entry does exist and ref count > 1
|
||||
// - just reduce ref count and return svm
|
||||
func (svmMap *serviceVMMap) decrementRefCount(id string) (_ *serviceVM, lastRef bool, _ error) {
|
||||
svmMap.Lock()
|
||||
defer svmMap.Unlock()
|
||||
|
||||
svm, ok := svmMap.svms[id]
|
||||
if !ok {
|
||||
return nil, false, errVMUnknown
|
||||
}
|
||||
if svm.refCount == 0 {
|
||||
return svm.svm, false, errVMisTerminating
|
||||
}
|
||||
svm.refCount--
|
||||
return svm.svm, svm.refCount == 0, nil
|
||||
}
|
||||
|
||||
// setRefCountZero works the same way as decrementRefCount, but sets ref count to 0 instead of decrementing it.
|
||||
func (svmMap *serviceVMMap) setRefCountZero(id string) (*serviceVM, error) {
|
||||
svmMap.Lock()
|
||||
defer svmMap.Unlock()
|
||||
|
||||
svm, ok := svmMap.svms[id]
|
||||
if !ok {
|
||||
return nil, errVMUnknown
|
||||
}
|
||||
if svm.refCount == 0 {
|
||||
return svm.svm, errVMisTerminating
|
||||
}
|
||||
svm.refCount = 0
|
||||
return svm.svm, nil
|
||||
}
|
||||
|
||||
// deleteID deletes the given ID from the map. If the refcount is not 0 or the
|
||||
// VM does not exist, then this function returns an error.
|
||||
func (svmMap *serviceVMMap) deleteID(id string) error {
|
||||
svmMap.Lock()
|
||||
defer svmMap.Unlock()
|
||||
svm, ok := svmMap.svms[id]
|
||||
if !ok {
|
||||
return errVMUnknown
|
||||
}
|
||||
if svm.refCount != 0 {
|
||||
return errVMStillHasReference
|
||||
}
|
||||
delete(svmMap.svms, id)
|
||||
return nil
|
||||
}
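Taken together, add, get, decrementRefCount, setRefCountZero and deleteID implement a small ref-counted registry whose teardown is driven by the last reference. The sketch below is illustrative only and not part of the commit: it is written as if it lived inside this package next to the code above, elides error handling, and shows the lifecycle that startServiceVMIfNotRunning and terminateServiceVM are expected to follow.

// lifecycleSketch shows the intended use of the map: the first caller boots
// the VM, later callers only bump the ref count, and only the final release
// deletes the entry and signals waiters.
func lifecycleSketch(svms *serviceVMMap, id string) {
	svm, existed, _ := svms.add(id) // refCount becomes 1 on first use
	if !existed {
		// ... configure and start svm.config here, then unblock getStartError():
		svm.signalStartFinished(nil)
	}

	// Release path (the non-force branch of terminateServiceVM):
	svm, lastRef, _ := svms.decrementRefCount(id)
	if lastRef {
		// ... terminate the utility VM here ...
		_ = svms.deleteID(id)       // remove the entry now that refCount is 0
		svm.signalStopFinished(nil) // unblock anyone waiting in getStopError()
	}
}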
|
||||
|
||||
func (svm *serviceVM) signalStartFinished(err error) {
|
||||
svm.Lock()
|
||||
svm.startError = err
|
||||
svm.Unlock()
|
||||
close(svm.startStatus)
|
||||
}
|
||||
|
||||
func (svm *serviceVM) getStartError() error {
|
||||
<-svm.startStatus
|
||||
svm.Lock()
|
||||
defer svm.Unlock()
|
||||
return svm.startError
|
||||
}
|
||||
|
||||
func (svm *serviceVM) signalStopFinished(err error) {
|
||||
svm.Lock()
|
||||
svm.stopError = err
|
||||
svm.Unlock()
|
||||
close(svm.stopStatus)
|
||||
}
|
||||
|
||||
func (svm *serviceVM) getStopError() error {
|
||||
<-svm.stopStatus
|
||||
svm.Lock()
|
||||
defer svm.Unlock()
|
||||
return svm.stopError
|
||||
}
|
||||
|
||||
// hotAddVHDs waits for the service vm to start and then attaches the vhds.
|
||||
func (svm *serviceVM) hotAddVHDs(mvds ...hcsshim.MappedVirtualDisk) error {
|
||||
if err := svm.getStartError(); err != nil {
|
||||
return err
|
||||
}
|
||||
return svm.hotAddVHDsAtStart(mvds...)
|
||||
}
|
||||
|
||||
// hotAddVHDsAtStart works the same way as hotAddVHDs but does not wait for the VM to start.
|
||||
func (svm *serviceVM) hotAddVHDsAtStart(mvds ...hcsshim.MappedVirtualDisk) error {
|
||||
svm.Lock()
|
||||
defer svm.Unlock()
|
||||
for i, mvd := range mvds {
|
||||
if _, ok := svm.attachedVHDs[mvd.HostPath]; ok {
|
||||
svm.attachedVHDs[mvd.HostPath]++
|
||||
continue
|
||||
}
|
||||
|
||||
if err := svm.config.HotAddVhd(mvd.HostPath, mvd.ContainerPath, mvd.ReadOnly, !mvd.AttachOnly); err != nil {
|
||||
svm.hotRemoveVHDsAtStart(mvds[:i]...)
|
||||
return err
|
||||
}
|
||||
svm.attachedVHDs[mvd.HostPath] = 1
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// hotRemoveVHDs waits for the service vm to start and then removes the vhds.
|
||||
func (svm *serviceVM) hotRemoveVHDs(mvds ...hcsshim.MappedVirtualDisk) error {
|
||||
if err := svm.getStartError(); err != nil {
|
||||
return err
|
||||
}
|
||||
return svm.hotRemoveVHDsAtStart(mvds...)
|
||||
}
|
||||
|
||||
// hotRemoveVHDsAtStart works the same way as hotRemoveVHDs but does not wait for the VM to start.
|
||||
func (svm *serviceVM) hotRemoveVHDsAtStart(mvds ...hcsshim.MappedVirtualDisk) error {
|
||||
svm.Lock()
|
||||
defer svm.Unlock()
|
||||
var retErr error
|
||||
for _, mvd := range mvds {
|
||||
if _, ok := svm.attachedVHDs[mvd.HostPath]; !ok {
|
||||
// We continue instead of returning an error if we try to hot remove a non-existent VHD.
|
||||
// This is because one of the callers of the function is graphdriver.Put(). Since graphdriver.Get()
|
||||
// defers the VM start to the first operation, it's possible that nothing has been hot-added
|
||||
// when Put() is called. To avoid Put returning an error in that case, we simply continue if we
|
||||
// don't find the vhd attached.
|
||||
continue
|
||||
}
|
||||
|
||||
if svm.attachedVHDs[mvd.HostPath] > 1 {
|
||||
svm.attachedVHDs[mvd.HostPath]--
|
||||
continue
|
||||
}
|
||||
|
||||
// last VHD, so remove from VM and map
|
||||
if err := svm.config.HotRemoveVhd(mvd.HostPath); err == nil {
|
||||
delete(svm.attachedVHDs, mvd.HostPath)
|
||||
} else {
|
||||
// Take note of the error, but still continue to remove the other VHDs
|
||||
logrus.Warnf("Failed to hot remove %s: %s", mvd.HostPath, err)
|
||||
if retErr == nil {
|
||||
retErr = err
|
||||
}
|
||||
}
|
||||
}
|
||||
return retErr
|
||||
}
|
||||
|
||||
func (svm *serviceVM) createExt4VHDX(destFile string, sizeGB uint32, cacheFile string) error {
|
||||
if err := svm.getStartError(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
svm.Lock()
|
||||
defer svm.Unlock()
|
||||
return svm.config.CreateExt4Vhdx(destFile, sizeGB, cacheFile)
|
||||
}
|
||||
|
||||
func (svm *serviceVM) createUnionMount(mountName string, mvds ...hcsshim.MappedVirtualDisk) (err error) {
|
||||
if len(mvds) == 0 {
|
||||
return fmt.Errorf("createUnionMount: error must have at least 1 layer")
|
||||
}
|
||||
|
||||
if err = svm.getStartError(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
svm.Lock()
|
||||
defer svm.Unlock()
|
||||
if _, ok := svm.unionMounts[mountName]; ok {
|
||||
svm.unionMounts[mountName]++
|
||||
return nil
|
||||
}
|
||||
|
||||
var lowerLayers []string
|
||||
if mvds[0].ReadOnly {
|
||||
lowerLayers = append(lowerLayers, mvds[0].ContainerPath)
|
||||
}
|
||||
|
||||
for i := 1; i < len(mvds); i++ {
|
||||
lowerLayers = append(lowerLayers, mvds[i].ContainerPath)
|
||||
}
|
||||
|
||||
logrus.Debugf("Doing the overlay mount with union directory=%s", mountName)
|
||||
if err = svm.runProcess(fmt.Sprintf("mkdir -p %s", mountName), nil, nil, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var cmd string
|
||||
if mvds[0].ReadOnly {
|
||||
// Readonly overlay
|
||||
cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s %s",
|
||||
strings.Join(lowerLayers, ","),
|
||||
mountName)
|
||||
} else {
|
||||
upper := fmt.Sprintf("%s/upper", mvds[0].ContainerPath)
|
||||
work := fmt.Sprintf("%s/work", mvds[0].ContainerPath)
|
||||
|
||||
if err = svm.runProcess(fmt.Sprintf("mkdir -p %s %s", upper, work), nil, nil, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s,upperdir=%s,workdir=%s %s",
|
||||
strings.Join(lowerLayers, ":"),
|
||||
upper,
|
||||
work,
|
||||
mountName)
|
||||
}
|
||||
|
||||
logrus.Debugf("createUnionMount: Executing mount=%s", cmd)
|
||||
if err = svm.runProcess(cmd, nil, nil, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
svm.unionMounts[mountName] = 1
|
||||
return nil
|
||||
}
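To make the shell side of createUnionMount concrete, here is a small standalone sketch that reproduces the command strings the read-write branch would send to the utility VM for one sandbox layer over one read-only parent. The /tmp paths are hypothetical placeholders in the shape produced by hostToGuest; the snippet only prints the commands, it does not talk to a VM.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical guest paths: index 0 is the writable sandbox, the rest are
	// read-only parents (mirroring the ordering produced by getAllMounts).
	layers := []string{"/tmp/sandboxid", "/tmp/parentid"}
	mountName := layers[0] + "-mount"

	lower := strings.Join(layers[1:], ":")
	upper := layers[0] + "/upper"
	work := layers[0] + "/work"

	fmt.Printf("mkdir -p %s\n", mountName)
	fmt.Printf("mkdir -p %s %s\n", upper, work)
	fmt.Printf("mount -t overlay overlay -olowerdir=%s,upperdir=%s,workdir=%s %s\n",
		lower, upper, work, mountName)
}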
|
||||
|
||||
func (svm *serviceVM) deleteUnionMount(mountName string, disks ...hcsshim.MappedVirtualDisk) error {
|
||||
if err := svm.getStartError(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
svm.Lock()
|
||||
defer svm.Unlock()
|
||||
if _, ok := svm.unionMounts[mountName]; !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
if svm.unionMounts[mountName] > 1 {
|
||||
svm.unionMounts[mountName]--
|
||||
return nil
|
||||
}
|
||||
|
||||
logrus.Debugf("Removing union mount %s", mountName)
|
||||
if err := svm.runProcess(fmt.Sprintf("umount %s", mountName), nil, nil, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
delete(svm.unionMounts, mountName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (svm *serviceVM) runProcess(command string, stdin io.Reader, stdout io.Writer, stderr io.Writer) error {
|
||||
process, err := svm.config.RunProcess(command, stdin, stdout, stderr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer process.Close()
|
||||
|
||||
process.WaitTimeout(time.Duration(int(time.Second) * svm.config.UvmTimeoutSeconds))
|
||||
exitCode, err := process.ExitCode()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if exitCode != 0 {
|
||||
return fmt.Errorf("svm.runProcess: command %s failed with exit code %d", command, exitCode)
|
||||
}
|
||||
return nil
|
||||
}
|
139  daemon/graphdriver/lcow/remotefs.go  Normal file
|
@ -0,0 +1,139 @@
|
|||
// +build windows
|
||||
|
||||
package lcow
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/Microsoft/opengcs/service/gcsutils/remotefs"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type lcowfs struct {
|
||||
root string
|
||||
d *Driver
|
||||
mappedDisks []hcsshim.MappedVirtualDisk
|
||||
vmID string
|
||||
currentSVM *serviceVM
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
var _ containerfs.ContainerFS = &lcowfs{}
|
||||
|
||||
// ErrNotSupported is an error for unsupported operations in the remotefs
|
||||
var ErrNotSupported = fmt.Errorf("not supported")
|
||||
|
||||
// Functions to implement the ContainerFS interface
|
||||
func (l *lcowfs) Path() string {
|
||||
return l.root
|
||||
}
|
||||
|
||||
func (l *lcowfs) ResolveScopedPath(path string, rawPath bool) (string, error) {
|
||||
logrus.Debugf("remotefs.resolvescopedpath inputs: %s %s ", path, l.root)
|
||||
|
||||
arg1 := l.Join(l.root, path)
|
||||
if !rawPath {
|
||||
// The l.Join("/", path) will make path an absolute path and then clean it
|
||||
// so if path = ../../X, it will become /X.
|
||||
arg1 = l.Join(l.root, l.Join("/", path))
|
||||
}
|
||||
arg2 := l.root
|
||||
|
||||
output := &bytes.Buffer{}
|
||||
if err := l.runRemoteFSProcess(nil, output, remotefs.ResolvePathCmd, arg1, arg2); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
logrus.Debugf("remotefs.resolvescopedpath success. Output: %s\n", output.String())
|
||||
return output.String(), nil
|
||||
}
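Because lcowfs.Join is plain POSIX path joining (see remotefs_pathdriver.go later in this diff), the non-rawPath branch can be demonstrated without a VM. A minimal sketch, assuming a hypothetical mount root and an untrusted relative path:

package main

import (
	"fmt"
	"path"
)

func main() {
	root := "/tmp/sandboxid-mount" // hypothetical union mount root
	req := "../../etc/passwd"      // untrusted path supplied by an API caller

	// path.Join("/", req) cleans the path to /etc/passwd, so the outer join
	// cannot escape root; this is the value handed to remotefs.ResolvePathCmd.
	scoped := path.Join(root, path.Join("/", req))
	fmt.Println(scoped) // /tmp/sandboxid-mount/etc/passwd
}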
|
||||
|
||||
func (l *lcowfs) OS() string {
|
||||
return "linux"
|
||||
}
|
||||
|
||||
func (l *lcowfs) Architecture() string {
|
||||
return runtime.GOARCH
|
||||
}
|
||||
|
||||
// Other functions that are used by docker like the daemon Archiver/Extractor
|
||||
func (l *lcowfs) ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error {
|
||||
logrus.Debugf("remotefs.ExtractArchve inputs: %s %+v", dst, opts)
|
||||
|
||||
tarBuf := &bytes.Buffer{}
|
||||
if err := remotefs.WriteTarOptions(tarBuf, opts); err != nil {
|
||||
return fmt.Errorf("failed to marshall tar opts: %s", err)
|
||||
}
|
||||
|
||||
input := io.MultiReader(tarBuf, src)
|
||||
if err := l.runRemoteFSProcess(input, nil, remotefs.ExtractArchiveCmd, dst); err != nil {
|
||||
return fmt.Errorf("failed to extract archive to %s: %s", dst, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *lcowfs) ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error) {
|
||||
logrus.Debugf("remotefs.ArchivePath: %s %+v", src, opts)
|
||||
|
||||
tarBuf := &bytes.Buffer{}
|
||||
if err := remotefs.WriteTarOptions(tarBuf, opts); err != nil {
|
||||
return nil, fmt.Errorf("failed to marshall tar opts: %s", err)
|
||||
}
|
||||
|
||||
r, w := io.Pipe()
|
||||
go func() {
|
||||
defer w.Close()
|
||||
if err := l.runRemoteFSProcess(tarBuf, w, remotefs.ArchivePathCmd, src); err != nil {
|
||||
logrus.Debugf("REMOTEFS: Failed to extract archive: %s %+v %s", src, opts, err)
|
||||
}
|
||||
}()
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// Helper functions
|
||||
func (l *lcowfs) startVM() error {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
if l.currentSVM != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
svm, err := l.d.startServiceVMIfNotRunning(l.vmID, l.mappedDisks, fmt.Sprintf("lcowfs.startVM"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = svm.createUnionMount(l.root, l.mappedDisks...); err != nil {
|
||||
return err
|
||||
}
|
||||
l.currentSVM = svm
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *lcowfs) runRemoteFSProcess(stdin io.Reader, stdout io.Writer, args ...string) error {
|
||||
if err := l.startVM(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Append remotefs prefix and setup as a command line string
|
||||
cmd := fmt.Sprintf("%s %s", remotefs.RemotefsCmd, strings.Join(args, " "))
|
||||
stderr := &bytes.Buffer{}
|
||||
if err := l.currentSVM.runProcess(cmd, stdin, stdout, stderr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
eerr, err := remotefs.ReadError(stderr)
|
||||
if eerr != nil {
|
||||
// Process returned an error so return that.
|
||||
return remotefs.ExportedToError(eerr)
|
||||
}
|
||||
return err
|
||||
}
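For orientation, the sketch below shows how daemon-side code would typically consume all of this once Get() hands back an lcowfs: every filesystem call funnels through runRemoteFSProcess as a single "remotefs <cmd> ..." invocation inside the service VM. It is written as if it lived in this package, uses a hypothetical layer id, and elides error details; it is an illustration, not part of the commit.

// useRemoteFS is an illustrative caller, not production code.
func useRemoteFS(d *Driver) error {
	fs, err := d.Get("layerid", "") // returns a containerfs.ContainerFS backed by lcowfs
	if err != nil {
		return err
	}
	defer d.Put("layerid") // drops the VM/VHD references taken by Get

	// Each of these becomes one remotefs command executed in the utility VM.
	dir := fs.Join(fs.Path(), "etc")
	if err := fs.MkdirAll(dir, 0755); err != nil {
		return err
	}
	_, err = fs.Stat(dir)
	return err
}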
|
211  daemon/graphdriver/lcow/remotefs_file.go  Normal file
|
@ -0,0 +1,211 @@
|
|||
// +build windows
|
||||
|
||||
package lcow
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/Microsoft/opengcs/service/gcsutils/remotefs"
|
||||
"github.com/containerd/continuity/driver"
|
||||
)
|
||||
|
||||
type lcowfile struct {
|
||||
process hcsshim.Process
|
||||
stdin io.WriteCloser
|
||||
stdout io.ReadCloser
|
||||
stderr io.ReadCloser
|
||||
fs *lcowfs
|
||||
guestPath string
|
||||
}
|
||||
|
||||
func (l *lcowfs) Open(path string) (driver.File, error) {
|
||||
return l.OpenFile(path, os.O_RDONLY, 0)
|
||||
}
|
||||
|
||||
func (l *lcowfs) OpenFile(path string, flag int, perm os.FileMode) (_ driver.File, err error) {
|
||||
flagStr := strconv.FormatInt(int64(flag), 10)
|
||||
permStr := strconv.FormatUint(uint64(perm), 8)
|
||||
|
||||
commandLine := fmt.Sprintf("%s %s %s %s", remotefs.RemotefsCmd, remotefs.OpenFileCmd, flagStr, permStr)
|
||||
env := make(map[string]string)
|
||||
env["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:"
|
||||
processConfig := &hcsshim.ProcessConfig{
|
||||
EmulateConsole: false,
|
||||
CreateStdInPipe: true,
|
||||
CreateStdOutPipe: true,
|
||||
CreateStdErrPipe: true,
|
||||
CreateInUtilityVm: true,
|
||||
WorkingDirectory: "/bin",
|
||||
Environment: env,
|
||||
CommandLine: commandLine,
|
||||
}
|
||||
|
||||
process, err := l.currentSVM.config.Uvm.CreateProcess(processConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open file %s: %s", path, err)
|
||||
}
|
||||
|
||||
stdin, stdout, stderr, err := process.Stdio()
|
||||
if err != nil {
|
||||
process.Kill()
|
||||
process.Close()
|
||||
return nil, fmt.Errorf("failed to open file pipes %s: %s", path, err)
|
||||
}
|
||||
|
||||
lf := &lcowfile{
|
||||
process: process,
|
||||
stdin: stdin,
|
||||
stdout: stdout,
|
||||
stderr: stderr,
|
||||
fs: l,
|
||||
guestPath: path,
|
||||
}
|
||||
|
||||
if _, err := lf.getResponse(); err != nil {
|
||||
return nil, fmt.Errorf("failed to open file %s: %s", path, err)
|
||||
}
|
||||
return lf, nil
|
||||
}
|
||||
|
||||
func (l *lcowfile) Read(b []byte) (int, error) {
|
||||
hdr := &remotefs.FileHeader{
|
||||
Cmd: remotefs.Read,
|
||||
Size: uint64(len(b)),
|
||||
}
|
||||
|
||||
if err := remotefs.WriteFileHeader(l.stdin, hdr, nil); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
buf, err := l.getResponse()
|
||||
if err != nil {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
n := copy(b, buf)
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (l *lcowfile) Write(b []byte) (int, error) {
|
||||
hdr := &remotefs.FileHeader{
|
||||
Cmd: remotefs.Write,
|
||||
Size: uint64(len(b)),
|
||||
}
|
||||
|
||||
if err := remotefs.WriteFileHeader(l.stdin, hdr, b); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
_, err := l.getResponse()
|
||||
if err != nil {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
func (l *lcowfile) Seek(offset int64, whence int) (int64, error) {
|
||||
seekHdr := &remotefs.SeekHeader{
|
||||
Offset: offset,
|
||||
Whence: int32(whence),
|
||||
}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
if err := binary.Write(buf, binary.BigEndian, seekHdr); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
hdr := &remotefs.FileHeader{
|
||||
Cmd: remotefs.Write,
|
||||
Size: uint64(buf.Len()),
|
||||
}
|
||||
if err := remotefs.WriteFileHeader(l.stdin, hdr, buf.Bytes()); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
resBuf, err := l.getResponse()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var res int64
|
||||
if err := binary.Read(bytes.NewBuffer(resBuf), binary.BigEndian, &res); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (l *lcowfile) Close() error {
|
||||
hdr := &remotefs.FileHeader{
|
||||
Cmd: remotefs.Close,
|
||||
Size: 0,
|
||||
}
|
||||
|
||||
if err := remotefs.WriteFileHeader(l.stdin, hdr, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err := l.getResponse()
|
||||
return err
|
||||
}
|
||||
|
||||
func (l *lcowfile) Readdir(n int) ([]os.FileInfo, error) {
|
||||
nStr := strconv.FormatInt(int64(n), 10)
|
||||
|
||||
// Unlike the other File functions, this one can just be run without maintaining state,
|
||||
// so it just uses the normal runRemoteFSProcess path.
|
||||
buf := &bytes.Buffer{}
|
||||
if err := l.fs.runRemoteFSProcess(nil, buf, remotefs.ReadDirCmd, l.guestPath, nStr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var info []remotefs.FileInfo
|
||||
if err := json.Unmarshal(buf.Bytes(), &info); err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
osInfo := make([]os.FileInfo, len(info))
|
||||
for i := range info {
|
||||
osInfo[i] = &info[i]
|
||||
}
|
||||
return osInfo, nil
|
||||
}
|
||||
|
||||
func (l *lcowfile) getResponse() ([]byte, error) {
|
||||
hdr, err := remotefs.ReadFileHeader(l.stdout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if hdr.Cmd != remotefs.CmdOK {
|
||||
// Something went wrong during the openfile in the server.
|
||||
// Parse stderr and return that as an error
|
||||
eerr, err := remotefs.ReadError(l.stderr)
|
||||
if eerr != nil {
|
||||
return nil, remotefs.ExportedToError(eerr)
|
||||
}
|
||||
|
||||
// Maybe the parsing went wrong?
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// At this point, we know something went wrong in the remotefs program, but
|
||||
// we don't know why.
|
||||
return nil, fmt.Errorf("unknown error")
|
||||
}
|
||||
|
||||
// Successful command, we might have some data to read (for Read + Seek)
|
||||
buf := make([]byte, hdr.Size, hdr.Size)
|
||||
if _, err := io.ReadFull(l.stdout, buf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf, nil
|
||||
}
|
123  daemon/graphdriver/lcow/remotefs_filedriver.go  Normal file
|
@ -0,0 +1,123 @@
|
|||
// +build windows
|
||||
|
||||
package lcow
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/Microsoft/opengcs/service/gcsutils/remotefs"
|
||||
|
||||
"github.com/containerd/continuity/driver"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var _ driver.Driver = &lcowfs{}
|
||||
|
||||
func (l *lcowfs) Readlink(p string) (string, error) {
|
||||
logrus.Debugf("removefs.readlink args: %s", p)
|
||||
|
||||
result := &bytes.Buffer{}
|
||||
if err := l.runRemoteFSProcess(nil, result, remotefs.ReadlinkCmd, p); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return result.String(), nil
|
||||
}
|
||||
|
||||
func (l *lcowfs) Mkdir(path string, mode os.FileMode) error {
|
||||
return l.mkdir(path, mode, remotefs.MkdirCmd)
|
||||
}
|
||||
|
||||
func (l *lcowfs) MkdirAll(path string, mode os.FileMode) error {
|
||||
return l.mkdir(path, mode, remotefs.MkdirAllCmd)
|
||||
}
|
||||
|
||||
func (l *lcowfs) mkdir(path string, mode os.FileMode, cmd string) error {
|
||||
modeStr := strconv.FormatUint(uint64(mode), 8)
|
||||
logrus.Debugf("remotefs.%s args: %s %s", cmd, path, modeStr)
|
||||
return l.runRemoteFSProcess(nil, nil, cmd, path, modeStr)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Remove(path string) error {
|
||||
return l.remove(path, remotefs.RemoveCmd)
|
||||
}
|
||||
|
||||
func (l *lcowfs) RemoveAll(path string) error {
|
||||
return l.remove(path, remotefs.RemoveAllCmd)
|
||||
}
|
||||
|
||||
func (l *lcowfs) remove(path string, cmd string) error {
|
||||
logrus.Debugf("remotefs.%s args: %s", cmd, path)
|
||||
return l.runRemoteFSProcess(nil, nil, cmd, path)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Link(oldname, newname string) error {
|
||||
return l.link(oldname, newname, remotefs.LinkCmd)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Symlink(oldname, newname string) error {
|
||||
return l.link(oldname, newname, remotefs.SymlinkCmd)
|
||||
}
|
||||
|
||||
func (l *lcowfs) link(oldname, newname, cmd string) error {
|
||||
logrus.Debugf("remotefs.%s args: %s %s", cmd, oldname, newname)
|
||||
return l.runRemoteFSProcess(nil, nil, cmd, oldname, newname)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Lchown(name string, uid, gid int64) error {
|
||||
uidStr := strconv.FormatInt(uid, 10)
|
||||
gidStr := strconv.FormatInt(gid, 10)
|
||||
|
||||
logrus.Debugf("remotefs.lchown args: %s %s %s", name, uidStr, gidStr)
|
||||
return l.runRemoteFSProcess(nil, nil, remotefs.LchownCmd, name, uidStr, gidStr)
|
||||
}
|
||||
|
||||
// Lchmod changes the mode of a file without following symlinks.
|
||||
func (l *lcowfs) Lchmod(path string, mode os.FileMode) error {
|
||||
modeStr := strconv.FormatUint(uint64(mode), 8)
|
||||
logrus.Debugf("remotefs.lchmod args: %s %s", path, modeStr)
|
||||
return l.runRemoteFSProcess(nil, nil, remotefs.LchmodCmd, path, modeStr)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Mknod(path string, mode os.FileMode, major, minor int) error {
|
||||
modeStr := strconv.FormatUint(uint64(mode), 8)
|
||||
majorStr := strconv.FormatUint(uint64(major), 10)
|
||||
minorStr := strconv.FormatUint(uint64(minor), 10)
|
||||
|
||||
logrus.Debugf("remotefs.mknod args: %s %s %s %s", path, modeStr, majorStr, minorStr)
|
||||
return l.runRemoteFSProcess(nil, nil, remotefs.MknodCmd, path, modeStr, majorStr, minorStr)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Mkfifo(path string, mode os.FileMode) error {
|
||||
modeStr := strconv.FormatUint(uint64(mode), 8)
|
||||
logrus.Debugf("remotefs.mkfifo args: %s %s", path, modeStr)
|
||||
return l.runRemoteFSProcess(nil, nil, remotefs.MkfifoCmd, path, modeStr)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Stat(p string) (os.FileInfo, error) {
|
||||
return l.stat(p, remotefs.StatCmd)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Lstat(p string) (os.FileInfo, error) {
|
||||
return l.stat(p, remotefs.LstatCmd)
|
||||
}
|
||||
|
||||
func (l *lcowfs) stat(path string, cmd string) (os.FileInfo, error) {
|
||||
logrus.Debugf("remotefs.stat inputs: %s %s", cmd, path)
|
||||
|
||||
output := &bytes.Buffer{}
|
||||
err := l.runRemoteFSProcess(nil, output, cmd, path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var fi remotefs.FileInfo
|
||||
if err := json.Unmarshal(output.Bytes(), &fi); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logrus.Debugf("remotefs.stat success. got: %v\n", fi)
|
||||
return &fi, nil
|
||||
}
|
212  daemon/graphdriver/lcow/remotefs_pathdriver.go  Normal file
|
@ -0,0 +1,212 @@
|
|||
// +build windows
|
||||
|
||||
package lcow
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
pathpkg "path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/continuity/pathdriver"
|
||||
)
|
||||
|
||||
var _ pathdriver.PathDriver = &lcowfs{}
|
||||
|
||||
// Continuity Path functions can be done locally
|
||||
func (l *lcowfs) Join(path ...string) string {
|
||||
return pathpkg.Join(path...)
|
||||
}
|
||||
|
||||
func (l *lcowfs) IsAbs(path string) bool {
|
||||
return pathpkg.IsAbs(path)
|
||||
}
|
||||
|
||||
func sameWord(a, b string) bool {
|
||||
return a == b
|
||||
}
|
||||
|
||||
// Implementation taken from the Go standard library
|
||||
func (l *lcowfs) Rel(basepath, targpath string) (string, error) {
|
||||
baseVol := ""
|
||||
targVol := ""
|
||||
base := l.Clean(basepath)
|
||||
targ := l.Clean(targpath)
|
||||
if sameWord(targ, base) {
|
||||
return ".", nil
|
||||
}
|
||||
base = base[len(baseVol):]
|
||||
targ = targ[len(targVol):]
|
||||
if base == "." {
|
||||
base = ""
|
||||
}
|
||||
// Can't use IsAbs - `\a` and `a` are both relative in Windows.
|
||||
baseSlashed := len(base) > 0 && base[0] == l.Separator()
|
||||
targSlashed := len(targ) > 0 && targ[0] == l.Separator()
|
||||
if baseSlashed != targSlashed || !sameWord(baseVol, targVol) {
|
||||
return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath)
|
||||
}
|
||||
// Position base[b0:bi] and targ[t0:ti] at the first differing elements.
|
||||
bl := len(base)
|
||||
tl := len(targ)
|
||||
var b0, bi, t0, ti int
|
||||
for {
|
||||
for bi < bl && base[bi] != l.Separator() {
|
||||
bi++
|
||||
}
|
||||
for ti < tl && targ[ti] != l.Separator() {
|
||||
ti++
|
||||
}
|
||||
if !sameWord(targ[t0:ti], base[b0:bi]) {
|
||||
break
|
||||
}
|
||||
if bi < bl {
|
||||
bi++
|
||||
}
|
||||
if ti < tl {
|
||||
ti++
|
||||
}
|
||||
b0 = bi
|
||||
t0 = ti
|
||||
}
|
||||
if base[b0:bi] == ".." {
|
||||
return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath)
|
||||
}
|
||||
if b0 != bl {
|
||||
// Base elements left. Must go up before going down.
|
||||
seps := strings.Count(base[b0:bl], string(l.Separator()))
|
||||
size := 2 + seps*3
|
||||
if tl != t0 {
|
||||
size += 1 + tl - t0
|
||||
}
|
||||
buf := make([]byte, size)
|
||||
n := copy(buf, "..")
|
||||
for i := 0; i < seps; i++ {
|
||||
buf[n] = l.Separator()
|
||||
copy(buf[n+1:], "..")
|
||||
n += 3
|
||||
}
|
||||
if t0 != tl {
|
||||
buf[n] = l.Separator()
|
||||
copy(buf[n+1:], targ[t0:])
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
return targ[t0:], nil
|
||||
}
|
||||
|
||||
func (l *lcowfs) Base(path string) string {
|
||||
return pathpkg.Base(path)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Dir(path string) string {
|
||||
return pathpkg.Dir(path)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Clean(path string) string {
|
||||
return pathpkg.Clean(path)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Split(path string) (dir, file string) {
|
||||
return pathpkg.Split(path)
|
||||
}
|
||||
|
||||
func (l *lcowfs) Separator() byte {
|
||||
return '/'
|
||||
}
|
||||
|
||||
func (l *lcowfs) Abs(path string) (string, error) {
|
||||
// Abs is supposed to add the current working directory, which is meaningless in lcow.
|
||||
// So, return an error.
|
||||
return "", ErrNotSupported
|
||||
}
|
||||
|
||||
// Implementation taken from the Go standard library
|
||||
func (l *lcowfs) Walk(root string, walkFn filepath.WalkFunc) error {
|
||||
info, err := l.Lstat(root)
|
||||
if err != nil {
|
||||
err = walkFn(root, nil, err)
|
||||
} else {
|
||||
err = l.walk(root, info, walkFn)
|
||||
}
|
||||
if err == filepath.SkipDir {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// walk recursively descends path, calling w.
|
||||
func (l *lcowfs) walk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
|
||||
err := walkFn(path, info, nil)
|
||||
if err != nil {
|
||||
if info.IsDir() && err == filepath.SkipDir {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if !info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
names, err := l.readDirNames(path)
|
||||
if err != nil {
|
||||
return walkFn(path, info, err)
|
||||
}
|
||||
|
||||
for _, name := range names {
|
||||
filename := l.Join(path, name)
|
||||
fileInfo, err := l.Lstat(filename)
|
||||
if err != nil {
|
||||
if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
err = l.walk(filename, fileInfo, walkFn)
|
||||
if err != nil {
|
||||
if !fileInfo.IsDir() || err != filepath.SkipDir {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// readDirNames reads the directory named by dirname and returns
|
||||
// a sorted list of directory entries.
|
||||
func (l *lcowfs) readDirNames(dirname string) ([]string, error) {
|
||||
f, err := l.Open(dirname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
files, err := f.Readdir(-1)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
names := make([]string, len(files), len(files))
|
||||
for i := range files {
|
||||
names[i] = files[i].Name()
|
||||
}
|
||||
|
||||
sort.Strings(names)
|
||||
return names, nil
|
||||
}
|
||||
|
||||
// Note that Go's filepath.FromSlash/ToSlash convert between OS paths and '/'. Since the path separator
|
||||
// for LCOW (and Unix) is '/', they are no-ops.
|
||||
func (l *lcowfs) FromSlash(path string) string {
|
||||
return path
|
||||
}
|
||||
|
||||
func (l *lcowfs) ToSlash(path string) string {
|
||||
return path
|
||||
}
|
||||
|
||||
func (l *lcowfs) Match(pattern, name string) (matched bool, err error) {
|
||||
return pathpkg.Match(pattern, name)
|
||||
}
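These helpers deliberately use '/'-separated (path package) semantics rather than the host's filepath rules, because the LCOW remote filesystem is always Linux. A minimal, standalone sketch (not part of this change) of the semantics the helpers delegate to:

package main

import (
	"fmt"
	"path" // '/'-separated semantics, which lcowfs delegates to
)

func main() {
	// Clean, Join, Base, Dir and Match on lcowfs simply defer to the
	// standard path package, so these calls mirror their results.
	fmt.Println(path.Clean("/a/b/../c/"))      // "/a/c"
	fmt.Println(path.Join("/a", "b", "c.txt")) // "/a/b/c.txt"
	fmt.Println(path.Base("/a/b/c.txt"))       // "c.txt"
	fmt.Println(path.Dir("/a/b/c.txt"))        // "/a/b"

	ok, _ := path.Match("*.txt", "c.txt")
	fmt.Println(ok) // true

	// Rel follows the stdlib algorithm copied above, e.g.
	// l.Rel("/a/b", "/a/b/c/d") is expected to return "c/d".
}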
|
|
@ -15,6 +15,7 @@ import (
|
|||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/daemon/graphdriver/overlayutils"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/fsutils"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/locker"
|
||||
|
@ -341,21 +342,21 @@ func (d *Driver) Remove(id string) error {
|
|||
}
|
||||
|
||||
// Get creates and mounts the required file system for the given id and returns the mount path.
|
||||
func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
|
||||
func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, err error) {
|
||||
d.locker.Lock(id)
|
||||
defer d.locker.Unlock(id)
|
||||
dir := d.dir(id)
|
||||
if _, err := os.Stat(dir); err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
// If id has a root, just return it
|
||||
rootDir := path.Join(dir, "root")
|
||||
if _, err := os.Stat(rootDir); err == nil {
|
||||
return rootDir, nil
|
||||
return containerfs.NewLocalContainerFS(rootDir), nil
|
||||
}
|
||||
mergedDir := path.Join(dir, "merged")
|
||||
if count := d.ctr.Increment(mergedDir); count > 1 {
|
||||
return mergedDir, nil
|
||||
return containerfs.NewLocalContainerFS(mergedDir), nil
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
|
@ -366,7 +367,7 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
|
|||
}()
|
||||
lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
var (
|
||||
lowerDir = path.Join(d.dir(string(lowerID)), "root")
|
||||
|
@ -375,18 +376,18 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
|
|||
opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
|
||||
)
|
||||
if err := unix.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
|
||||
return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
|
||||
return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
|
||||
}
|
||||
// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
|
||||
// user namespace requires this to move a directory from lower to upper.
|
||||
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
return mergedDir, nil
|
||||
return containerfs.NewLocalContainerFS(mergedDir), nil
|
||||
}
|
||||
|
||||
// Put unmounts the mount path created for the given id.
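With this change every graphdriver's Get returns a containerfs.ContainerFS instead of a plain string. A hedged sketch of the caller-side shape, assuming the interface as updated here; the driver value, layer id and paths are placeholders, not names from this diff:

package main

import (
	"fmt"

	"github.com/docker/docker/daemon/graphdriver"
)

// readLayer is illustrative only: the old string mount path becomes a ContainerFS.
func readLayer(d graphdriver.Driver, id string) error {
	rootFS, err := d.Get(id, "")
	if err != nil {
		return err
	}
	defer d.Put(id)

	// Host-side tooling that still needs a plain path can call Path().
	fmt.Println("mounted at:", rootFS.Path())

	// File and path operations go through the ContainerFS methods, so the
	// same code keeps working when the rootfs is not locally mounted (LCOW).
	fi, err := rootFS.Stat(rootFS.Join(rootFS.Path(), "etc"))
	if err != nil {
		return err
	}
	fmt.Println("etc is a directory:", fi.IsDir())
	return nil
}

func main() {} // readLayer needs a real driver; not exercised here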
|
||||
|
|
|
@ -23,6 +23,7 @@ import (
|
|||
"github.com/docker/docker/daemon/graphdriver/quota"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/chrootarchive"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/directory"
|
||||
"github.com/docker/docker/pkg/fsutils"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
|
@ -514,12 +515,12 @@ func (d *Driver) Remove(id string) error {
|
|||
}
|
||||
|
||||
// Get creates and mounts the required file system for the given id and returns the mount path.
|
||||
func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
|
||||
func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
|
||||
d.locker.Lock(id)
|
||||
defer d.locker.Unlock(id)
|
||||
dir := d.dir(id)
|
||||
if _, err := os.Stat(dir); err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
diffDir := path.Join(dir, "diff")
|
||||
|
@ -527,14 +528,14 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
|
|||
if err != nil {
|
||||
// If no lower, just return diff directory
|
||||
if os.IsNotExist(err) {
|
||||
return diffDir, nil
|
||||
return containerfs.NewLocalContainerFS(diffDir), nil
|
||||
}
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mergedDir := path.Join(dir, "merged")
|
||||
if count := d.ctr.Increment(mergedDir); count > 1 {
|
||||
return mergedDir, nil
|
||||
return containerfs.NewLocalContainerFS(mergedDir), nil
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
|
@ -574,7 +575,7 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
|
|||
opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work"))
|
||||
mountData = label.FormatMountLabel(opts, mountLabel)
|
||||
if len(mountData) > pageSize {
|
||||
return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData))
|
||||
return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData))
|
||||
}
|
||||
|
||||
mount = func(source string, target string, mType string, flags uintptr, label string) error {
|
||||
|
@ -584,21 +585,21 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
|
|||
}
|
||||
|
||||
if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil {
|
||||
return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
|
||||
return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
|
||||
}
|
||||
|
||||
// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
|
||||
// user namespace requires this to move a directory from lower to upper.
|
||||
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return mergedDir, nil
|
||||
return containerfs.NewLocalContainerFS(mergedDir), nil
|
||||
}
|
||||
|
||||
// Put unmounts the mount path created for the given id.
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/plugingetter"
|
||||
"github.com/docker/docker/pkg/plugins"
|
||||
|
@ -129,20 +130,20 @@ func (d *graphDriverProxy) Remove(id string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) {
|
||||
func (d *graphDriverProxy) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
|
||||
args := &graphDriverRequest{
|
||||
ID: id,
|
||||
MountLabel: mountLabel,
|
||||
}
|
||||
var ret graphDriverResponse
|
||||
if err := d.p.Client().Call("GraphDriver.Get", args, &ret); err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
var err error
|
||||
if ret.Err != "" {
|
||||
err = errors.New(ret.Err)
|
||||
}
|
||||
return filepath.Join(d.p.BasePath(), ret.Dir), err
|
||||
return containerfs.NewLocalContainerFS(filepath.Join(d.p.BasePath(), ret.Dir)), err
|
||||
}
|
||||
|
||||
func (d *graphDriverProxy) Put(id string) error {
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/pkg/chrootarchive"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
|
@ -94,7 +95,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
|
|||
if err != nil {
|
||||
return fmt.Errorf("%s: %s", parent, err)
|
||||
}
|
||||
return CopyWithTar(parentDir, dir)
|
||||
return CopyWithTar(parentDir.Path(), dir)
|
||||
}
|
||||
|
||||
func (d *Driver) dir(id string) string {
|
||||
|
@ -107,14 +108,14 @@ func (d *Driver) Remove(id string) error {
|
|||
}
|
||||
|
||||
// Get returns the directory for the given id.
|
||||
func (d *Driver) Get(id, mountLabel string) (string, error) {
|
||||
func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
|
||||
dir := d.dir(id)
|
||||
if st, err := os.Stat(dir); err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
} else if !st.IsDir() {
|
||||
return "", fmt.Errorf("%s: not a directory", dir)
|
||||
return nil, fmt.Errorf("%s: not a directory", dir)
|
||||
}
|
||||
return dir, nil
|
||||
return containerfs.NewLocalContainerFS(dir), nil
|
||||
}
|
||||
|
||||
// Put is a noop for vfs that returns nil for the error, since this driver has no runtime resources to clean up.
|
||||
|
|
|
@ -26,6 +26,7 @@ import (
|
|||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/longpath"
|
||||
|
@ -354,36 +355,36 @@ func (d *Driver) Remove(id string) error {
|
|||
}
|
||||
|
||||
// Get returns the rootfs path for the id. This will mount the dir at its given path.
|
||||
func (d *Driver) Get(id, mountLabel string) (string, error) {
|
||||
func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
|
||||
panicIfUsedByLcow()
|
||||
logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
|
||||
var dir string
|
||||
|
||||
rID, err := d.resolveID(id)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
if count := d.ctr.Increment(rID); count > 1 {
|
||||
return d.cache[rID], nil
|
||||
return containerfs.NewLocalContainerFS(d.cache[rID]), nil
|
||||
}
|
||||
|
||||
// Getting the layer paths must be done outside of the lock.
|
||||
layerChain, err := d.getLayerChain(rID)
|
||||
if err != nil {
|
||||
d.ctr.Decrement(rID)
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
|
||||
d.ctr.Decrement(rID)
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
|
||||
d.ctr.Decrement(rID)
|
||||
if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
|
||||
logrus.Warnf("Failed to Deactivate %s: %s", id, err)
|
||||
}
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mountPath, err := hcsshim.GetLayerMountPath(d.info, rID)
|
||||
|
@ -395,7 +396,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
|
|||
if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
|
||||
logrus.Warnf("Failed to Deactivate %s: %s", id, err)
|
||||
}
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
d.cacheMu.Lock()
|
||||
d.cache[rID] = mountPath
|
||||
|
@ -409,7 +410,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
|
|||
dir = d.dir(id)
|
||||
}
|
||||
|
||||
return dir, nil
|
||||
return containerfs.NewLocalContainerFS(dir), nil
|
||||
}
|
||||
|
||||
// Put adds a new layer to the driver.
|
||||
|
@ -618,7 +619,7 @@ func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
|
|||
}
|
||||
defer d.Put(id)
|
||||
|
||||
return archive.ChangesSize(layerFs, changes), nil
|
||||
return archive.ChangesSize(layerFs.Path(), changes), nil
|
||||
}
|
||||
|
||||
// GetMetadata returns custom driver information.
|
||||
|
|
|
@ -13,6 +13,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/mount"
|
||||
"github.com/docker/docker/pkg/parsers"
|
||||
|
@ -356,10 +357,10 @@ func (d *Driver) Remove(id string) error {
|
|||
}
|
||||
|
||||
// Get returns the mountpoint for the given id after creating the target directories if necessary.
|
||||
func (d *Driver) Get(id, mountLabel string) (string, error) {
|
||||
func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
|
||||
mountpoint := d.mountPath(id)
|
||||
if count := d.ctr.Increment(mountpoint); count > 1 {
|
||||
return mountpoint, nil
|
||||
return containerfs.NewLocalContainerFS(mountpoint), nil
|
||||
}
|
||||
|
||||
filesystem := d.zfsPath(id)
|
||||
|
@ -369,17 +370,17 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
|
|||
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
|
||||
if err != nil {
|
||||
d.ctr.Decrement(mountpoint)
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
// Create the target directories if they don't exist
|
||||
if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil {
|
||||
d.ctr.Decrement(mountpoint)
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil {
|
||||
d.ctr.Decrement(mountpoint)
|
||||
return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err)
|
||||
return nil, fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err)
|
||||
}
|
||||
|
||||
// this could be our first mount after creation of the filesystem, and the root dir may still have root
|
||||
|
@ -387,10 +388,10 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
|
|||
if err := os.Chown(mountpoint, rootUID, rootGID); err != nil {
|
||||
mount.Unmount(mountpoint)
|
||||
d.ctr.Decrement(mountpoint)
|
||||
return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err)
|
||||
return nil, fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err)
|
||||
}
|
||||
|
||||
return mountpoint, nil
|
||||
return containerfs.NewLocalContainerFS(mountpoint), nil
|
||||
}
|
||||
|
||||
// Put removes the existing mountpoint for the given id if it exists.
|
||||
|
|
|
@ -2,12 +2,14 @@
|
|||
|
||||
package initlayer
|
||||
|
||||
import "github.com/docker/docker/pkg/containerfs"
|
||||
|
||||
// Setup populates a directory with mountpoints suitable
|
||||
// for bind-mounting dockerinit into the container. The mountpoint is simply an
|
||||
// empty file at /.dockerinit
|
||||
//
|
||||
// This extra layer is used by all containers as the top-most ro layer. It protects
|
||||
// the container from unwanted side-effects on the rw layer.
|
||||
func Setup(initLayer string, rootUID, rootGID int) error {
|
||||
func Setup(initLayer containerfs.ContainerFS, rootUID, rootGID int) error {
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
@ -16,7 +17,10 @@ import (
|
|||
//
|
||||
// This extra layer is used by all containers as the top-most ro layer. It protects
|
||||
// the container from unwanted side-effects on the rw layer.
|
||||
func Setup(initLayer string, rootIDs idtools.IDPair) error {
|
||||
func Setup(initLayerFs containerfs.ContainerFS, rootIDs idtools.IDPair) error {
|
||||
// Since all paths are local to the container, we can just extract initLayerFs.Path()
|
||||
initLayer := initLayerFs.Path()
|
||||
|
||||
for pth, typ := range map[string]string{
|
||||
"/dev/pts": "dir",
|
||||
"/dev/shm": "dir",
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
package initlayer
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
)
|
||||
|
||||
|
@ -12,6 +13,6 @@ import (
|
|||
//
|
||||
// This extra layer is used by all containers as the top-most ro layer. It protects
|
||||
// the container from unwanted side-effects on the rw layer.
|
||||
func Setup(initLayer string, rootIDs idtools.IDPair) error {
|
||||
func Setup(initLayer containerfs.ContainerFS, rootIDs idtools.IDPair) error {
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -19,7 +19,6 @@ import (
|
|||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/mount"
|
||||
"github.com/docker/docker/pkg/stringutils"
|
||||
"github.com/docker/docker/pkg/symlink"
|
||||
"github.com/docker/docker/volume"
|
||||
"github.com/opencontainers/runc/libcontainer/apparmor"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
|
@ -187,7 +186,7 @@ func setUser(s *specs.Spec, c *container.Container) error {
|
|||
}
|
||||
|
||||
func readUserFile(c *container.Container, p string) (io.ReadCloser, error) {
|
||||
fp, err := symlink.FollowSymlinkInScope(filepath.Join(c.BaseFS, p), c.BaseFS)
|
||||
fp, err := c.GetResourcePath(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -634,7 +633,7 @@ func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container)
|
|||
return err
|
||||
}
|
||||
s.Root = &specs.Root{
|
||||
Path: c.BaseFS,
|
||||
Path: c.BaseFS.Path(),
|
||||
Readonly: c.HostConfig.ReadonlyRootfs,
|
||||
}
|
||||
if err := c.SetupWorkingDirectory(daemon.idMappings.RootPair()); err != nil {
|
||||
|
|
|
@ -2,7 +2,6 @@ package daemon
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
|
@ -127,7 +126,7 @@ func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container)
|
|||
return err
|
||||
}
|
||||
s.Root = specs.Root{
|
||||
Path: filepath.Dir(c.BaseFS),
|
||||
Path: c.BaseFS.Dir(c.BaseFS.Path()),
|
||||
Readonly: c.HostConfig.ReadonlyRootfs,
|
||||
}
|
||||
if err := c.SetupWorkingDirectory(daemon.idMappings.RootPair()); err != nil {
|
||||
|
|
|
@ -239,7 +239,7 @@ func (daemon *Daemon) createSpecWindowsFields(c *container.Container, s *specs.S
|
|||
|
||||
s.Root.Readonly = false // Windows does not support a read-only root filesystem
|
||||
if !isHyperV {
|
||||
s.Root.Path = c.BaseFS // This is not set for Hyper-V containers
|
||||
s.Root.Path = c.BaseFS.Path() // This is not set for Hyper-V containers
|
||||
if !strings.HasSuffix(s.Root.Path, `\`) {
|
||||
s.Root.Path = s.Root.Path + `\` // Ensure a correctly formatted volume GUID path \\?\Volume{GUID}\
|
||||
}
|
||||
|
|
|
@ -204,7 +204,7 @@ func (daemon *Daemon) Cleanup(container *container.Container) {
|
|||
daemon.unregisterExecCommand(container, eConfig)
|
||||
}
|
||||
|
||||
if container.BaseFS != "" {
|
||||
if container.BaseFS != nil && container.BaseFS.Path() != "" {
|
||||
if err := container.UnmountVolumes(daemon.LogVolumeEvent); err != nil {
|
||||
logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err)
|
||||
}
|
||||
|
|
|
@ -198,12 +198,13 @@ func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ex
|
|||
return
|
||||
}
|
||||
|
||||
// TODO @gupta-ak: Figure out what to do here.
|
||||
dir, err := driver.Get(req.ID, req.MountLabel)
|
||||
if err != nil {
|
||||
respond(w, err)
|
||||
return
|
||||
}
|
||||
respond(w, &graphDriverResponse{Dir: dir})
|
||||
respond(w, &graphDriverResponse{Dir: dir.Path()})
|
||||
})
|
||||
|
||||
mux.HandleFunc("/GraphDriver.Put", func(w http.ResponseWriter, r *http.Request) {
|
||||
|
|
|
@ -15,6 +15,7 @@ import (
|
|||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
@ -137,7 +138,7 @@ type RWLayer interface {
|
|||
|
||||
// Mount mounts the RWLayer and returns the filesystem path
|
||||
// to the writable layer.
|
||||
Mount(mountLabel string) (string, error)
|
||||
Mount(mountLabel string) (containerfs.ContainerFS, error)
|
||||
|
||||
// Unmount unmounts the RWLayer. This should be called
|
||||
// for every mount. If there are multiple mount calls
|
||||
|
@ -178,7 +179,7 @@ type Metadata struct {
|
|||
// writable mount. Changes made here will
|
||||
// not be included in the Tar stream of the
|
||||
// RWLayer.
|
||||
type MountInit func(root string) error
|
||||
type MountInit func(root containerfs.ContainerFS) error
|
||||
|
||||
// CreateRWLayerOpts contains optional arguments to be passed to CreateRWLayer
|
||||
type CreateRWLayerOpts struct {
|
||||
|
|
|
@ -749,5 +749,5 @@ func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser,
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil
|
||||
return &fileGetPutter{storage.NewPathFileGetter(p.Path()), n.Driver, id}, nil
|
||||
}
|
||||
|
|
|
@ -10,9 +10,11 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/containerd/continuity/driver"
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/daemon/graphdriver/vfs"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/opencontainers/go-digest"
|
||||
|
@ -82,7 +84,7 @@ func newTestStore(t *testing.T) (Store, string, func()) {
|
|||
}
|
||||
}
|
||||
|
||||
type layerInit func(root string) error
|
||||
type layerInit func(root containerfs.ContainerFS) error
|
||||
|
||||
func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) {
|
||||
containerID := stringid.GenerateRandomID()
|
||||
|
@ -91,12 +93,12 @@ func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
path, err := mount.Mount("")
|
||||
pathFS, err := mount.Mount("")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := layerFunc(path); err != nil {
|
||||
if err := layerFunc(pathFS); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@ -123,7 +125,7 @@ func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) {
|
|||
}
|
||||
|
||||
type FileApplier interface {
|
||||
ApplyFile(root string) error
|
||||
ApplyFile(root containerfs.ContainerFS) error
|
||||
}
|
||||
|
||||
type testFile struct {
|
||||
|
@ -140,25 +142,25 @@ func newTestFile(name string, content []byte, perm os.FileMode) FileApplier {
|
|||
}
|
||||
}
|
||||
|
||||
func (tf *testFile) ApplyFile(root string) error {
|
||||
fullPath := filepath.Join(root, tf.name)
|
||||
if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil {
|
||||
func (tf *testFile) ApplyFile(root containerfs.ContainerFS) error {
|
||||
fullPath := root.Join(root.Path(), tf.name)
|
||||
if err := root.MkdirAll(root.Dir(fullPath), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
// Check if already exists
|
||||
if stat, err := os.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission {
|
||||
if err := os.Chmod(fullPath, tf.permission); err != nil {
|
||||
if stat, err := root.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission {
|
||||
if err := root.Lchmod(fullPath, tf.permission); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := ioutil.WriteFile(fullPath, tf.content, tf.permission); err != nil {
|
||||
if err := driver.WriteFile(root, fullPath, tf.content, tf.permission); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func initWithFiles(files ...FileApplier) layerInit {
|
||||
return func(root string) error {
|
||||
return func(root containerfs.ContainerFS) error {
|
||||
for _, f := range files {
|
||||
if err := f.ApplyFile(root); err != nil {
|
||||
return err
|
||||
|
@ -288,7 +290,7 @@ func TestMountAndRegister(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b, err := ioutil.ReadFile(filepath.Join(path2, "testfile.txt"))
|
||||
b, err := driver.ReadFile(path2, path2.Join(path2.Path(), "testfile.txt"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -391,12 +393,12 @@ func TestStoreRestore(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
path, err := m.Mount("")
|
||||
pathFS, err := m.Mount("")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(filepath.Join(path, "testfile.txt"), []byte("nothing here"), 0644); err != nil {
|
||||
if err := driver.WriteFile(pathFS, pathFS.Join(pathFS.Path(), "testfile.txt"), []byte("nothing here"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -430,20 +432,20 @@ func TestStoreRestore(t *testing.T) {
|
|||
|
||||
if mountPath, err := m2.Mount(""); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if path != mountPath {
|
||||
t.Fatalf("Unexpected path %s, expected %s", mountPath, path)
|
||||
} else if pathFS.Path() != mountPath.Path() {
|
||||
t.Fatalf("Unexpected path %s, expected %s", mountPath.Path(), pathFS.Path())
|
||||
}
|
||||
|
||||
if mountPath, err := m2.Mount(""); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if path != mountPath {
|
||||
t.Fatalf("Unexpected path %s, expected %s", mountPath, path)
|
||||
} else if pathFS.Path() != mountPath.Path() {
|
||||
t.Fatalf("Unexpected path %s, expected %s", mountPath.Path(), pathFS.Path())
|
||||
}
|
||||
if err := m2.Unmount(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b, err := ioutil.ReadFile(filepath.Join(path, "testfile.txt"))
|
||||
b, err := driver.ReadFile(pathFS, pathFS.Join(pathFS.Path(), "testfile.txt"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -618,7 +620,7 @@ func tarFromFiles(files ...FileApplier) ([]byte, error) {
|
|||
defer os.RemoveAll(td)
|
||||
|
||||
for _, f := range files {
|
||||
if err := f.ApplyFile(td); err != nil {
|
||||
if err := f.ApplyFile(containerfs.NewLocalContainerFS(td)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,6 +1,15 @@
|
|||
package layer
|
||||
|
||||
import "errors"
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Getter is an interface to get the path to a layer on the host.
|
||||
type Getter interface {
|
||||
// GetLayerPath gets the path for the layer. This is different from Get()
|
||||
// since that returns an interface to account for umountable layers.
|
||||
GetLayerPath(id string) (string, error)
|
||||
}
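The new Getter interface lets a graphdriver short-circuit GetLayerPath below when it knows a layer's host path even though Get() now returns a ContainerFS (the LCOW case). A self-contained, purely illustrative sketch of a driver satisfying the same method shape; the type and paths are hypothetical:

package main

import "fmt"

// lcowLikeDriver stands in for a graphdriver whose Get() would return a
// remote ContainerFS but whose layers are still backed by host files.
type lcowLikeDriver struct {
	layerDirs map[string]string
}

func (d *lcowLikeDriver) GetLayerPath(id string) (string, error) {
	p, ok := d.layerDirs[id]
	if !ok {
		return "", fmt.Errorf("layer %s not found", id)
	}
	return p, nil
}

func main() {
	d := &lcowLikeDriver{layerDirs: map[string]string{
		"abc": `C:\ProgramData\docker\lcow\abc`,
	}}
	p, err := d.GetLayerPath("abc")
	fmt.Println(p, err)
}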
|
||||
|
||||
// GetLayerPath returns the path to a layer
|
||||
func GetLayerPath(s Store, layer ChainID) (string, error) {
|
||||
|
@ -16,6 +25,10 @@ func GetLayerPath(s Store, layer ChainID) (string, error) {
|
|||
return "", ErrLayerDoesNotExist
|
||||
}
|
||||
|
||||
if layerGetter, ok := ls.driver.(Getter); ok {
|
||||
return layerGetter.GetLayerPath(rl.cacheID)
|
||||
}
|
||||
|
||||
path, err := ls.driver.Get(rl.cacheID, "")
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
@ -25,7 +38,7 @@ func GetLayerPath(s Store, layer ChainID) (string, error) {
|
|||
return "", err
|
||||
}
|
||||
|
||||
return path, nil
|
||||
return path.Path(), nil
|
||||
}
|
||||
|
||||
func (ls *layerStore) mountID(name string) string {
|
||||
|
|
|
@ -2,13 +2,13 @@ package layer
|
|||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/containerd/continuity/driver"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
)
|
||||
|
||||
func TestMountInit(t *testing.T) {
|
||||
|
@ -28,7 +28,7 @@ func TestMountInit(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
mountInit := func(root string) error {
|
||||
mountInit := func(root containerfs.ContainerFS) error {
|
||||
return initfile.ApplyFile(root)
|
||||
}
|
||||
|
||||
|
@ -40,22 +40,22 @@ func TestMountInit(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
path, err := m.Mount("")
|
||||
pathFS, err := m.Mount("")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
f, err := os.Open(filepath.Join(path, "testfile.txt"))
|
||||
fi, err := pathFS.Stat(pathFS.Join(pathFS.Path(), "testfile.txt"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
f, err := pathFS.Open(pathFS.Join(pathFS.Path(), "testfile.txt"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -88,7 +88,7 @@ func TestMountSize(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
mountInit := func(root string) error {
|
||||
mountInit := func(root containerfs.ContainerFS) error {
|
||||
return newTestFile("file-init", contentInit, 0777).ApplyFile(root)
|
||||
}
|
||||
rwLayerOpts := &CreateRWLayerOpts{
|
||||
|
@ -100,12 +100,12 @@ func TestMountSize(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
path, err := m.Mount("")
|
||||
pathFS, err := m.Mount("")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(filepath.Join(path, "file2"), content2, 0755); err != nil {
|
||||
if err := driver.WriteFile(pathFS, pathFS.Join(pathFS.Path(), "file2"), content2, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -140,7 +140,7 @@ func TestMountChanges(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
mountInit := func(root string) error {
|
||||
mountInit := func(root containerfs.ContainerFS) error {
|
||||
return initfile.ApplyFile(root)
|
||||
}
|
||||
rwLayerOpts := &CreateRWLayerOpts{
|
||||
|
@ -152,28 +152,28 @@ func TestMountChanges(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
path, err := m.Mount("")
|
||||
pathFS, err := m.Mount("")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := os.Chmod(filepath.Join(path, "testfile1.txt"), 0755); err != nil {
|
||||
if err := pathFS.Lchmod(pathFS.Join(pathFS.Path(), "testfile1.txt"), 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(filepath.Join(path, "testfile1.txt"), []byte("mount data!"), 0755); err != nil {
|
||||
if err := driver.WriteFile(pathFS, pathFS.Join(pathFS.Path(), "testfile1.txt"), []byte("mount data!"), 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := os.Remove(filepath.Join(path, "testfile2.txt")); err != nil {
|
||||
if err := pathFS.Remove(pathFS.Join(pathFS.Path(), "testfile2.txt")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := os.Chmod(filepath.Join(path, "testfile3.txt"), 0755); err != nil {
|
||||
if err := pathFS.Lchmod(pathFS.Join(pathFS.Path(), "testfile3.txt"), 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(filepath.Join(path, "testfile4.txt"), []byte("mount data!"), 0644); err != nil {
|
||||
if err := driver.WriteFile(pathFS, pathFS.Join(pathFS.Path(), "testfile4.txt"), []byte("mount data!"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"io"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
)
|
||||
|
||||
type mountedLayer struct {
|
||||
|
@ -88,7 +89,7 @@ type referencedRWLayer struct {
|
|||
*mountedLayer
|
||||
}
|
||||
|
||||
func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) {
|
||||
func (rl *referencedRWLayer) Mount(mountLabel string) (containerfs.ContainerFS, error) {
|
||||
return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel)
|
||||
}
|
||||
|
||||
|
|
|
@ -55,18 +55,17 @@ type (
|
|||
}
|
||||
)
|
||||
|
||||
// Archiver allows the reuse of most utility functions of this package
|
||||
// with a pluggable Untar function. Also, to facilitate the passing of
|
||||
// specific id mappings for untar, an archiver can be created with maps
|
||||
// which will then be passed to Untar operations
|
||||
// Archiver implements the Archiver interface and allows the reuse of most utility functions of
|
||||
// this package with a pluggable Untar function. Also, to facilitate the passing of specific id
|
||||
// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations.
|
||||
type Archiver struct {
|
||||
Untar func(io.Reader, string, *TarOptions) error
|
||||
IDMappings *idtools.IDMappings
|
||||
Untar func(io.Reader, string, *TarOptions) error
|
||||
IDMappingsVar *idtools.IDMappings
|
||||
}
|
||||
|
||||
// NewDefaultArchiver returns a new Archiver without any IDMappings
|
||||
func NewDefaultArchiver() *Archiver {
|
||||
return &Archiver{Untar: Untar, IDMappings: &idtools.IDMappings{}}
|
||||
return &Archiver{Untar: Untar, IDMappingsVar: &idtools.IDMappings{}}
|
||||
}
|
||||
|
||||
// breakoutError is used to differentiate errors related to breaking out
|
||||
|
@ -1025,8 +1024,8 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
|
|||
}
|
||||
defer archive.Close()
|
||||
options := &TarOptions{
|
||||
UIDMaps: archiver.IDMappings.UIDs(),
|
||||
GIDMaps: archiver.IDMappings.GIDs(),
|
||||
UIDMaps: archiver.IDMappingsVar.UIDs(),
|
||||
GIDMaps: archiver.IDMappingsVar.GIDs(),
|
||||
}
|
||||
return archiver.Untar(archive, dst, options)
|
||||
}
|
||||
|
@ -1039,8 +1038,8 @@ func (archiver *Archiver) UntarPath(src, dst string) error {
|
|||
}
|
||||
defer archive.Close()
|
||||
options := &TarOptions{
|
||||
UIDMaps: archiver.IDMappings.UIDs(),
|
||||
GIDMaps: archiver.IDMappings.GIDs(),
|
||||
UIDMaps: archiver.IDMappingsVar.UIDs(),
|
||||
GIDMaps: archiver.IDMappingsVar.GIDs(),
|
||||
}
|
||||
return archiver.Untar(archive, dst, options)
|
||||
}
|
||||
|
@ -1058,10 +1057,10 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error {
|
|||
return archiver.CopyFileWithTar(src, dst)
|
||||
}
|
||||
|
||||
// if this archiver is set up with ID mapping we need to create
|
||||
// if this Archiver is set up with ID mapping we need to create
|
||||
// the new destination directory with the remapped root UID/GID pair
|
||||
// as owner
|
||||
rootIDs := archiver.IDMappings.RootPair()
|
||||
rootIDs := archiver.IDMappingsVar.RootPair()
|
||||
// Create dst, copy src's content into it
|
||||
logrus.Debugf("Creating dest directory: %s", dst)
|
||||
if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
|
||||
|
@ -1112,7 +1111,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
|
|||
hdr.Name = filepath.Base(dst)
|
||||
hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
|
||||
|
||||
if err := remapIDs(archiver.IDMappings, hdr); err != nil {
|
||||
if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -1139,6 +1138,11 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
|
|||
return err
|
||||
}
|
||||
|
||||
// IDMappings returns the IDMappings of the archiver.
|
||||
func (archiver *Archiver) IDMappings() *idtools.IDMappings {
|
||||
return archiver.IDMappingsVar
|
||||
}
|
||||
|
||||
func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error {
|
||||
ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid})
|
||||
hdr.Uid, hdr.Gid = ids.UID, ids.GID
|
||||
|
|
|
@ -27,23 +27,23 @@ var (
|
|||
// path (from before being processed by utility functions from the path or
|
||||
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
|
||||
// path already ends in a `.` path segment, then another is not added. If the
|
||||
// clean path already ends in a path separator, then another is not added.
|
||||
func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
|
||||
// clean path already ends in the separator, then another is not added.
|
||||
func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string {
|
||||
// Ensure paths are in platform semantics
|
||||
cleanedPath = normalizePath(cleanedPath)
|
||||
originalPath = normalizePath(originalPath)
|
||||
cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1)
|
||||
originalPath = strings.Replace(originalPath, "/", string(sep), -1)
|
||||
|
||||
if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
|
||||
if !hasTrailingPathSeparator(cleanedPath) {
|
||||
if !hasTrailingPathSeparator(cleanedPath, sep) {
|
||||
// Add a separator if it doesn't already end with one (a cleaned
|
||||
// path would only end in a separator if it is the root).
|
||||
cleanedPath += string(filepath.Separator)
|
||||
cleanedPath += string(sep)
|
||||
}
|
||||
cleanedPath += "."
|
||||
}
|
||||
|
||||
if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
|
||||
cleanedPath += string(filepath.Separator)
|
||||
if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) {
|
||||
cleanedPath += string(sep)
|
||||
}
|
||||
|
||||
return cleanedPath
|
||||
|
@ -52,14 +52,14 @@ func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
|
|||
// assertsDirectory returns whether the given path is
|
||||
// asserted to be a directory, i.e., the path ends with
|
||||
// a trailing '/' or `/.`, assuming a path separator of `/`.
|
||||
func assertsDirectory(path string) bool {
|
||||
return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
|
||||
func assertsDirectory(path string, sep byte) bool {
|
||||
return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path)
|
||||
}
|
||||
|
||||
// hasTrailingPathSeparator returns whether the given
|
||||
// path ends with the given path separator character.
|
||||
func hasTrailingPathSeparator(path string) bool {
|
||||
return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
|
||||
func hasTrailingPathSeparator(path string, sep byte) bool {
|
||||
return len(path) > 0 && path[len(path)-1] == sep
|
||||
}
|
||||
|
||||
// specifiesCurrentDir returns whether the given path specifies
|
||||
|
@ -72,10 +72,10 @@ func specifiesCurrentDir(path string) bool {
|
|||
// basename by first cleaning the path but preserves a trailing "." if the
|
||||
// original path specified the current directory.
|
||||
func SplitPathDirEntry(path string) (dir, base string) {
|
||||
cleanedPath := filepath.Clean(normalizePath(path))
|
||||
cleanedPath := filepath.Clean(filepath.FromSlash(path))
|
||||
|
||||
if specifiesCurrentDir(path) {
|
||||
cleanedPath += string(filepath.Separator) + "."
|
||||
cleanedPath += string(os.PathSeparator) + "."
|
||||
}
|
||||
|
||||
return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
|
||||
|
@ -106,19 +106,24 @@ func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, er
|
|||
// Separate the source path between its directory and
|
||||
// the entry in that directory which we are archiving.
|
||||
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
|
||||
|
||||
filter := []string{sourceBase}
|
||||
opts := TarResourceRebaseOpts(sourceBase, rebaseName)
|
||||
|
||||
logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
|
||||
return TarWithOptions(sourceDir, opts)
|
||||
}
|
||||
|
||||
return TarWithOptions(sourceDir, &TarOptions{
|
||||
// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase
|
||||
// parameters to be sent to TarWithOptions (the TarOptions struct)
|
||||
func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
|
||||
filter := []string{sourceBase}
|
||||
return &TarOptions{
|
||||
Compression: Uncompressed,
|
||||
IncludeFiles: filter,
|
||||
IncludeSourceDir: true,
|
||||
RebaseNames: map[string]string{
|
||||
sourceBase: rebaseName,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
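Splitting the options out of TarResourceRebase lets a remote (LCOW) context build the same rebase options and run the tar wherever the files actually live. A hedged sketch of local usage; the source directory is a placeholder:

package main

import (
	"io"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Build the rebase options only; the caller decides where the tar runs.
	opts := archive.TarResourceRebaseOpts("app", "renamed-app")

	rdr, err := archive.TarWithOptions("/tmp/build-context", opts) // hypothetical dir
	if err != nil {
		panic(err)
	}
	defer rdr.Close()

	// Stream the rebased archive; entries named "app/..." come out as
	// "renamed-app/...".
	if _, err := io.Copy(os.Stdout, rdr); err != nil {
		panic(err)
	}
}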
|
||||
|
||||
// CopyInfo holds basic info about the source
|
||||
|
@ -281,7 +286,7 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir
|
|||
srcBase = srcInfo.RebaseName
|
||||
}
|
||||
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
|
||||
case assertsDirectory(dstInfo.Path):
|
||||
case assertsDirectory(dstInfo.Path, os.PathSeparator):
|
||||
// The destination does not exist and is asserted to be created as a
|
||||
// directory, but the source content is not a directory. This is an
|
||||
// error condition since you cannot create a directory from a file
|
||||
|
@ -351,6 +356,9 @@ func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.Read
|
|||
return rebased
|
||||
}
|
||||
|
||||
// TODO @gupta-ak. These might have to be changed in the future to be
|
||||
// continuity driver aware as well to support LCOW.
|
||||
|
||||
// CopyResource performs an archive copy from the given source path to the
|
||||
// given destination path. The source path MUST exist and the destination
|
||||
// path's parent directory must exist.
|
||||
|
@ -365,8 +373,8 @@ func CopyResource(srcPath, dstPath string, followLink bool) error {
|
|||
dstPath = normalizePath(dstPath)
|
||||
|
||||
// Clean the source and destination paths.
|
||||
srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
|
||||
dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
|
||||
srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
|
||||
dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)
|
||||
|
||||
if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
|
||||
return err
|
||||
|
@ -429,7 +437,8 @@ func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseNa
|
|||
// resolvedDirPath will have been cleaned (no trailing path separators) so
|
||||
// we can manually join it with the base path element.
|
||||
resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
|
||||
if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
|
||||
if hasTrailingPathSeparator(path, os.PathSeparator) &&
|
||||
filepath.Base(path) != filepath.Base(resolvedPath) {
|
||||
rebaseName = filepath.Base(path)
|
||||
}
|
||||
}
|
||||
|
@ -442,11 +451,13 @@ func GetRebaseName(path, resolvedPath string) (string, string) {
|
|||
// linkTarget will have been cleaned (no trailing path separators and dot) so
|
||||
// we can manually join it with them
|
||||
var rebaseName string
|
||||
if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
|
||||
if specifiesCurrentDir(path) &&
|
||||
!specifiesCurrentDir(resolvedPath) {
|
||||
resolvedPath += string(filepath.Separator) + "."
|
||||
}
|
||||
|
||||
if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
|
||||
if hasTrailingPathSeparator(path, os.PathSeparator) &&
|
||||
!hasTrailingPathSeparator(resolvedPath, os.PathSeparator) {
|
||||
resolvedPath += string(filepath.Separator)
|
||||
}
|
||||
|
||||
|
|
|
@ -16,7 +16,10 @@ func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
|
|||
if idMappings == nil {
|
||||
idMappings = &idtools.IDMappings{}
|
||||
}
|
||||
return &archive.Archiver{Untar: Untar, IDMappings: idMappings}
|
||||
return &archive.Archiver{
|
||||
Untar: Untar,
|
||||
IDMappingsVar: idMappings,
|
||||
}
|
||||
}
|
||||
|
||||
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
|
||||
|
|
194
pkg/containerfs/archiver.go
Normal file
|
@ -0,0 +1,194 @@
|
|||
package containerfs
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/promise"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// TarFunc provides a function definition for a custom Tar function
|
||||
type TarFunc func(string, *archive.TarOptions) (io.ReadCloser, error)
|
||||
|
||||
// UntarFunc provides a function definition for a custom Untar function
|
||||
type UntarFunc func(io.Reader, string, *archive.TarOptions) error
|
||||
|
||||
// Archiver provides an implementation similar to archive.Archiver, built on the rootfs (ContainerFS) abstraction
|
||||
type Archiver struct {
|
||||
SrcDriver Driver
|
||||
DstDriver Driver
|
||||
Tar TarFunc
|
||||
Untar UntarFunc
|
||||
IDMappingsVar *idtools.IDMappings
|
||||
}
|
||||
|
||||
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
|
||||
// If either Tar or Untar fails, TarUntar aborts and returns the error.
|
||||
func (archiver *Archiver) TarUntar(src, dst string) error {
|
||||
logrus.Debugf("TarUntar(%s %s)", src, dst)
|
||||
tarArchive, err := archiver.Tar(src, &archive.TarOptions{Compression: archive.Uncompressed})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tarArchive.Close()
|
||||
options := &archive.TarOptions{
|
||||
UIDMaps: archiver.IDMappingsVar.UIDs(),
|
||||
GIDMaps: archiver.IDMappingsVar.GIDs(),
|
||||
}
|
||||
return archiver.Untar(tarArchive, dst, options)
|
||||
}
|
||||
|
||||
// UntarPath untars a file from path to a destination; src is the source tar file path.
|
||||
func (archiver *Archiver) UntarPath(src, dst string) error {
|
||||
tarArchive, err := archiver.SrcDriver.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tarArchive.Close()
|
||||
options := &archive.TarOptions{
|
||||
UIDMaps: archiver.IDMappingsVar.UIDs(),
|
||||
GIDMaps: archiver.IDMappingsVar.GIDs(),
|
||||
}
|
||||
return archiver.Untar(tarArchive, dst, options)
|
||||
}
|
||||
|
||||
// CopyWithTar creates a tar archive of filesystem path `src`, and
|
||||
// unpacks it at filesystem path `dst`.
|
||||
// The archive is streamed directly with fixed buffering and no
|
||||
// intermediary disk IO.
|
||||
func (archiver *Archiver) CopyWithTar(src, dst string) error {
|
||||
srcSt, err := archiver.SrcDriver.Stat(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !srcSt.IsDir() {
|
||||
return archiver.CopyFileWithTar(src, dst)
|
||||
}
|
||||
|
||||
// if this archiver is set up with ID mapping we need to create
|
||||
// the new destination directory with the remapped root UID/GID pair
|
||||
// as owner
|
||||
rootIDs := archiver.IDMappingsVar.RootPair()
|
||||
// Create dst, copy src's content into it
|
||||
if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
|
||||
return archiver.TarUntar(src, dst)
|
||||
}
|
||||
|
||||
// CopyFileWithTar emulates the behavior of the 'cp' command-line
|
||||
// for a single file. It copies a regular file from path `src` to
|
||||
// path `dst`, and preserves all its metadata.
|
||||
func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
|
||||
logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
|
||||
srcDriver := archiver.SrcDriver
|
||||
dstDriver := archiver.DstDriver
|
||||
|
||||
srcSt, err := srcDriver.Stat(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if srcSt.IsDir() {
|
||||
return fmt.Errorf("Can't copy a directory")
|
||||
}
|
||||
|
||||
// Clean up the trailing slash. This must be done in an operating
|
||||
// system specific manner.
|
||||
if dst[len(dst)-1] == dstDriver.Separator() {
|
||||
dst = dstDriver.Join(dst, srcDriver.Base(src))
|
||||
}
|
||||
|
||||
// The original call was system.MkdirAll, which is just
|
||||
// os.MkdirAll on non-Windows platforms and a Windows-specific variant on Windows.
|
||||
if dstDriver.OS() == "windows" {
|
||||
// Now we are WCOW
|
||||
if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// We can just use the driver.MkdirAll function
|
||||
if err := dstDriver.MkdirAll(dstDriver.Dir(dst), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
r, w := io.Pipe()
|
||||
errC := promise.Go(func() error {
|
||||
defer w.Close()
|
||||
|
||||
srcF, err := srcDriver.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer srcF.Close()
|
||||
|
||||
hdr, err := tar.FileInfoHeader(srcSt, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Name = dstDriver.Base(dst)
|
||||
if dstDriver.OS() == "windows" {
|
||||
hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
|
||||
} else {
|
||||
hdr.Mode = int64(os.FileMode(hdr.Mode))
|
||||
}
|
||||
|
||||
if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tw := tar.NewWriter(w)
|
||||
defer tw.Close()
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.Copy(tw, srcF); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
defer func() {
|
||||
if er := <-errC; err == nil && er != nil {
|
||||
err = er
|
||||
}
|
||||
}()
|
||||
|
||||
err = archiver.Untar(r, dstDriver.Dir(dst), nil)
|
||||
if err != nil {
|
||||
r.CloseWithError(err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// IDMappings returns the IDMappings of the archiver.
|
||||
func (archiver *Archiver) IDMappings() *idtools.IDMappings {
|
||||
return archiver.IDMappingsVar
|
||||
}
|
||||
|
||||
func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error {
|
||||
ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid})
|
||||
hdr.Uid, hdr.Gid = ids.UID, ids.GID
|
||||
return err
|
||||
}
|
||||
|
||||
// chmodTarEntry is used to adjust the file permissions used in tar header based
|
||||
// on the platform where the archiving is done.
|
||||
func chmodTarEntry(perm os.FileMode) os.FileMode {
|
||||
//perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
|
||||
permPart := perm & os.ModePerm
|
||||
noPermPart := perm &^ os.ModePerm
|
||||
// Add the x bit: make everything +x from windows
|
||||
permPart |= 0111
|
||||
permPart &= 0755
|
||||
|
||||
return noPermPart | permPart
|
||||
}
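The new Archiver mirrors archive.Archiver but takes explicit source and destination drivers plus pluggable Tar/Untar functions, which is what lets ADD/COPY and docker cp cross the host/LCOW boundary. A local-to-local sketch; for LCOW the daemon would plug in the remote filesystem's driver and tar implementations, and the paths below are placeholders:

package main

import (
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/containerfs"
	"github.com/docker/docker/pkg/idtools"
)

func main() {
	a := &containerfs.Archiver{
		SrcDriver:     containerfs.NewLocalDriver(),
		DstDriver:     containerfs.NewLocalDriver(),
		Tar:           archive.TarWithOptions,
		Untar:         chrootarchive.Untar,
		IDMappingsVar: &idtools.IDMappings{},
	}
	// Copies /tmp/src into /tmp/dst by streaming a tar between the two
	// drivers; with LCOW the destination side would run inside the utility VM.
	if err := a.CopyWithTar("/tmp/src", "/tmp/dst"); err != nil {
		panic(err)
	}
}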
|
87
pkg/containerfs/containerfs.go
Normal file
|
@ -0,0 +1,87 @@
|
|||
package containerfs
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"github.com/containerd/continuity/driver"
|
||||
"github.com/containerd/continuity/pathdriver"
|
||||
"github.com/docker/docker/pkg/symlink"
|
||||
)
|
||||
|
||||
// ContainerFS is an interface that represents a root file system
|
||||
type ContainerFS interface {
|
||||
// Path returns the path to the root. Note that this may not exist
|
||||
// on the local system, so the continuity operations must be used
|
||||
Path() string
|
||||
|
||||
// ResolveScopedPath evaluates the given path scoped to the root.
|
||||
// For example, if root=/a, and path=/b/c, then this function would return /a/b/c.
|
||||
// If rawPath is true, then the function will not perform any modifications
|
||||
// before path resolution. Otherwise, the function will clean the given path
|
||||
// by making it an absolute path.
|
||||
ResolveScopedPath(path string, rawPath bool) (string, error)
|
||||
|
||||
Driver
|
||||
}
|
||||
|
||||
// Driver combines both continuity's Driver and PathDriver interfaces with OS and
// Architecture methods to determine the platform of the rootfs.
|
||||
type Driver interface {
|
||||
// OS returns the OS where the rootfs is located. Essentially,
|
||||
// runtime.GOOS for everything aside from LCOW, which is "linux"
|
||||
OS() string
|
||||
|
||||
// Architecture returns the hardware architecture where the
|
||||
// container is located.
|
||||
Architecture() string
|
||||
|
||||
// Driver & PathDriver provide methods to manipulate files & paths
|
||||
driver.Driver
|
||||
pathdriver.PathDriver
|
||||
}
|
||||
|
||||
// NewLocalContainerFS is a helper function to implement daemon's Mount interface
|
||||
// when the graphdriver mount point is a local path on the machine.
|
||||
func NewLocalContainerFS(path string) ContainerFS {
|
||||
return &local{
|
||||
path: path,
|
||||
Driver: driver.LocalDriver,
|
||||
PathDriver: pathdriver.LocalPathDriver,
|
||||
}
|
||||
}
|
||||
|
||||
// NewLocalDriver provides file and path drivers for a local file system. They are
|
||||
// essentially a wrapper around the `os` and `filepath` functions.
|
||||
func NewLocalDriver() Driver {
|
||||
return &local{
|
||||
Driver: driver.LocalDriver,
|
||||
PathDriver: pathdriver.LocalPathDriver,
|
||||
}
|
||||
}
|
||||
|
||||
type local struct {
|
||||
path string
|
||||
driver.Driver
|
||||
pathdriver.PathDriver
|
||||
}
|
||||
|
||||
func (l *local) Path() string {
|
||||
return l.path
|
||||
}
|
||||
|
||||
func (l *local) ResolveScopedPath(path string, rawPath bool) (string, error) {
|
||||
cleanedPath := path
|
||||
if !rawPath {
|
||||
cleanedPath = cleanScopedPath(path)
|
||||
}
|
||||
return symlink.FollowSymlinkInScope(filepath.Join(l.path, cleanedPath), l.path)
|
||||
}
|
||||
|
||||
func (l *local) OS() string {
|
||||
return runtime.GOOS
|
||||
}
|
||||
|
||||
func (l *local) Architecture() string {
|
||||
return runtime.GOARCH
|
||||
}
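A short sketch of the local implementation in use; the mount path is illustrative, and symlink resolution on paths that do not exist is best-effort:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/containerfs"
)

func main() {
	// Wrap a (hypothetical) graphdriver mount point as a ContainerFS.
	root := containerfs.NewLocalContainerFS("/var/lib/docker/overlay2/abc/merged")

	// Container-relative paths are resolved inside the root, so ".." and
	// symlinks cannot escape it.
	p, err := root.ResolveScopedPath("/etc/../etc/hosts", false)
	fmt.Println(p, err)

	// Path and file operations go through the embedded continuity drivers.
	fmt.Println(root.Join(root.Path(), "etc", "hosts"))
	fmt.Println(root.OS()) // runtime.GOOS for the local implementation
}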
|
10
pkg/containerfs/containerfs_unix.go
Normal file
|
@ -0,0 +1,10 @@
|
|||
// +build !windows
|
||||
|
||||
package containerfs
|
||||
|
||||
import "path/filepath"
|
||||
|
||||
// cleanScopedPath prepends a file separator to the scoped path so it can be combined with a mount path.
|
||||
func cleanScopedPath(path string) string {
|
||||
return filepath.Join(string(filepath.Separator), path)
|
||||
}
|
15
pkg/containerfs/containerfs_windows.go
Normal file
|
@ -0,0 +1,15 @@
|
|||
package containerfs
|
||||
|
||||
import "path/filepath"
|
||||
|
||||
// cleanScopedPath removes the C:\ syntax, and prepares to combine
|
||||
// with a volume path
|
||||
func cleanScopedPath(path string) string {
|
||||
if len(path) >= 2 {
|
||||
c := path[0]
|
||||
if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
|
||||
path = path[2:]
|
||||
}
|
||||
}
|
||||
return filepath.Join(string(filepath.Separator), path)
|
||||
}
|
7
pkg/system/init_unix.go
Normal file
|
@ -0,0 +1,7 @@
|
|||
// +build !windows
|
||||
|
||||
package system
|
||||
|
||||
// InitLCOW does nothing since LCOW is a Windows-only feature
|
||||
func InitLCOW(experimental bool) {
|
||||
}
|
|
@ -8,9 +8,10 @@ import "os"
|
|||
// on build number. @jhowardmsft
|
||||
var lcowSupported = false
|
||||
|
||||
func init() {
|
||||
// InitLCOW sets whether LCOW is supported or not
|
||||
func InitLCOW(experimental bool) {
|
||||
// LCOW initialization
|
||||
if os.Getenv("LCOW_SUPPORTED") != "" {
|
||||
if experimental && os.Getenv("LCOW_SUPPORTED") != "" {
|
||||
lcowSupported = true
|
||||
}
|
||||
|
||||
|
|
|
@ -1,6 +1,13 @@
|
|||
package system
|
||||
|
||||
import "runtime"
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/continuity/pathdriver"
|
||||
)
|
||||
|
||||
const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
|
||||
|
@ -19,3 +26,35 @@ func DefaultPathEnv(platform string) string {
|
|||
return defaultUnixPathEnv
|
||||
|
||||
}
|
||||
|
||||
// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
|
||||
// is the system drive.
|
||||
// On Linux: this is a no-op.
|
||||
// On Windows: this does the following:
|
||||
// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
|
||||
// This is used, for example, when validating a user provided path in docker cp.
|
||||
// If a drive letter is supplied, it must be the system drive. The drive letter
|
||||
// is always removed. Also, it translates it to OS semantics (IOW / to \). We
|
||||
// need the path in this syntax so that it can ultimately be concatenated with
|
||||
// a Windows long-path which doesn't support drive-letters. Examples:
|
||||
// C: --> Fail
|
||||
// C:\ --> \
|
||||
// a --> a
|
||||
// /a --> \a
|
||||
// d:\ --> Fail
|
||||
func CheckSystemDriveAndRemoveDriveLetter(path string, driver pathdriver.PathDriver) (string, error) {
|
||||
if runtime.GOOS != "windows" || LCOWSupported() {
|
||||
return path, nil
|
||||
}
|
||||
|
||||
if len(path) == 2 && string(path[1]) == ":" {
|
||||
return "", fmt.Errorf("No relative path specified in %q", path)
|
||||
}
|
||||
if !driver.IsAbs(path) || len(path) < 2 {
|
||||
return filepath.FromSlash(path), nil
|
||||
}
|
||||
if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
|
||||
return "", fmt.Errorf("The specified path is not on the system drive (C:)")
|
||||
}
|
||||
return filepath.FromSlash(path[2:]), nil
|
||||
}
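A short usage sketch of the new signature with `pathdriver.LocalPathDriver`; on Linux (and under LCOW) the function is a pass-through, while on Windows it applies the table in the comment above. The sample inputs simply mirror that table.

```go
// Sketch: exercising the documented transformations. On non-Windows hosts
// every input is returned unchanged with a nil error.
package main

import (
	"fmt"

	"github.com/containerd/continuity/pathdriver"
	"github.com/docker/docker/pkg/system"
)

func main() {
	for _, p := range []string{`c:\`, `/a`, `a`, `d:\`, `c:`} {
		out, err := system.CheckSystemDriveAndRemoveDriveLetter(p, pathdriver.LocalPathDriver)
		fmt.Printf("%-6q -> %q, err=%v\n", p, out, err)
	}
}
```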
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
// +build !windows
|
||||
|
||||
package system
|
||||
|
||||
// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
|
||||
// is the system drive. This is a no-op on Linux.
|
||||
func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
|
||||
return path, nil
|
||||
}
|
|
@ -1,33 +0,0 @@
|
|||
// +build windows
|
||||
|
||||
package system
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
|
||||
// This is used, for example, when validating a user provided path in docker cp.
|
||||
// If a drive letter is supplied, it must be the system drive. The drive letter
|
||||
// is always removed. Also, it translates it to OS semantics (IOW / to \). We
|
||||
// need the path in this syntax so that it can ultimately be concatenated with
|
||||
// a Windows long-path which doesn't support drive-letters. Examples:
|
||||
// C: --> Fail
|
||||
// C:\ --> \
|
||||
// a --> a
|
||||
// /a --> \a
|
||||
// d:\ --> Fail
|
||||
func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
|
||||
if len(path) == 2 && string(path[1]) == ":" {
|
||||
return "", fmt.Errorf("No relative path specified in %q", path)
|
||||
}
|
||||
if !filepath.IsAbs(path) || len(path) < 2 {
|
||||
return filepath.FromSlash(path), nil
|
||||
}
|
||||
if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
|
||||
return "", fmt.Errorf("The specified path is not on the system drive (C:)")
|
||||
}
|
||||
return filepath.FromSlash(path[2:]), nil
|
||||
}
|
|
@ -2,19 +2,23 @@
|
|||
|
||||
package system
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/containerd/continuity/pathdriver"
|
||||
)
|
||||
|
||||
// TestCheckSystemDriveAndRemoveDriveLetter tests CheckSystemDriveAndRemoveDriveLetter
|
||||
func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
|
||||
// Fails if not C drive.
|
||||
_, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`)
|
||||
_, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`, pathdriver.LocalPathDriver)
|
||||
if err == nil || (err != nil && err.Error() != "The specified path is not on the system drive (C:)") {
|
||||
t.Fatalf("Expected error for d:")
|
||||
}
|
||||
|
||||
// Single character is unchanged
|
||||
var path string
|
||||
if path, err = CheckSystemDriveAndRemoveDriveLetter("z"); err != nil {
|
||||
if path, err = CheckSystemDriveAndRemoveDriveLetter("z", pathdriver.LocalPathDriver); err != nil {
|
||||
t.Fatalf("Single character should pass")
|
||||
}
|
||||
if path != "z" {
|
||||
|
@ -22,7 +26,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
|
|||
}
|
||||
|
||||
// Two characters without colon is unchanged
|
||||
if path, err = CheckSystemDriveAndRemoveDriveLetter("AB"); err != nil {
|
||||
if path, err = CheckSystemDriveAndRemoveDriveLetter("AB", pathdriver.LocalPathDriver); err != nil {
|
||||
t.Fatalf("2 characters without colon should pass")
|
||||
}
|
||||
if path != "AB" {
|
||||
|
@ -30,7 +34,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
|
|||
}
|
||||
|
||||
// Abs path without drive letter
|
||||
if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`); err != nil {
|
||||
if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`, pathdriver.LocalPathDriver); err != nil {
|
||||
t.Fatalf("abs path no drive letter should pass")
|
||||
}
|
||||
if path != `\l` {
|
||||
|
@ -38,7 +42,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
|
|||
}
|
||||
|
||||
// Abs path without drive letter, linux style
|
||||
if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`); err != nil {
|
||||
if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`, pathdriver.LocalPathDriver); err != nil {
|
||||
t.Fatalf("abs path no drive letter linux style should pass")
|
||||
}
|
||||
if path != `\l` {
|
||||
|
@ -46,7 +50,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
|
|||
}
|
||||
|
||||
// Drive-colon should be stripped
|
||||
if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`); err != nil {
|
||||
if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`, pathdriver.LocalPathDriver); err != nil {
|
||||
t.Fatalf("An absolute path should pass")
|
||||
}
|
||||
if path != `\` {
|
||||
|
@ -54,7 +58,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
|
|||
}
|
||||
|
||||
// Verify with a linux-style path
|
||||
if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`); err != nil {
|
||||
if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`, pathdriver.LocalPathDriver); err != nil {
|
||||
t.Fatalf("An absolute path should pass")
|
||||
}
|
||||
if path != `\` {
|
||||
|
@ -62,7 +66,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
|
|||
}
|
||||
|
||||
// Failure on c:
|
||||
if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`); err == nil {
|
||||
if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`, pathdriver.LocalPathDriver); err == nil {
|
||||
t.Fatalf("c: should fail")
|
||||
}
|
||||
if err.Error() != `No relative path specified in "c:"` {
|
||||
|
@ -70,7 +74,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
|
|||
}
|
||||
|
||||
// Failure on d:
|
||||
if path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`); err == nil {
|
||||
if path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`, pathdriver.LocalPathDriver); err == nil {
|
||||
t.Fatalf("c: should fail")
|
||||
}
|
||||
if err.Error() != `No relative path specified in "d:"` {
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/daemon/initlayer"
|
||||
"github.com/docker/docker/libcontainerd"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/mount"
|
||||
"github.com/docker/docker/pkg/plugins"
|
||||
|
@ -57,7 +58,8 @@ func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error {
|
|||
}
|
||||
}
|
||||
|
||||
if err := initlayer.Setup(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName), idtools.IDPair{0, 0}); err != nil {
|
||||
rootFS := containerfs.NewLocalContainerFS(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName))
|
||||
if err := initlayer.Setup(rootFS, idtools.IDPair{0, 0}); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
|
|
|
@ -27,6 +27,8 @@ github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
|
|||
github.com/imdario/mergo 0.2.1
|
||||
golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0
|
||||
|
||||
github.com/containerd/continuity 22694c680ee48fb8f50015b44618517e2bde77e8
|
||||
|
||||
#get libnetwork packages
|
||||
github.com/docker/libnetwork d5c822319097cc01cc9bd5ffedd74c7ce7c894f2
|
||||
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
|
||||
|
|
4
vendor/github.com/Microsoft/opengcs/service/gcsutils/README
generated
vendored
Normal file
|
@ -0,0 +1,4 @@
|
|||
1. This program only runs on Linux, so first copy the files over to a Linux machine.
|
||||
2. Get Go and then run make get-deps && make. This will set the $GOPATH for you and build the binaries.
|
||||
3. vhd_to_tar and tar_to_vhd are the standalone executables that read/write to stdin/out and do the tar <-> vhd conversion.
|
||||
tar2vhd_server is the service VM server that takes client requests over hvsock.
|
109
vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/defs.go
generated
vendored
Normal file
|
@ -0,0 +1,109 @@
|
|||
package remotefs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// RemotefsCmd is the name of the remotefs meta command
|
||||
const RemotefsCmd = "remotefs"
|
||||
|
||||
// Names of the commands when called from the CLI context (remotefs <CMD> ...)
|
||||
const (
|
||||
StatCmd = "stat"
|
||||
LstatCmd = "lstat"
|
||||
ReadlinkCmd = "readlink"
|
||||
MkdirCmd = "mkdir"
|
||||
MkdirAllCmd = "mkdirall"
|
||||
RemoveCmd = "remove"
|
||||
RemoveAllCmd = "removeall"
|
||||
LinkCmd = "link"
|
||||
SymlinkCmd = "symlink"
|
||||
LchmodCmd = "lchmod"
|
||||
LchownCmd = "lchown"
|
||||
MknodCmd = "mknod"
|
||||
MkfifoCmd = "mkfifo"
|
||||
OpenFileCmd = "openfile"
|
||||
ReadFileCmd = "readfile"
|
||||
WriteFileCmd = "writefile"
|
||||
ReadDirCmd = "readdir"
|
||||
ResolvePathCmd = "resolvepath"
|
||||
ExtractArchiveCmd = "extractarchive"
|
||||
ArchivePathCmd = "archivepath"
|
||||
)
|
||||
|
||||
// ErrInvalid is returned if the parameters are invalid
|
||||
var ErrInvalid = errors.New("invalid arguments")
|
||||
|
||||
// ErrUnknown is returned for an unknown remotefs command
|
||||
var ErrUnknown = errors.New("unknown command")
|
||||
|
||||
// ExportedError is the serialized version of a Go error.
|
||||
// It also provides a trivial implementation of the error interface.
|
||||
type ExportedError struct {
|
||||
ErrString string
|
||||
ErrNum int `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Error returns an error string
|
||||
func (ee *ExportedError) Error() string {
|
||||
return ee.ErrString
|
||||
}
|
||||
|
||||
// FileInfo is the stat struct returned by the remotefs system. It
|
||||
// fulfills the os.FileInfo interface.
|
||||
type FileInfo struct {
|
||||
NameVar string
|
||||
SizeVar int64
|
||||
ModeVar os.FileMode
|
||||
ModTimeVar int64 // Serialization of time.Time breaks in travis, so use an int
|
||||
IsDirVar bool
|
||||
}
|
||||
|
||||
var _ os.FileInfo = &FileInfo{}
|
||||
|
||||
// Name returns the filename from a FileInfo structure
|
||||
func (f *FileInfo) Name() string { return f.NameVar }
|
||||
|
||||
// Size returns the size from a FileInfo structure
|
||||
func (f *FileInfo) Size() int64 { return f.SizeVar }
|
||||
|
||||
// Mode returns the mode from a FileInfo structure
|
||||
func (f *FileInfo) Mode() os.FileMode { return f.ModeVar }
|
||||
|
||||
// ModTime returns the modification time from a FileInfo structure
|
||||
func (f *FileInfo) ModTime() time.Time { return time.Unix(0, f.ModTimeVar) }
|
||||
|
||||
// IsDir returns the is-directory indicator from a FileInfo structure
|
||||
func (f *FileInfo) IsDir() bool { return f.IsDirVar }
|
||||
|
||||
// Sys provides an interface to a FileInfo structure
|
||||
func (f *FileInfo) Sys() interface{} { return nil }
|
||||
|
||||
// FileHeader is a header for remote *os.File operations for remotefs.OpenFile
|
||||
type FileHeader struct {
|
||||
Cmd uint32
|
||||
Size uint64
|
||||
}
|
||||
|
||||
const (
|
||||
// Read request command.
|
||||
Read uint32 = iota
|
||||
// Write request command.
|
||||
Write
|
||||
// Seek request command.
|
||||
Seek
|
||||
// Close request command.
|
||||
Close
|
||||
// CmdOK is a response meaning request succeeded.
|
||||
CmdOK
|
||||
// CmdFailed is a response meaning request failed.
|
||||
CmdFailed
|
||||
)
|
||||
|
||||
// SeekHeader is header for the Seek operation for remotefs.OpenFile
|
||||
type SeekHeader struct {
|
||||
Offset int64
|
||||
Whence int32
|
||||
}
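FileHeader and SeekHeader define a small binary framing: a fixed 12-byte header (Cmd uint32, Size uint64) encoded big-endian, optionally followed by Size payload bytes. A sketch of framing a Read request by hand with encoding/binary (the WriteFileHeader helper in utils.go produces the same bytes):

```go
// Sketch: hand-encoding a remotefs Read request frame. The header is written
// big-endian, matching ReadFileHeader/WriteFileHeader in utils.go.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/Microsoft/opengcs/service/gcsutils/remotefs"
)

func main() {
	buf := &bytes.Buffer{}
	hdr := remotefs.FileHeader{Cmd: remotefs.Read, Size: 4096} // ask for up to 4 KiB
	if err := binary.Write(buf, binary.BigEndian, &hdr); err != nil {
		panic(err)
	}
	fmt.Printf("framed %d header bytes: % x\n", buf.Len(), buf.Bytes())
}
```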
|
546
vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/remotefs.go
generated
vendored
Normal file
|
@ -0,0 +1,546 @@
|
|||
// +build !windows
|
||||
|
||||
package remotefs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/symlink"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Func is the function definition for a generic remote fs function
|
||||
// The input to the function is any serialized structs / data from in and the string slice
|
||||
// from args. The output of the function will be serialized and written to out.
|
||||
type Func func(stdin io.Reader, stdout io.Writer, args []string) error
|
||||
|
||||
// Commands provide a string -> remotefs function mapping.
|
||||
// This is useful for commandline programs that will receive a string
|
||||
// as the function to execute.
|
||||
var Commands = map[string]Func{
|
||||
StatCmd: Stat,
|
||||
LstatCmd: Lstat,
|
||||
ReadlinkCmd: Readlink,
|
||||
MkdirCmd: Mkdir,
|
||||
MkdirAllCmd: MkdirAll,
|
||||
RemoveCmd: Remove,
|
||||
RemoveAllCmd: RemoveAll,
|
||||
LinkCmd: Link,
|
||||
SymlinkCmd: Symlink,
|
||||
LchmodCmd: Lchmod,
|
||||
LchownCmd: Lchown,
|
||||
MknodCmd: Mknod,
|
||||
MkfifoCmd: Mkfifo,
|
||||
OpenFileCmd: OpenFile,
|
||||
ReadFileCmd: ReadFile,
|
||||
WriteFileCmd: WriteFile,
|
||||
ReadDirCmd: ReadDir,
|
||||
ResolvePathCmd: ResolvePath,
|
||||
ExtractArchiveCmd: ExtractArchive,
|
||||
ArchivePathCmd: ArchivePath,
|
||||
}
|
||||
|
||||
// Stat functions like os.Stat.
|
||||
// Args:
|
||||
// - args[0] is the path
|
||||
// Out:
|
||||
// - out = FileInfo object
|
||||
func Stat(in io.Reader, out io.Writer, args []string) error {
|
||||
return stat(in, out, args, os.Stat)
|
||||
}
|
||||
|
||||
// Lstat functions like os.Lstat.
|
||||
// Args:
|
||||
// - args[0] is the path
|
||||
// Out:
|
||||
// - out = FileInfo object
|
||||
func Lstat(in io.Reader, out io.Writer, args []string) error {
|
||||
return stat(in, out, args, os.Lstat)
|
||||
}
|
||||
|
||||
func stat(in io.Reader, out io.Writer, args []string, statfunc func(string) (os.FileInfo, error)) error {
|
||||
if len(args) < 1 {
|
||||
return ErrInvalid
|
||||
}
|
||||
|
||||
fi, err := statfunc(args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
info := FileInfo{
|
||||
NameVar: fi.Name(),
|
||||
SizeVar: fi.Size(),
|
||||
ModeVar: fi.Mode(),
|
||||
ModTimeVar: fi.ModTime().UnixNano(),
|
||||
IsDirVar: fi.IsDir(),
|
||||
}
|
||||
|
||||
buf, err := json.Marshal(info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := out.Write(buf); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Readlink works like os.Readlink
|
||||
// In:
|
||||
// - args[0] is path
|
||||
// Out:
|
||||
// - Write link result to out
|
||||
func Readlink(in io.Reader, out io.Writer, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return ErrInvalid
|
||||
}
|
||||
|
||||
l, err := os.Readlink(args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := out.Write([]byte(l)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Mkdir works like os.Mkdir
|
||||
// Args:
|
||||
// - args[0] is the path
|
||||
// - args[1] is the permissions in octal (like 0755)
|
||||
func Mkdir(in io.Reader, out io.Writer, args []string) error {
|
||||
return mkdir(in, out, args, os.Mkdir)
|
||||
}
|
||||
|
||||
// MkdirAll works like os.MkdirAll.
|
||||
// Args:
|
||||
// - args[0] is the path
|
||||
// - args[1] is the permissions in octal (like 0755)
|
||||
func MkdirAll(in io.Reader, out io.Writer, args []string) error {
|
||||
return mkdir(in, out, args, os.MkdirAll)
|
||||
}
|
||||
|
||||
func mkdir(in io.Reader, out io.Writer, args []string, mkdirFunc func(string, os.FileMode) error) error {
|
||||
if len(args) < 2 {
|
||||
return ErrInvalid
|
||||
}
|
||||
|
||||
perm, err := strconv.ParseUint(args[1], 8, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return mkdirFunc(args[0], os.FileMode(perm))
|
||||
}
|
||||
|
||||
// Remove works like os.Remove
|
||||
// Args:
|
||||
// - args[0] is the path
|
||||
func Remove(in io.Reader, out io.Writer, args []string) error {
|
||||
return remove(in, out, args, os.Remove)
|
||||
}
|
||||
|
||||
// RemoveAll works like os.RemoveAll
|
||||
// Args:
|
||||
// - args[0] is the path
|
||||
func RemoveAll(in io.Reader, out io.Writer, args []string) error {
|
||||
return remove(in, out, args, os.RemoveAll)
|
||||
}
|
||||
|
||||
func remove(in io.Reader, out io.Writer, args []string, removefunc func(string) error) error {
|
||||
if len(args) < 1 {
|
||||
return ErrInvalid
|
||||
}
|
||||
return removefunc(args[0])
|
||||
}
|
||||
|
||||
// Link works like os.Link
|
||||
// Args:
|
||||
// - args[0] = old path name (link source)
|
||||
// - args[1] = new path name (link dest)
|
||||
func Link(in io.Reader, out io.Writer, args []string) error {
|
||||
return link(in, out, args, os.Link)
|
||||
}
|
||||
|
||||
// Symlink works like os.Symlink
|
||||
// Args:
|
||||
// - args[0] = old path name (link source)
|
||||
// - args[1] = new path name (link dest)
|
||||
func Symlink(in io.Reader, out io.Writer, args []string) error {
|
||||
return link(in, out, args, os.Symlink)
|
||||
}
|
||||
|
||||
func link(in io.Reader, out io.Writer, args []string, linkfunc func(string, string) error) error {
|
||||
if len(args) < 2 {
|
||||
return ErrInvalid
|
||||
}
|
||||
return linkfunc(args[0], args[1])
|
||||
}
|
||||
|
||||
// Lchmod changes permission of the given file without following symlinks
|
||||
// Args:
|
||||
// - args[0] = path
|
||||
// - args[1] = permission mode in octal (like 0755)
|
||||
func Lchmod(in io.Reader, out io.Writer, args []string) error {
|
||||
if len(args) < 2 {
|
||||
return ErrInvalid
|
||||
}
|
||||
|
||||
perm, err := strconv.ParseUint(args[1], 8, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path := args[0]
|
||||
if !filepath.IsAbs(path) {
|
||||
path, err = filepath.Abs(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return unix.Fchmodat(0, path, uint32(perm), unix.AT_SYMLINK_NOFOLLOW)
|
||||
}
|
||||
|
||||
// Lchown works like os.Lchown
|
||||
// Args:
|
||||
// - args[0] = path
|
||||
// - args[1] = uid in base 10
|
||||
// - args[2] = gid in base 10
|
||||
func Lchown(in io.Reader, out io.Writer, args []string) error {
|
||||
if len(args) < 3 {
|
||||
return ErrInvalid
|
||||
}
|
||||
|
||||
uid, err := strconv.ParseInt(args[1], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gid, err := strconv.ParseInt(args[2], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Lchown(args[0], int(uid), int(gid))
|
||||
}
|
||||
|
||||
// Mknod works like syscall.Mknod
|
||||
// Args:
|
||||
// - args[0] = path
|
||||
// - args[1] = permission mode in octal (like 0755)
|
||||
// - args[2] = major device number in base 10
|
||||
// - args[3] = minor device number in base 10
|
||||
func Mknod(in io.Reader, out io.Writer, args []string) error {
|
||||
if len(args) < 4 {
|
||||
return ErrInvalid
|
||||
}
|
||||
|
||||
perm, err := strconv.ParseUint(args[1], 8, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
major, err := strconv.ParseInt(args[2], 10, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
minor, err := strconv.ParseInt(args[3], 10, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dev := unix.Mkdev(uint32(major), uint32(minor))
|
||||
return unix.Mknod(args[0], uint32(perm), int(dev))
|
||||
}
|
||||
|
||||
// Mkfifo creates a FIFO special file with the given path name and permissions
|
||||
// Args:
|
||||
// - args[0] = path
|
||||
// - args[1] = permission mode in octal (like 0755)
|
||||
func Mkfifo(in io.Reader, out io.Writer, args []string) error {
|
||||
if len(args) < 2 {
|
||||
return ErrInvalid
|
||||
}
|
||||
|
||||
perm, err := strconv.ParseUint(args[1], 8, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return unix.Mkfifo(args[0], uint32(perm))
|
||||
}
|
||||
|
||||
// OpenFile works like os.OpenFile. To manage the file pointer state,
|
||||
// this function acts as a single file "file server" with Read/Write/Close
|
||||
// being serialized control codes from in.
|
||||
// Args:
|
||||
// - args[0] = path
|
||||
// - args[1] = flag in base 10
|
||||
// - args[2] = permission mode in octal (like 0755)
|
||||
func OpenFile(in io.Reader, out io.Writer, args []string) (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
// error code will be serialized by the caller, so don't write it here
|
||||
WriteFileHeader(out, &FileHeader{Cmd: CmdFailed}, nil)
|
||||
}
|
||||
}()
|
||||
|
||||
if len(args) < 3 {
|
||||
return ErrInvalid
|
||||
}
|
||||
|
||||
flag, err := strconv.ParseInt(args[1], 10, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
perm, err := strconv.ParseUint(args[2], 8, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(args[0], int(flag), os.FileMode(perm))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Signal the client that OpenFile succeeded
|
||||
if err := WriteFileHeader(out, &FileHeader{Cmd: CmdOK}, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
hdr, err := ReadFileHeader(in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var buf []byte
|
||||
switch hdr.Cmd {
|
||||
case Read:
|
||||
buf = make([]byte, hdr.Size, hdr.Size)
|
||||
n, err := f.Read(buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buf = buf[:n]
|
||||
case Write:
|
||||
if _, err := io.CopyN(f, in, int64(hdr.Size)); err != nil {
|
||||
return err
|
||||
}
|
||||
case Seek:
|
||||
seekHdr := &SeekHeader{}
|
||||
if err := binary.Read(in, binary.BigEndian, seekHdr); err != nil {
|
||||
return err
|
||||
}
|
||||
res, err := f.Seek(seekHdr.Offset, int(seekHdr.Whence))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buffer := &bytes.Buffer{}
|
||||
if err := binary.Write(buffer, binary.BigEndian, res); err != nil {
|
||||
return err
|
||||
}
|
||||
buf = buffer.Bytes()
|
||||
case Close:
|
||||
if err := f.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return ErrUnknown
|
||||
}
|
||||
|
||||
retHdr := &FileHeader{
|
||||
Cmd: CmdOK,
|
||||
Size: uint64(len(buf)),
|
||||
}
|
||||
if err := WriteFileHeader(out, retHdr, buf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if hdr.Cmd == Close {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
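A sketch of the client side of this loop (Linux only, like the package itself): it drives OpenFile over a pair of OS pipes, waits for the initial CmdOK, issues one Read, then sends Close. In production the two streams are the hvsock connection between the daemon and the LCOW service VM; the pipes here are purely illustrative.

```go
// Sketch (Linux only): driving the OpenFile "file server" loop over OS pipes.
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strconv"

	"github.com/Microsoft/opengcs/service/gcsutils/remotefs"
)

func main() {
	tmp, _ := ioutil.TempFile("", "remotefs-example")
	tmp.WriteString("hello from remotefs")
	tmp.Close()
	defer os.Remove(tmp.Name())

	reqR, reqW, _ := os.Pipe()   // client -> server requests
	respR, respW, _ := os.Pipe() // server -> client responses

	go remotefs.OpenFile(reqR, respW, []string{tmp.Name(), strconv.Itoa(os.O_RDONLY), "0644"})

	// 1. Wait for the open acknowledgement (CmdOK, empty payload).
	if hdr, err := remotefs.ReadFileHeader(respR); err != nil || hdr.Cmd != remotefs.CmdOK {
		panic(fmt.Sprint("open failed: ", err))
	}

	// 2. One Read request: ask for up to 64 bytes, get back CmdOK plus payload.
	remotefs.WriteFileHeader(reqW, &remotefs.FileHeader{Cmd: remotefs.Read, Size: 64}, nil)
	resp, _ := remotefs.ReadFileHeader(respR)
	buf := make([]byte, resp.Size)
	io.ReadFull(respR, buf)
	fmt.Printf("read %q\n", buf)

	// 3. Close the remote file; the server answers CmdOK and exits its loop.
	remotefs.WriteFileHeader(reqW, &remotefs.FileHeader{Cmd: remotefs.Close}, nil)
	remotefs.ReadFileHeader(respR)
}
```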
|
||||
|
||||
// ReadFile works like ioutil.ReadFile but instead writes the file to a writer
|
||||
// Args:
|
||||
// - args[0] = path
|
||||
// Out:
|
||||
// - Write file contents to out
|
||||
func ReadFile(in io.Reader, out io.Writer, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return ErrInvalid
|
||||
}
|
||||
|
||||
f, err := os.Open(args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
if _, err := io.Copy(out, f); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteFile works like ioutil.WriteFile but instead reads the file from a reader
|
||||
// Args:
|
||||
// - args[0] = path
|
||||
// - args[1] = permission mode in octal (like 0755)
|
||||
// - input data stream from in
|
||||
func WriteFile(in io.Reader, out io.Writer, args []string) error {
|
||||
if len(args) < 2 {
|
||||
return ErrInvalid
|
||||
}
|
||||
|
||||
perm, err := strconv.ParseUint(args[1], 8, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(args[0], os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(perm))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
if _, err := io.Copy(f, in); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadDir works like *os.File.Readdir but instead writes the result to a writer
|
||||
// Args:
|
||||
// - args[0] = path
|
||||
// - args[1] = number of directory entries to return. If <= 0, return all entries in directory
|
||||
func ReadDir(in io.Reader, out io.Writer, args []string) error {
|
||||
if len(args) < 2 {
|
||||
return ErrInvalid
|
||||
}
|
||||
|
||||
n, err := strconv.ParseInt(args[1], 10, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f, err := os.Open(args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
infos, err := f.Readdir(int(n))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fileInfos := make([]FileInfo, len(infos))
|
||||
for i := range infos {
|
||||
fileInfos[i] = FileInfo{
|
||||
NameVar: infos[i].Name(),
|
||||
SizeVar: infos[i].Size(),
|
||||
ModeVar: infos[i].Mode(),
|
||||
ModTimeVar: infos[i].ModTime().UnixNano(),
|
||||
IsDirVar: infos[i].IsDir(),
|
||||
}
|
||||
}
|
||||
|
||||
buf, err := json.Marshal(fileInfos)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := out.Write(buf); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ResolvePath works like docker's symlink.FollowSymlinkInScope.
|
||||
// It takes in a `path` and a `root` and evaluates symlinks in `path`
|
||||
// as if they were scoped in `root`. `path` must be a child path of `root`.
|
||||
// In other words, `path` must have `root` as a prefix.
|
||||
// Example:
|
||||
// path=/foo/bar -> /baz
|
||||
// root=/foo,
|
||||
// Expected result = /foo/baz
|
||||
//
|
||||
// Args:
|
||||
// - args[0] is `path`
|
||||
// - args[1] is `root`
|
||||
// Out:
|
||||
// - Write resolved path to stdout
|
||||
func ResolvePath(in io.Reader, out io.Writer, args []string) error {
|
||||
if len(args) < 2 {
|
||||
return ErrInvalid
|
||||
}
|
||||
res, err := symlink.FollowSymlinkInScope(args[0], args[1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = out.Write([]byte(res)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
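A small sketch of the documented example (`/foo/bar -> /baz` resolving to `/foo/baz`), using a throwaway directory as the scope root; the `in` reader is unused by ResolvePath, so `nil` is passed.

```go
// Sketch: ResolvePath resolving an absolute symlink as if the scope root were "/".
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/Microsoft/opengcs/service/gcsutils/remotefs"
)

func main() {
	root, _ := ioutil.TempDir("", "resolvepath-example")
	defer os.RemoveAll(root)

	// Inside root: foo/bar -> /baz, so <root>/foo/bar must resolve to <root>/baz.
	os.MkdirAll(filepath.Join(root, "foo"), 0755)
	os.Symlink("/baz", filepath.Join(root, "foo", "bar"))

	out := &bytes.Buffer{}
	if err := remotefs.ResolvePath(nil, out, []string{filepath.Join(root, "foo", "bar"), root}); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // <root>/baz
}
```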
|
||||
|
||||
// ExtractArchive extracts the archive read from in.
|
||||
// Args:
|
||||
// - in = size of json | json of archive.TarOptions | input tar stream
|
||||
// - args[0] = extract directory name
|
||||
func ExtractArchive(in io.Reader, out io.Writer, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return ErrInvalid
|
||||
}
|
||||
|
||||
opts, err := ReadTarOptions(in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := archive.Untar(in, args[0], opts); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ArchivePath archives the given directory and writes it to out.
|
||||
// Args:
|
||||
// - in = size of json | json of archive.TarOptions
|
||||
// - args[0] = source directory name
|
||||
// Out:
|
||||
// - out = tar file of the archive
|
||||
func ArchivePath(in io.Reader, out io.Writer, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return ErrInvalid
|
||||
}
|
||||
|
||||
opts, err := ReadTarOptions(in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r, err := archive.TarWithOptions(args[0], opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := io.Copy(out, r); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
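ExtractArchive and ArchivePath both expect their options as a length-prefixed JSON blob at the front of the input stream (the `size of json | json | tar stream` layout above). A sketch of composing that stream on the calling side with WriteTarOptions from utils.go; the temp directories and the empty archive.TarOptions are illustrative choices, not the daemon's actual options.

```go
// Sketch: composing the input stream for ExtractArchive. WriteTarOptions
// emits the 8-byte big-endian length followed by the JSON-encoded options;
// the tar payload is appended directly after it.
package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"os"

	"github.com/Microsoft/opengcs/service/gcsutils/remotefs"
	"github.com/docker/docker/pkg/archive"
)

func main() {
	src, _ := ioutil.TempDir("", "archive-src")
	dst, _ := ioutil.TempDir("", "archive-dst")
	defer os.RemoveAll(src)
	defer os.RemoveAll(dst)
	ioutil.WriteFile(src+"/hello.txt", []byte("hi"), 0644)

	// Produce a tar of the source directory (any tar stream would do).
	tarStream, _ := archive.TarWithOptions(src, &archive.TarOptions{})

	// Prefix the serialized options, then append the tar payload.
	in := &bytes.Buffer{}
	if err := remotefs.WriteTarOptions(in, &archive.TarOptions{}); err != nil {
		panic(err)
	}
	io.Copy(in, tarStream)

	// The receiving side reads the options back and untars into args[0].
	if err := remotefs.ExtractArchive(in, ioutil.Discard, []string{dst}); err != nil {
		panic(err)
	}
}
```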
|
168
vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/utils.go
generated
vendored
Normal file
|
@ -0,0 +1,168 @@
|
|||
package remotefs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
)
|
||||
|
||||
// ReadError is a utility function that reads a serialized error from the given reader
|
||||
// and deserializes it.
|
||||
func ReadError(in io.Reader) (*ExportedError, error) {
|
||||
b, err := ioutil.ReadAll(in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// No error
|
||||
if len(b) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var exportedErr ExportedError
|
||||
if err := json.Unmarshal(b, &exportedErr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &exportedErr, nil
|
||||
}
|
||||
|
||||
// ExportedToError will convert a ExportedError to an error. It will try to match
|
||||
// the error to any existing known error like os.ErrNotExist. Otherwise, it will just
|
||||
// return an implementation of the error interface.
|
||||
func ExportedToError(ee *ExportedError) error {
|
||||
if ee.Error() == os.ErrNotExist.Error() {
|
||||
return os.ErrNotExist
|
||||
} else if ee.Error() == os.ErrExist.Error() {
|
||||
return os.ErrExist
|
||||
} else if ee.Error() == os.ErrPermission.Error() {
|
||||
return os.ErrPermission
|
||||
}
|
||||
return ee
|
||||
}
|
||||
|
||||
// WriteError is a utility function that serializes the error
|
||||
// and writes it to the output writer.
|
||||
func WriteError(err error, out io.Writer) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
err = fixOSError(err)
|
||||
|
||||
var errno int
|
||||
switch typedError := err.(type) {
|
||||
case *os.PathError:
|
||||
if se, ok := typedError.Err.(syscall.Errno); ok {
|
||||
errno = int(se)
|
||||
}
|
||||
case *os.LinkError:
|
||||
if se, ok := typedError.Err.(syscall.Errno); ok {
|
||||
errno = int(se)
|
||||
}
|
||||
case *os.SyscallError:
|
||||
if se, ok := typedError.Err.(syscall.Errno); ok {
|
||||
errno = int(se)
|
||||
}
|
||||
}
|
||||
|
||||
exportedError := &ExportedError{
|
||||
ErrString: err.Error(),
|
||||
ErrNum: errno,
|
||||
}
|
||||
|
||||
b, err1 := json.Marshal(exportedError)
|
||||
if err1 != nil {
|
||||
return err1
|
||||
}
|
||||
|
||||
_, err1 = out.Write(b)
|
||||
if err1 != nil {
|
||||
return err1
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// fixOSError converts possible platform dependent error into the portable errors in the
|
||||
// Go os package if possible.
|
||||
func fixOSError(err error) error {
|
||||
// The os.IsExist, os.IsNotExist, and os.IsPermission functions are platform
|
||||
// dependent, so sending the raw error might break those functions on a different OS.
|
||||
// Go defines portable errors for these.
|
||||
if os.IsExist(err) {
|
||||
return os.ErrExist
|
||||
} else if os.IsNotExist(err) {
|
||||
return os.ErrNotExist
|
||||
} else if os.IsPermission(err) {
|
||||
return os.ErrPermission
|
||||
}
|
||||
return err
|
||||
}
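A round-trip sketch of the error path: the server serializes a failure with WriteError, the client reads it back with ReadError and maps it with ExportedToError, so portable checks such as os.IsNotExist keep working across the transport.

```go
// Sketch: error round trip through the remotefs wire encoding.
package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/Microsoft/opengcs/service/gcsutils/remotefs"
)

func main() {
	// Server side: a stat on a missing path fails; serialize the error.
	_, statErr := os.Stat("/does/not/exist")
	buf := &bytes.Buffer{}
	if err := remotefs.WriteError(statErr, buf); err != nil {
		panic(err)
	}

	// Client side: deserialize and map back to a portable error value.
	exported, err := remotefs.ReadError(buf)
	if err != nil {
		panic(err)
	}
	restored := remotefs.ExportedToError(exported)
	fmt.Println(os.IsNotExist(restored)) // true
}
```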
|
||||
|
||||
// ReadTarOptions reads from the specified reader and deserializes an archive.TarOptions struct.
|
||||
func ReadTarOptions(r io.Reader) (*archive.TarOptions, error) {
|
||||
var size uint64
|
||||
if err := binary.Read(r, binary.BigEndian, &size); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rawJSON := make([]byte, size)
|
||||
if _, err := io.ReadFull(r, rawJSON); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var opts archive.TarOptions
|
||||
if err := json.Unmarshal(rawJSON, &opts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &opts, nil
|
||||
}
|
||||
|
||||
// WriteTarOptions serializes a archive.TarOptions struct and writes it to the writer.
|
||||
func WriteTarOptions(w io.Writer, opts *archive.TarOptions) error {
|
||||
optsBuf, err := json.Marshal(opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
optsSize := uint64(len(optsBuf))
|
||||
optsSizeBuf := &bytes.Buffer{}
|
||||
if err := binary.Write(optsSizeBuf, binary.BigEndian, optsSize); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := optsSizeBuf.WriteTo(w); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := w.Write(optsBuf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadFileHeader reads from r and returns a deserialized FileHeader
|
||||
func ReadFileHeader(r io.Reader) (*FileHeader, error) {
|
||||
hdr := &FileHeader{}
|
||||
if err := binary.Read(r, binary.BigEndian, hdr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return hdr, nil
|
||||
}
|
||||
|
||||
// WriteFileHeader serializes a FileHeader and writes it to w, along with any extra data
|
||||
func WriteFileHeader(w io.Writer, hdr *FileHeader, extraData []byte) error {
|
||||
if err := binary.Write(w, binary.BigEndian, hdr); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write(extraData); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
202
vendor/github.com/containerd/continuity/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,202 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
74
vendor/github.com/containerd/continuity/README.md
generated
vendored
Normal file
|
@ -0,0 +1,74 @@
|
|||
# continuity
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/containerd/continuity?status.svg)](https://godoc.org/github.com/containerd/continuity)
|
||||
[![Build Status](https://travis-ci.org/containerd/continuity.svg?branch=master)](https://travis-ci.org/containerd/continuity)
|
||||
|
||||
A transport-agnostic, filesystem metadata manifest system
|
||||
|
||||
This project is a staging area for experiments in providing transport agnostic
|
||||
metadata storage.
|
||||
|
||||
Please see https://github.com/opencontainers/specs/issues/11 for more details.
|
||||
|
||||
## Manifest Format
|
||||
|
||||
A continuity manifest encodes filesystem metadata in Protocol Buffers.
|
||||
Please refer to [proto/manifest.proto](proto/manifest.proto).
|
||||
|
||||
## Usage
|
||||
|
||||
Build:
|
||||
|
||||
```console
|
||||
$ make
|
||||
```
|
||||
|
||||
Create a manifest (of this repo itself):
|
||||
|
||||
```console
|
||||
$ ./bin/continuity build . > /tmp/a.pb
|
||||
```
|
||||
|
||||
Dump a manifest:
|
||||
|
||||
```console
|
||||
$ ./bin/continuity ls /tmp/a.pb
|
||||
...
|
||||
-rw-rw-r-- 270 B /.gitignore
|
||||
-rw-rw-r-- 88 B /.mailmap
|
||||
-rw-rw-r-- 187 B /.travis.yml
|
||||
-rw-rw-r-- 359 B /AUTHORS
|
||||
-rw-rw-r-- 11 kB /LICENSE
|
||||
-rw-rw-r-- 1.5 kB /Makefile
|
||||
...
|
||||
-rw-rw-r-- 986 B /testutil_test.go
|
||||
drwxrwxr-x 0 B /version
|
||||
-rw-rw-r-- 478 B /version/version.go
|
||||
```
|
||||
|
||||
Verify a manifest:
|
||||
|
||||
```console
|
||||
$ ./bin/continuity verify . /tmp/a.pb
|
||||
```
|
||||
|
||||
Break the directory and restore using the manifest:
|
||||
```console
|
||||
$ chmod 777 Makefile
|
||||
$ ./bin/continuity verify . /tmp/a.pb
|
||||
2017/06/23 08:00:34 error verifying manifest: resource "/Makefile" has incorrect mode: -rwxrwxrwx != -rw-rw-r--
|
||||
$ ./bin/continuity apply . /tmp/a.pb
|
||||
$ stat -c %a Makefile
|
||||
664
|
||||
$ ./bin/continuity verify . /tmp/a.pb
|
||||
```
|
||||
|
||||
|
||||
## Contribution Guide
|
||||
### Building Proto Package
|
||||
|
||||
If you change the proto file you will need to rebuild the generated Go with `go generate`.
|
||||
|
||||
```console
|
||||
$ go generate ./proto
|
||||
```
|
5
vendor/github.com/containerd/continuity/devices/devices.go
generated
vendored
Normal file
|
@ -0,0 +1,5 @@
|
|||
package devices
|
||||
|
||||
import "fmt"
|
||||
|
||||
var ErrNotSupported = fmt.Errorf("not supported")
|
15
vendor/github.com/containerd/continuity/devices/devices_darwin.go
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
|||
package devices
|
||||
|
||||
// from /usr/include/sys/types.h
|
||||
|
||||
func getmajor(dev int32) uint64 {
|
||||
return (uint64(dev) >> 24) & 0xff
|
||||
}
|
||||
|
||||
func getminor(dev int32) uint64 {
|
||||
return uint64(dev) & 0xffffff
|
||||
}
|
||||
|
||||
func makedev(major int, minor int) int {
|
||||
return ((major << 24) | minor)
|
||||
}
|
23
vendor/github.com/containerd/continuity/devices/devices_dummy.go
generated
vendored
Normal file
|
@ -0,0 +1,23 @@
|
|||
// +build solaris,!cgo
|
||||
|
||||
//
|
||||
// Implementing the functions below requires cgo support. Non-cgo stub
|
||||
// versions are defined below to enable cross-compilation of source code
|
||||
// that depends on these functions, but the resultant cross-compiled
|
||||
// binaries cannot actually be used. If the stub function(s) below are
|
||||
// actually invoked they will cause the calling process to exit.
|
||||
//
|
||||
|
||||
package devices
|
||||
|
||||
func getmajor(dev uint64) uint64 {
|
||||
panic("getmajor() support requires cgo.")
|
||||
}
|
||||
|
||||
func getminor(dev uint64) uint64 {
|
||||
panic("getminor() support requires cgo.")
|
||||
}
|
||||
|
||||
func makedev(major int, minor int) int {
|
||||
panic("makedev() support requires cgo.")
|
||||
}
|
15
vendor/github.com/containerd/continuity/devices/devices_freebsd.go
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
|||
package devices
|
||||
|
||||
// from /usr/include/sys/types.h
|
||||
|
||||
func getmajor(dev uint32) uint64 {
|
||||
return (uint64(dev) >> 24) & 0xff
|
||||
}
|
||||
|
||||
func getminor(dev uint32) uint64 {
|
||||
return uint64(dev) & 0xffffff
|
||||
}
|
||||
|
||||
func makedev(major int, minor int) int {
|
||||
return ((major << 24) | minor)
|
||||
}
|
15
vendor/github.com/containerd/continuity/devices/devices_linux.go
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
|||
package devices
|
||||
|
||||
// from /usr/include/linux/kdev_t.h
|
||||
|
||||
func getmajor(dev uint64) uint64 {
|
||||
return dev >> 8
|
||||
}
|
||||
|
||||
func getminor(dev uint64) uint64 {
|
||||
return dev & 0xff
|
||||
}
|
||||
|
||||
func makedev(major int, minor int) int {
|
||||
return ((major << 8) | minor)
|
||||
}
|
18
vendor/github.com/containerd/continuity/devices/devices_solaris.go
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
|||
// +build cgo
|
||||
|
||||
package devices
|
||||
|
||||
//#include <sys/mkdev.h>
|
||||
import "C"
|
||||
|
||||
func getmajor(dev uint64) uint64 {
|
||||
return uint64(C.major(C.dev_t(dev)))
|
||||
}
|
||||
|
||||
func getminor(dev uint64) uint64 {
|
||||
return uint64(C.minor(C.dev_t(dev)))
|
||||
}
|
||||
|
||||
func makedev(major int, minor int) int {
|
||||
return int(C.makedev(C.major_t(major), C.minor_t(minor)))
|
||||
}
|
55
vendor/github.com/containerd/continuity/devices/devices_unix.go
generated
vendored
Normal file
|
@ -0,0 +1,55 @@
|
|||
// +build linux darwin freebsd solaris
|
||||
|
||||
package devices
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func DeviceInfo(fi os.FileInfo) (uint64, uint64, error) {
|
||||
sys, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return 0, 0, fmt.Errorf("cannot extract device from os.FileInfo")
|
||||
}
|
||||
|
||||
return getmajor(sys.Rdev), getminor(sys.Rdev), nil
|
||||
}
|
||||
|
||||
// Mknod provides a shortcut for syscall.Mknod
|
||||
func Mknod(p string, mode os.FileMode, maj, min int) error {
|
||||
var (
|
||||
m = syscallMode(mode.Perm())
|
||||
dev int
|
||||
)
|
||||
|
||||
if mode&os.ModeDevice != 0 {
|
||||
dev = makedev(maj, min)
|
||||
|
||||
if mode&os.ModeCharDevice != 0 {
|
||||
m |= syscall.S_IFCHR
|
||||
} else {
|
||||
m |= syscall.S_IFBLK
|
||||
}
|
||||
} else if mode&os.ModeNamedPipe != 0 {
|
||||
m |= syscall.S_IFIFO
|
||||
}
|
||||
|
||||
return syscall.Mknod(p, m, dev)
|
||||
}
|
||||
|
||||
// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
|
||||
func syscallMode(i os.FileMode) (o uint32) {
|
||||
o |= uint32(i.Perm())
|
||||
if i&os.ModeSetuid != 0 {
|
||||
o |= syscall.S_ISUID
|
||||
}
|
||||
if i&os.ModeSetgid != 0 {
|
||||
o |= syscall.S_ISGID
|
||||
}
|
||||
if i&os.ModeSticky != 0 {
|
||||
o |= syscall.S_ISVTX
|
||||
}
|
||||
return
|
||||
}
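A small sketch of the exported helper in action: devices.DeviceInfo extracts the major/minor numbers from an existing node's os.FileInfo (on Linux, /dev/null is conventionally character device 1:3). Recreating nodes with Mknod would additionally require root.

```go
// Sketch (Linux): reading the major/minor numbers of an existing device node.
package main

import (
	"fmt"
	"os"

	"github.com/containerd/continuity/devices"
)

func main() {
	fi, err := os.Stat("/dev/null")
	if err != nil {
		panic(err)
	}
	major, minor, err := devices.DeviceInfo(fi)
	if err != nil {
		panic(err)
	}
	fmt.Printf("/dev/null is device %d:%d\n", major, minor) // 1:3 on Linux
}
```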
|
11
vendor/github.com/containerd/continuity/devices/devices_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,11 @@
|
|||
package devices
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func DeviceInfo(fi os.FileInfo) (uint64, uint64, error) {
|
||||
return 0, 0, errors.Wrap(ErrNotSupported, "cannot get device info on windows")
|
||||
}
|
162
vendor/github.com/containerd/continuity/driver/driver.go
generated
vendored
Normal file
|
@ -0,0 +1,162 @@
|
|||
package driver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
var ErrNotSupported = fmt.Errorf("not supported")
|
||||
|
||||
// Driver provides all of the system-level functions in a common interface.
|
||||
// The context should call these with full paths and should never use the `os`
|
||||
// package or any other package to access resources on the filesystem. This
|
||||
// mechanism lets us carefully control access to the context and maintain
|
||||
// path and resource integrity. It also gives us an interface to reason about
|
||||
// direct resource access.
|
||||
//
|
||||
// Implementations don't need to do much other than meet the interface. For
|
||||
// example, it is not required to wrap os.FileInfo to return correct paths for
|
||||
// the call to Name().
|
||||
type Driver interface {
|
||||
// Note that Open() returns a File interface instead of *os.File. This
|
||||
// is because os.File is a struct, so if Open was to return *os.File,
|
||||
// the only way to fulfill the interface would be to call os.Open()
|
||||
Open(path string) (File, error)
|
||||
OpenFile(path string, flag int, perm os.FileMode) (File, error)
|
||||
|
||||
Stat(path string) (os.FileInfo, error)
|
||||
Lstat(path string) (os.FileInfo, error)
|
||||
Readlink(p string) (string, error)
|
||||
Mkdir(path string, mode os.FileMode) error
|
||||
Remove(path string) error
|
||||
|
||||
Link(oldname, newname string) error
|
||||
Lchmod(path string, mode os.FileMode) error
|
||||
Lchown(path string, uid, gid int64) error
|
||||
Symlink(oldname, newname string) error
|
||||
|
||||
MkdirAll(path string, perm os.FileMode) error
|
||||
RemoveAll(path string) error
|
||||
|
||||
// TODO(aaronl): These methods might move outside the main Driver
|
||||
// interface in the future as more platforms are added.
|
||||
Mknod(path string, mode os.FileMode, major int, minor int) error
|
||||
Mkfifo(path string, mode os.FileMode) error
|
||||
}
|
||||
|
||||
// File is the interface for interacting with files returned by continuity's Open
|
||||
// This is needed since os.File is a struct, instead of an interface, so it can't
|
||||
// be used.
|
||||
type File interface {
|
||||
io.ReadWriteCloser
|
||||
io.Seeker
|
||||
Readdir(n int) ([]os.FileInfo, error)
|
||||
}
|
||||
|
||||
func NewSystemDriver() (Driver, error) {
|
||||
// TODO(stevvooe): Consider having this take a "hint" path argument, which
|
||||
// would be the context root. The hint could be used to resolve required
|
||||
// filesystem support when assembling the driver to use.
|
||||
return &driver{}, nil
|
||||
}
|
||||
|
||||
// XAttrDriver should be implemented on operating systems and filesystems that
|
||||
// have xattr support for regular files and directories.
|
||||
type XAttrDriver interface {
|
||||
// Getxattr returns all of the extended attributes for the file at path.
|
||||
// Typically, this takes a syscall call to Listxattr and Getxattr.
|
||||
Getxattr(path string) (map[string][]byte, error)
|
||||
|
||||
// Setxattr sets all of the extended attributes on file at path, following
|
||||
// any symbolic links, if necessary. All attributes on the target are
|
||||
// replaced by the values from attr. If the operation fails to set any
|
||||
// attribute, those already applied will not be rolled back.
|
||||
Setxattr(path string, attr map[string][]byte) error
|
||||
}
|
||||
|
||||
// LXAttrDriver should be implemented by drivers on operating systems and
|
||||
// filesystems that support setting and getting extended attributes on
|
||||
// symbolic links. If this is not implemented, extended attributes will be
|
||||
// ignored on symbolic links.
|
||||
type LXAttrDriver interface {
|
||||
// LGetxattr returns all of the extended attributes for the file at path
|
||||
// and does not follow symlinks. Typically, this takes a syscall call to
|
||||
// Llistxattr and Lgetxattr.
|
||||
LGetxattr(path string) (map[string][]byte, error)
|
||||
|
||||
// LSetxattr sets all of the extended attributes on file at path, without
|
||||
// following symbolic links. All attributes on the target are replaced by
|
||||
// the values from attr. If the operation fails to set any attribute,
|
||||
// those already applied will not be rolled back.
|
||||
LSetxattr(path string, attr map[string][]byte) error
|
||||
}
|
||||
|
||||
type DeviceInfoDriver interface {
|
||||
DeviceInfo(fi os.FileInfo) (maj uint64, min uint64, err error)
|
||||
}
|
||||
|
||||
// driver is a simple default implementation that sends calls out to the "os"
|
||||
// package. Extend the "driver" type in system-specific files to add support,
|
||||
// such as xattrs, which can add support at compile time.
|
||||
type driver struct{}
|
||||
|
||||
var _ File = &os.File{}
|
||||
|
||||
// LocalDriver is the exported Driver struct for convenience.
|
||||
var LocalDriver Driver = &driver{}
|
||||
|
||||
func (d *driver) Open(p string) (File, error) {
|
||||
return os.Open(p)
|
||||
}
|
||||
|
||||
func (d *driver) OpenFile(path string, flag int, perm os.FileMode) (File, error) {
|
||||
return os.OpenFile(path, flag, perm)
|
||||
}
|
||||
|
||||
func (d *driver) Stat(p string) (os.FileInfo, error) {
|
||||
return os.Stat(p)
|
||||
}
|
||||
|
||||
func (d *driver) Lstat(p string) (os.FileInfo, error) {
|
||||
return os.Lstat(p)
|
||||
}
|
||||
|
||||
func (d *driver) Readlink(p string) (string, error) {
|
||||
return os.Readlink(p)
|
||||
}
|
||||
|
||||
func (d *driver) Mkdir(p string, mode os.FileMode) error {
|
||||
return os.Mkdir(p, mode)
|
||||
}
|
||||
|
||||
// Remove is used to unlink files and remove directories.
|
||||
// This is following the golang os package api which
|
||||
// combines the operations into a higher level Remove
|
||||
// function. If explicit unlinking or directory removal
|
||||
// to mirror system call is required, they should be
|
||||
// split up at that time.
|
||||
func (d *driver) Remove(path string) error {
|
||||
return os.Remove(path)
|
||||
}
|
||||
|
||||
func (d *driver) Link(oldname, newname string) error {
|
||||
return os.Link(oldname, newname)
|
||||
}
|
||||
|
||||
func (d *driver) Lchown(name string, uid, gid int64) error {
|
||||
// TODO: error out if uid excesses int bit width?
|
||||
return os.Lchown(name, int(uid), int(gid))
|
||||
}
|
||||
|
||||
func (d *driver) Symlink(oldname, newname string) error {
|
||||
return os.Symlink(oldname, newname)
|
||||
}
|
||||
|
||||
func (d *driver) MkdirAll(path string, perm os.FileMode) error {
|
||||
return os.MkdirAll(path, perm)
|
||||
}
|
||||
|
||||
func (d *driver) RemoveAll(path string) error {
|
||||
return os.RemoveAll(path)
|
||||
}
|
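A minimal sketch, not part of the vendored file, of how code might exercise the Driver interface above: all filesystem access goes through the driver rather than the os package directly, as the interface documentation recommends. The temporary paths are illustrative assumptions.

package main

import (
	"log"
	"os"

	"github.com/containerd/continuity/driver"
)

func main() {
	// NewSystemDriver returns the default driver backed by the os package.
	d, err := driver.NewSystemDriver()
	if err != nil {
		log.Fatal(err)
	}

	// Create a directory and a file through the Driver interface.
	if err := d.MkdirAll("/tmp/continuity-example", 0755); err != nil {
		log.Fatal(err)
	}
	f, err := d.OpenFile("/tmp/continuity-example/hello.txt", os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := f.Write([]byte("hello\n")); err != nil {
		log.Fatal(err)
	}
	if err := f.Close(); err != nil {
		log.Fatal(err)
	}

	// Stat goes through the same interface, so the call site never touches
	// the os package for resource access.
	fi, err := d.Stat("/tmp/continuity-example/hello.txt")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %d bytes", fi.Size())
}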
vendor/github.com/containerd/continuity/driver/driver_unix.go (generated, vendored, new file, 122 lines)
@@ -0,0 +1,122 @@
// +build linux darwin freebsd solaris

package driver

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"sort"

	"github.com/containerd/continuity/devices"
	"github.com/containerd/continuity/sysx"
)

func (d *driver) Mknod(path string, mode os.FileMode, major, minor int) error {
	return devices.Mknod(path, mode, major, minor)
}

func (d *driver) Mkfifo(path string, mode os.FileMode) error {
	if mode&os.ModeNamedPipe == 0 {
		return errors.New("mode passed to Mkfifo does not have the named pipe bit set")
	}
	// mknod with a mode that has ModeNamedPipe set creates a fifo, not a
	// device.
	return devices.Mknod(path, mode, 0, 0)
}

// Lchmod changes the mode of a file without following symlinks.
func (d *driver) Lchmod(path string, mode os.FileMode) (err error) {
	if !filepath.IsAbs(path) {
		path, err = filepath.Abs(path)
		if err != nil {
			return
		}
	}

	return sysx.Fchmodat(0, path, uint32(mode), sysx.AtSymlinkNofollow)
}

// Getxattr returns all of the extended attributes for the file at path p.
func (d *driver) Getxattr(p string) (map[string][]byte, error) {
	xattrs, err := sysx.Listxattr(p)
	if err != nil {
		return nil, fmt.Errorf("listing %s xattrs: %v", p, err)
	}

	sort.Strings(xattrs)
	m := make(map[string][]byte, len(xattrs))

	for _, attr := range xattrs {
		value, err := sysx.Getxattr(p, attr)
		if err != nil {
			return nil, fmt.Errorf("getting %q xattr on %s: %v", attr, p, err)
		}

		// NOTE(stevvooe): This append/copy trick relies on unique
		// xattrs. Break this out into an alloc/copy if xattrs are no
		// longer unique.
		m[attr] = append(m[attr], value...)
	}

	return m, nil
}

// Setxattr sets all of the extended attributes on file at path, following
// any symbolic links, if necessary. All attributes on the target are
// replaced by the values from attr. If the operation fails to set any
// attribute, those already applied will not be rolled back.
func (d *driver) Setxattr(path string, attrMap map[string][]byte) error {
	for attr, value := range attrMap {
		if err := sysx.Setxattr(path, attr, value, 0); err != nil {
			return fmt.Errorf("error setting xattr %q on %s: %v", attr, path, err)
		}
	}

	return nil
}

// LGetxattr returns all of the extended attributes for the file at path p
// not following symbolic links.
func (d *driver) LGetxattr(p string) (map[string][]byte, error) {
	xattrs, err := sysx.LListxattr(p)
	if err != nil {
		return nil, fmt.Errorf("listing %s xattrs: %v", p, err)
	}

	sort.Strings(xattrs)
	m := make(map[string][]byte, len(xattrs))

	for _, attr := range xattrs {
		value, err := sysx.LGetxattr(p, attr)
		if err != nil {
			return nil, fmt.Errorf("getting %q xattr on %s: %v", attr, p, err)
		}

		// NOTE(stevvooe): This append/copy trick relies on unique
		// xattrs. Break this out into an alloc/copy if xattrs are no
		// longer unique.
		m[attr] = append(m[attr], value...)
	}

	return m, nil
}

// LSetxattr sets all of the extended attributes on file at path, not
// following any symbolic links. All attributes on the target are
// replaced by the values from attr. If the operation fails to set any
// attribute, those already applied will not be rolled back.
func (d *driver) LSetxattr(path string, attrMap map[string][]byte) error {
	for attr, value := range attrMap {
		if err := sysx.LSetxattr(path, attr, value, 0); err != nil {
			return fmt.Errorf("error setting xattr %q on %s: %v", attr, path, err)
		}
	}

	return nil
}

func (d *driver) DeviceInfo(fi os.FileInfo) (maj uint64, min uint64, err error) {
	return devices.DeviceInfo(fi)
}
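Because the xattr methods are only compiled into the driver on Unix-like platforms, callers detect the capability with a type assertion against the optional XAttrDriver interface rather than assuming it exists. A minimal sketch of that pattern follows; the copyXattrs helper and the /tmp paths are illustrative assumptions, not part of the vendored code.

package main

import (
	"log"

	"github.com/containerd/continuity/driver"
)

// copyXattrs copies extended attributes from src to dst when the driver
// supports them, using the XAttrDriver type assertion as a capability check.
func copyXattrs(d driver.Driver, src, dst string) error {
	xd, ok := d.(driver.XAttrDriver)
	if !ok {
		// This build of the driver has no xattr support; nothing to do.
		return nil
	}
	attrs, err := xd.Getxattr(src)
	if err != nil {
		return err
	}
	return xd.Setxattr(dst, attrs)
}

func main() {
	d, err := driver.NewSystemDriver()
	if err != nil {
		log.Fatal(err)
	}
	if err := copyXattrs(d, "/tmp/a", "/tmp/b"); err != nil {
		log.Fatal(err)
	}
}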
Some files were not shown because too many files have changed in this diff.