S3: add support for serving virtual folders

Inside the same bucket, each user can be assigned a virtual folder.
This is similar to a chroot directory for a local filesystem.
Nicola Murino 2020-01-19 23:23:09 +01:00
parent d75f56b914
commit 4463421028
19 changed files with 241 additions and 86 deletions


@ -419,8 +419,13 @@ The HTTP request has a 15 seconds timeout.
## S3 Compatible Object Storage backends
Each user can be mapped with an S3-Compatible bucket; this way, the mapped bucket is exposed over SFTP/SCP.
SFTPGo uses multipart uploads and parallel downloads for storing and retrieving files from S3 and automatically tries to create the mapped bucket if it does not exist.
Each user can be mapped with an S3-Compatible bucket or a virtual folder inside a bucket; this way, the mapped bucket/virtual folder is exposed over SFTP/SCP.
By specifying a different `key_prefix` you can assign different virtual folders of the same bucket to different users. This is similar to a chroot directory for a local filesystem. The virtual folder identified by `key_prefix` does not need to be pre-created.
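For example, here is a minimal sketch (illustrative only, not part of this commit) of two hypothetical users sharing one bucket but confined to different virtual folders via `key_prefix`; the bucket name and prefixes are made up:

```go
package main

import (
	"fmt"

	"github.com/drakkan/sftpgo/vfs"
)

func main() {
	// hypothetical: two users share the bucket "mybucket", each one
	// restricted to its own virtual folder by a different key_prefix
	alice := vfs.S3FsConfig{Bucket: "mybucket", Region: "eu-west-1", KeyPrefix: "users/alice/"}
	bob := vfs.S3FsConfig{Bucket: "mybucket", Region: "eu-west-1", KeyPrefix: "users/bob/"}

	// the same SFTP path "/docs/report.txt" maps to different S3 keys
	fmt.Println(alice.KeyPrefix + "docs/report.txt") // users/alice/docs/report.txt
	fmt.Println(bob.KeyPrefix + "docs/report.txt")   // users/bob/docs/report.txt
}
```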
SFTPGo uses multipart uploads and parallel downloads for storing and retrieving files from S3.
SFTPGo tries to automatically create the mapped bucket if it does not exist, but it is better to pre-create the bucket and assign the desired options to it, such as automatic encryption and authorizations.
Some SFTP commands don't work over S3:
@ -465,6 +470,7 @@ Flags:
--s3-access-secret string
--s3-bucket string
--s3-endpoint string
--s3-key-prefix string Allows restricting access to the virtual folder identified by this prefix and its contents
--s3-region string
--s3-storage-class string
-s, --sftpd-port int 0 means a random non privileged port
@ -522,6 +528,7 @@ For each account the following properties can be configured:
- `s3_access_secret`, required for S3 filesystem. It is stored encrypted (AES-256-GCM)
- `s3_endpoint`, specifies s3 endpoint (server) different from AWS
- `s3_storage_class`
- `s3_key_prefix`, allows restricting access to the virtual folder identified by this prefix and its contents
These properties are stored inside the data provider.


@ -28,6 +28,7 @@ var (
portableS3AccessSecret string
portableS3Endpoint string
portableS3StorageClass string
portableS3KeyPrefix string
portableCmd = &cobra.Command{
Use: "portable",
Short: "Serve a single directory",
@ -70,6 +71,7 @@ Please take a look at the usage below to customize the serving parameters`,
AccessSecret: portableS3AccessSecret,
Endpoint: portableS3Endpoint,
StorageClass: portableS3StorageClass,
KeyPrefix: portableS3KeyPrefix,
},
},
},
@ -105,5 +107,7 @@ func init() {
portableCmd.Flags().StringVar(&portableS3AccessSecret, "s3-access-secret", "", "")
portableCmd.Flags().StringVar(&portableS3Endpoint, "s3-endpoint", "", "")
portableCmd.Flags().StringVar(&portableS3StorageClass, "s3-storage-class", "", "")
portableCmd.Flags().StringVar(&portableS3KeyPrefix, "s3-key-prefix", "", "Allows restricting access to the virtual folder "+
"identified by this prefix and its contents")
rootCmd.AddCommand(portableCmd)
}


@ -413,19 +413,19 @@ func buildUserHomeDir(user *User) {
func validatePermissions(user *User) error {
if len(user.Permissions) == 0 {
return &ValidationError{err: "Please grant some permissions to this user"}
return &ValidationError{err: "please grant some permissions to this user"}
}
permissions := make(map[string][]string)
if _, ok := user.Permissions["/"]; !ok {
return &ValidationError{err: fmt.Sprintf("Permissions for the root dir \"/\" must be set")}
return &ValidationError{err: fmt.Sprintf("permissions for the root dir \"/\" must be set")}
}
for dir, perms := range user.Permissions {
if len(perms) == 0 {
return &ValidationError{err: fmt.Sprintf("No permissions granted for the directory: %#v", dir)}
return &ValidationError{err: fmt.Sprintf("no permissions granted for the directory: %#v", dir)}
}
for _, p := range perms {
if !utils.IsStringInSlice(p, ValidPerms) {
return &ValidationError{err: fmt.Sprintf("Invalid permission: %#v", p)}
return &ValidationError{err: fmt.Sprintf("invalid permission: %#v", p)}
}
}
cleanedDir := filepath.ToSlash(path.Clean(dir))
@ -433,7 +433,7 @@ func validatePermissions(user *User) error {
cleanedDir = strings.TrimSuffix(cleanedDir, "/")
}
if !path.IsAbs(cleanedDir) {
return &ValidationError{err: fmt.Sprintf("Cannot set permissions for non absolute path: %#v", dir)}
return &ValidationError{err: fmt.Sprintf("cannot set permissions for non absolute path: %#v", dir)}
}
if utils.IsStringInSlice(PermAny, perms) {
permissions[cleanedDir] = []string{PermAny}
@ -452,7 +452,7 @@ func validatePublicKeys(user *User) error {
for i, k := range user.PublicKeys {
_, _, _, _, err := ssh.ParseAuthorizedKey([]byte(k))
if err != nil {
return &ValidationError{err: fmt.Sprintf("Could not parse key nr. %d: %s", i, err)}
return &ValidationError{err: fmt.Sprintf("could not parse key nr. %d: %s", i, err)}
}
}
return nil
@ -468,13 +468,13 @@ func validateFilters(user *User) error {
for _, IPMask := range user.Filters.DeniedIP {
_, _, err := net.ParseCIDR(IPMask)
if err != nil {
return &ValidationError{err: fmt.Sprintf("Could not parse denied IP/Mask %#v : %v", IPMask, err)}
return &ValidationError{err: fmt.Sprintf("could not parse denied IP/Mask %#v : %v", IPMask, err)}
}
}
for _, IPMask := range user.Filters.AllowedIP {
_, _, err := net.ParseCIDR(IPMask)
if err != nil {
return &ValidationError{err: fmt.Sprintf("Could not parse allowed IP/Mask %#v : %v", IPMask, err)}
return &ValidationError{err: fmt.Sprintf("could not parse allowed IP/Mask %#v : %v", IPMask, err)}
}
}
return nil
@ -484,13 +484,13 @@ func validateFilesystemConfig(user *User) error {
if user.FsConfig.Provider == 1 {
err := vfs.ValidateS3FsConfig(&user.FsConfig.S3Config)
if err != nil {
return &ValidationError{err: fmt.Sprintf("Could not validate s3config: %v", err)}
return &ValidationError{err: fmt.Sprintf("could not validate s3config: %v", err)}
}
vals := strings.Split(user.FsConfig.S3Config.AccessSecret, "$")
if !strings.HasPrefix(user.FsConfig.S3Config.AccessSecret, "$aes$") || len(vals) != 4 {
accessSecret, err := utils.EncryptData(user.FsConfig.S3Config.AccessSecret)
if err != nil {
return &ValidationError{err: fmt.Sprintf("Could encrypt s3 access secret: %v", err)}
return &ValidationError{err: fmt.Sprintf("could not encrypt s3 access secret: %v", err)}
}
user.FsConfig.S3Config.AccessSecret = accessSecret
}
@ -504,10 +504,10 @@ func validateFilesystemConfig(user *User) error {
func validateUser(user *User) error {
buildUserHomeDir(user)
if len(user.Username) == 0 || len(user.HomeDir) == 0 {
return &ValidationError{err: "Mandatory parameters missing"}
return &ValidationError{err: "mandatory parameters missing"}
}
if len(user.Password) == 0 && len(user.PublicKeys) == 0 {
return &ValidationError{err: "Please set a password or at least a public_key"}
return &ValidationError{err: "please set a password or at least a public_key"}
}
if !filepath.IsAbs(user.HomeDir) {
return &ValidationError{err: fmt.Sprintf("home_dir must be an absolute path, actual value: %v", user.HomeDir)}


@ -408,6 +408,7 @@ func (u *User) getACopy() User {
AccessSecret: u.FsConfig.S3Config.AccessSecret,
Endpoint: u.FsConfig.S3Config.Endpoint,
StorageClass: u.FsConfig.S3Config.StorageClass,
KeyPrefix: u.FsConfig.S3Config.KeyPrefix,
},
}


@ -435,6 +435,10 @@ func compareUserFsConfig(expected *dataprovider.User, actual *dataprovider.User)
if expected.FsConfig.S3Config.StorageClass != actual.FsConfig.S3Config.StorageClass {
return errors.New("S3 storage class mismatch")
}
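// the validator appends a trailing "/" to a non-empty key_prefix, so accept both the original and the normalized value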
if expected.FsConfig.S3Config.KeyPrefix != actual.FsConfig.S3Config.KeyPrefix &&
expected.FsConfig.S3Config.KeyPrefix+"/" != actual.FsConfig.S3Config.KeyPrefix {
return errors.New("S3 key prefix mismatch")
}
return nil
}


@ -250,6 +250,17 @@ func TestAddUserInvalidFsConfig(t *testing.T) {
if err != nil {
t.Errorf("unexpected error adding user with invalid fs config: %v", err)
}
u.FsConfig.S3Config.Bucket = "test"
u.FsConfig.S3Config.Region = "eu-west-1"
u.FsConfig.S3Config.AccessKey = "access-key"
u.FsConfig.S3Config.AccessSecret = "access-secret"
u.FsConfig.S3Config.Endpoint = "http://127.0.0.1:9000/path?a=b"
u.FsConfig.S3Config.StorageClass = "Standard"
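// a key_prefix starting with "/" is not valid, so this add request must fail with http.StatusBadRequest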
u.FsConfig.S3Config.KeyPrefix = "/somedir/subdir/"
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
if err != nil {
t.Errorf("unexpected error adding user with invalid fs config: %v", err)
}
}
func TestUserPublicKey(t *testing.T) {
@ -341,6 +352,7 @@ func TestUserS3Config(t *testing.T) {
user.FsConfig.S3Config.Region = "us-east-1"
user.FsConfig.S3Config.AccessKey = "Server-Access-Key1"
user.FsConfig.S3Config.Endpoint = "http://localhost:9000"
user.FsConfig.S3Config.KeyPrefix = "somedir/subdir"
user, _, err = httpd.UpdateUser(user, http.StatusOK)
if err != nil {
t.Errorf("unable to update user: %v", err)
@ -1467,6 +1479,7 @@ func TestWebUserS3Mock(t *testing.T) {
user.FsConfig.S3Config.AccessSecret = "access-secret"
user.FsConfig.S3Config.Endpoint = "http://127.0.0.1:9000/path?a=b"
user.FsConfig.S3Config.StorageClass = "Standard"
user.FsConfig.S3Config.KeyPrefix = "somedir/subdir/"
form := make(url.Values)
form.Set("username", user.Username)
form.Set("home_dir", user.HomeDir)
@ -1490,6 +1503,7 @@ func TestWebUserS3Mock(t *testing.T) {
form.Set("s3_access_secret", user.FsConfig.S3Config.AccessSecret)
form.Set("s3_storage_class", user.FsConfig.S3Config.StorageClass)
form.Set("s3_endpoint", user.FsConfig.S3Config.Endpoint)
form.Set("s3_key_prefix", user.FsConfig.S3Config.KeyPrefix)
req, _ = http.NewRequest(http.MethodPost, webUserPath+"/"+strconv.FormatInt(user.ID, 10), strings.NewReader(form.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
rr = executeRequest(req)
@ -1530,6 +1544,9 @@ func TestWebUserS3Mock(t *testing.T) {
if updateUser.FsConfig.S3Config.Endpoint != user.FsConfig.S3Config.Endpoint {
t.Error("s3 endpoint mismatch")
}
if updateUser.FsConfig.S3Config.KeyPrefix != user.FsConfig.S3Config.KeyPrefix {
t.Error("s3 key prefix mismatch")
}
req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)


@ -274,6 +274,12 @@ func TestCompareUserFsConfig(t *testing.T) {
if err == nil {
t.Errorf("S3 storage class does not match")
}
expected.FsConfig.S3Config.StorageClass = ""
expected.FsConfig.S3Config.KeyPrefix = "somedir/subdir"
err = compareUserFsConfig(expected, actual)
if err == nil {
t.Errorf("S3 key prefix does not match")
}
}
func TestApiCallsWithBadURL(t *testing.T) {


@ -732,6 +732,10 @@ components:
description: optional endpoint
storage_class:
type: string
key_prefix:
type: string
description: key_prefix is similar to a chroot directory for a local filesystem. If specified, the SFTP user will only see contents that start with this prefix, so you can restrict access to a specific virtual folder. The prefix, if not empty, must not start with "/" and must end with "/". If empty, the whole bucket contents will be available
example: folder/subfolder/
required:
- bucket
- region


@ -238,6 +238,7 @@ func getFsConfigFromUserPostFields(r *http.Request) dataprovider.Filesystem {
fs.S3Config.AccessSecret = r.Form.Get("s3_access_secret")
fs.S3Config.Endpoint = r.Form.Get("s3_endpoint")
fs.S3Config.StorageClass = r.Form.Get("s3_storage_class")
fs.S3Config.KeyPrefix = r.Form.Get("s3_key_prefix")
}
return fs
}


@ -44,7 +44,7 @@ Let's see a sample usage for each REST API.
Command:
```
python sftpgo_api_cli.py add-user test_username --password "test_pwd" --home-dir="/tmp/test_home_dir" --uid 33 --gid 1000 --max-sessions 2 --quota-size 0 --quota-files 3 --permissions "list" "download" "upload" "delete" "rename" "create_dirs" "overwrite" --subdirs-permissions "/dir1:list,download" "/dir2:*" --upload-bandwidth 100 --download-bandwidth 60 --status 0 --expiration-date 2019-01-01 --allowed-ip "192.168.1.1/32" --fs S3 --s3-bucket test --s3-region eu-west-1 --s3-access-key accesskey --s3-access-secret secret --s3-endpoint "http://127.0.0.1:9000" --s3-storage-class Standard
python sftpgo_api_cli.py add-user test_username --password "test_pwd" --home-dir="/tmp/test_home_dir" --uid 33 --gid 1000 --max-sessions 2 --quota-size 0 --quota-files 3 --permissions "list" "download" "upload" "delete" "rename" "create_dirs" "overwrite" --subdirs-permissions "/dir1:list,download" "/dir2:*" --upload-bandwidth 100 --download-bandwidth 60 --status 0 --expiration-date 2019-01-01 --allowed-ip "192.168.1.1/32" --fs S3 --s3-bucket test --s3-region eu-west-1 --s3-access-key accesskey --s3-access-secret secret --s3-endpoint "http://127.0.0.1:9000" --s3-storage-class Standard --s3-key-prefix "vfolder/"
```
Output:
@ -60,6 +60,7 @@ Output:
"access_secret": "$aes$6c088ba12b0b261247c8cf331c46d9260b8e58002957d89ad1c0495e3af665cd0227",
"bucket": "test",
"endpoint": "http://127.0.0.1:9000",
"key_prefix": "vfolder/",
"region": "eu-west-1",
"storage_class": "Standard"
}


@ -73,7 +73,8 @@ class SFTPGoApiRequests:
def buildUserObject(self, user_id=0, username="", password="", public_keys=[], home_dir="", uid=0, gid=0,
max_sessions=0, quota_size=0, quota_files=0, permissions={}, upload_bandwidth=0, download_bandwidth=0,
status=1, expiration_date=0, allowed_ip=[], denied_ip=[], fs_provider='local', s3_bucket='',
s3_region='', s3_access_key='', s3_access_secret='', s3_endpoint='', s3_storage_class=''):
s3_region='', s3_access_key='', s3_access_secret='', s3_endpoint='', s3_storage_class='',
s3_key_prefix=''):
user = {"id":user_id, "username":username, "uid":uid, "gid":gid,
"max_sessions":max_sessions, "quota_size":quota_size, "quota_files":quota_files,
"upload_bandwidth":upload_bandwidth, "download_bandwidth":download_bandwidth,
@ -92,7 +93,8 @@ class SFTPGoApiRequests:
if allowed_ip or denied_ip:
user.update({"filters":self.buildFilters(allowed_ip, denied_ip)})
user.update({"filesystem":self.buildFsConfig(fs_provider, s3_bucket, s3_region, s3_access_key,
s3_access_secret, s3_endpoint, s3_storage_class)})
s3_access_secret, s3_endpoint, s3_storage_class,
s3_key_prefix)})
return user
def buildPermissions(self, root_perms, subdirs_perms):
@ -127,11 +129,12 @@ class SFTPGoApiRequests:
return filters
def buildFsConfig(self, fs_provider, s3_bucket, s3_region, s3_access_key, s3_access_secret, s3_endpoint,
s3_storage_class):
s3_storage_class, s3_key_prefix):
fs_config = {'provider':0}
if fs_provider == 'S3':
s3config = {'bucket':s3_bucket, 'region':s3_region, 'access_key':s3_access_key, 'access_secret':
s3_access_secret, 'endpoint':s3_endpoint, 'storage_class':s3_storage_class}
s3_access_secret, 'endpoint':s3_endpoint, 'storage_class':s3_storage_class, 'key_prefix':
s3_key_prefix}
fs_config.update({'provider':1, 's3config':s3config})
return fs_config
@ -147,22 +150,23 @@ class SFTPGoApiRequests:
def addUser(self, username="", password="", public_keys="", home_dir="", uid=0, gid=0, max_sessions=0, quota_size=0,
quota_files=0, perms=[], upload_bandwidth=0, download_bandwidth=0, status=1, expiration_date=0,
subdirs_permissions=[], allowed_ip=[], denied_ip=[], fs_provider='local', s3_bucket='', s3_region='',
s3_access_key='', s3_access_secret='', s3_endpoint='', s3_storage_class=''):
s3_access_key='', s3_access_secret='', s3_endpoint='', s3_storage_class='', s3_key_prefix=''):
u = self.buildUserObject(0, username, password, public_keys, home_dir, uid, gid, max_sessions,
quota_size, quota_files, self.buildPermissions(perms, subdirs_permissions), upload_bandwidth, download_bandwidth,
status, expiration_date, allowed_ip, denied_ip, fs_provider, s3_bucket, s3_region,
s3_access_key, s3_access_secret, s3_endpoint, s3_storage_class)
s3_access_key, s3_access_secret, s3_endpoint, s3_storage_class, s3_key_prefix)
r = requests.post(self.userPath, json=u, auth=self.auth, verify=self.verify)
self.printResponse(r)
def updateUser(self, user_id, username="", password="", public_keys="", home_dir="", uid=0, gid=0, max_sessions=0,
quota_size=0, quota_files=0, perms=[], upload_bandwidth=0, download_bandwidth=0, status=1,
expiration_date=0, subdirs_permissions=[], allowed_ip=[], denied_ip=[], fs_provider='local',
s3_bucket='', s3_region='', s3_access_key='', s3_access_secret='', s3_endpoint='', s3_storage_class=''):
s3_bucket='', s3_region='', s3_access_key='', s3_access_secret='', s3_endpoint='', s3_storage_class='',
s3_key_prefix=''):
u = self.buildUserObject(user_id, username, password, public_keys, home_dir, uid, gid, max_sessions,
quota_size, quota_files, self.buildPermissions(perms, subdirs_permissions), upload_bandwidth, download_bandwidth,
status, expiration_date, allowed_ip, denied_ip, fs_provider, s3_bucket, s3_region, s3_access_key,
s3_access_secret, s3_endpoint, s3_storage_class)
s3_access_secret, s3_endpoint, s3_storage_class, s3_key_prefix)
r = requests.put(urlparse.urljoin(self.userPath, "user/" + str(user_id)), json=u, auth=self.auth, verify=self.verify)
self.printResponse(r)
@ -419,6 +423,9 @@ def addCommonUserArguments(parser):
parser.add_argument('--fs', type=str, default='local', choices=['local', 'S3'],
help='Filesystem provider. Default: %(default)s')
parser.add_argument('--s3-bucket', type=str, default='', help='Default: %(default)s')
parser.add_argument('--s3-key-prefix', type=str, default='', help='Virtual root directory. If non empty, only this ' +
'directory and its contents will be available. Cannot start with "/". For example "folder/subfolder/".' +
' Default: %(default)s')
parser.add_argument('--s3-region', type=str, default='', help='Default: %(default)s')
parser.add_argument('--s3-access-key', type=str, default='', help='Default: %(default)s')
parser.add_argument('--s3-access-secret', type=str, default='', help='Default: %(default)s')
@ -527,13 +534,14 @@ if __name__ == '__main__':
args.quota_size, args.quota_files, args.permissions, args.upload_bandwidth, args.download_bandwidth,
args.status, getDatetimeAsMillisSinceEpoch(args.expiration_date), args.subdirs_permissions, args.allowed_ip,
args.denied_ip, args.fs, args.s3_bucket, args.s3_region, args.s3_access_key, args.s3_access_secret,
args.s3_endpoint, args.s3_storage_class)
args.s3_endpoint, args.s3_storage_class, args.s3_key_prefix)
elif args.command == 'update-user':
api.updateUser(args.id, args.username, args.password, args.public_keys, args.home_dir, args.uid, args.gid,
args.max_sessions, args.quota_size, args.quota_files, args.permissions, args.upload_bandwidth,
args.download_bandwidth, args.status, getDatetimeAsMillisSinceEpoch(args.expiration_date),
args.subdirs_permissions, args.allowed_ip, args.denied_ip, args.fs, args.s3_bucket, args.s3_region,
args.s3_access_key, args.s3_access_secret, args.s3_endpoint, args.s3_storage_class)
args.s3_access_key, args.s3_access_secret, args.s3_endpoint, args.s3_storage_class,
args.s3_key_prefix)
elif args.command == 'delete-user':
api.deleteUser(args.id)
elif args.command == 'get-users':


@ -302,7 +302,7 @@ func (c Connection) handleSFTPSetstat(filePath string, request *sftp.Request) er
}
func (c Connection) handleSFTPRename(sourcePath string, targetPath string, request *sftp.Request) error {
if c.fs.GetRelativePath(sourcePath, c.User.GetHomeDir()) == "/" {
if c.fs.GetRelativePath(sourcePath) == "/" {
c.Log(logger.LevelWarn, logSender, "renaming root dir is not allowed")
return sftp.ErrSSHFxPermissionDenied
}
@ -319,7 +319,7 @@ func (c Connection) handleSFTPRename(sourcePath string, targetPath string, reque
}
func (c Connection) handleSFTPRmdir(dirPath string, request *sftp.Request) error {
if c.fs.GetRelativePath(dirPath, c.User.GetHomeDir()) == "/" {
if c.fs.GetRelativePath(dirPath) == "/" {
c.Log(logger.LevelWarn, logSender, "removing root dir is not allowed")
return sftp.ErrSSHFxPermissionDenied
}
@ -348,7 +348,7 @@ func (c Connection) handleSFTPRmdir(dirPath string, request *sftp.Request) error
}
func (c Connection) handleSFTPSymlink(sourcePath string, targetPath string, request *sftp.Request) error {
if c.fs.GetRelativePath(sourcePath, c.User.GetHomeDir()) == "/" {
if c.fs.GetRelativePath(sourcePath) == "/" {
c.Log(logger.LevelWarn, logSender, "symlinking root dir is not allowed")
return sftp.ErrSSHFxPermissionDenied
}


@ -325,7 +325,7 @@ func (c *scpCommand) handleRecursiveDownload(dirPath string, stat os.FileInfo) e
}
var dirs []string
for _, file := range files {
filePath := c.connection.fs.GetRelativePath(c.connection.fs.Join(dirPath, file.Name()), c.connection.User.GetHomeDir())
filePath := c.connection.fs.GetRelativePath(c.connection.fs.Join(dirPath, file.Name()))
if file.Mode().IsRegular() || file.Mode()&os.ModeSymlink == os.ModeSymlink {
err = c.handleDownload(filePath)
if err != nil {


@ -307,7 +307,7 @@ func GetConnectionsStats() []ConnectionStatus {
StartTime: utils.GetTimeAsMsSinceEpoch(t.start),
Size: size,
LastActivity: utils.GetTimeAsMsSinceEpoch(t.lastActivity),
Path: c.fs.GetRelativePath(t.path, c.User.GetHomeDir()),
Path: c.fs.GetRelativePath(t.path),
}
conn.Transfers = append(conn.Transfers, connTransfer)
}


@ -2904,58 +2904,119 @@ func TestRootDirCommands(t *testing.T) {
func TestRelativePaths(t *testing.T) {
user := getTestUser(true)
path := filepath.Join(user.HomeDir, "/")
fs := vfs.NewOsFs("", user.GetHomeDir())
rel := fs.GetRelativePath(path, user.GetHomeDir())
var path, rel string
filesystems := []vfs.Fs{vfs.NewOsFs("", user.GetHomeDir())}
s3config := vfs.S3FsConfig{
KeyPrefix: strings.TrimPrefix(user.GetHomeDir(), "/") + "/",
}
s3fs, _ := vfs.NewS3Fs("", user.GetHomeDir(), s3config)
filesystems = append(filesystems, s3fs)
for _, fs := range filesystems {
path = filepath.Join(user.HomeDir, "/")
rel = fs.GetRelativePath(path)
if rel != "/" {
t.Errorf("Unexpected relative path: %v", rel)
}
path = filepath.Join(user.HomeDir, "//")
rel = fs.GetRelativePath(path, user.GetHomeDir())
rel = fs.GetRelativePath(path)
if rel != "/" {
t.Errorf("Unexpected relative path: %v", rel)
}
path = filepath.Join(user.HomeDir, "../..")
rel = fs.GetRelativePath(path, user.GetHomeDir())
rel = fs.GetRelativePath(path)
if rel != "/" {
t.Errorf("Unexpected relative path: %v", rel)
t.Errorf("Unexpected relative path: %v path: %v", rel, path)
}
path = filepath.Join(user.HomeDir, "../../../../../")
rel = fs.GetRelativePath(path, user.GetHomeDir())
rel = fs.GetRelativePath(path)
if rel != "/" {
t.Errorf("Unexpected relative path: %v", rel)
}
path = filepath.Join(user.HomeDir, "/..")
rel = fs.GetRelativePath(path, user.GetHomeDir())
rel = fs.GetRelativePath(path)
if rel != "/" {
t.Errorf("Unexpected relative path: %v", rel)
t.Errorf("Unexpected relative path: %v path: %v", rel, path)
}
path = filepath.Join(user.HomeDir, "/../../../..")
rel = fs.GetRelativePath(path, user.GetHomeDir())
rel = fs.GetRelativePath(path)
if rel != "/" {
t.Errorf("Unexpected relative path: %v", rel)
}
path = filepath.Join(user.HomeDir, "")
rel = fs.GetRelativePath(path, user.GetHomeDir())
rel = fs.GetRelativePath(path)
if rel != "/" {
t.Errorf("Unexpected relative path: %v", rel)
}
path = filepath.Join(user.HomeDir, ".")
rel = fs.GetRelativePath(path, user.GetHomeDir())
rel = fs.GetRelativePath(path)
if rel != "/" {
t.Errorf("Unexpected relative path: %v", rel)
}
path = filepath.Join(user.HomeDir, "somedir")
rel = fs.GetRelativePath(path, user.GetHomeDir())
rel = fs.GetRelativePath(path)
if rel != "/somedir" {
t.Errorf("Unexpected relative path: %v", rel)
}
path = filepath.Join(user.HomeDir, "/somedir/subdir")
rel = fs.GetRelativePath(path, user.GetHomeDir())
rel = fs.GetRelativePath(path)
if rel != "/somedir/subdir" {
t.Errorf("Unexpected relative path: %v", rel)
}
}
}
func TestResolvePaths(t *testing.T) {
user := getTestUser(true)
var path, resolved string
var err error
filesystems := []vfs.Fs{vfs.NewOsFs("", user.GetHomeDir())}
s3config := vfs.S3FsConfig{
KeyPrefix: strings.TrimPrefix(user.GetHomeDir(), "/") + "/",
}
os.MkdirAll(user.GetHomeDir(), 0777)
s3fs, _ := vfs.NewS3Fs("", user.GetHomeDir(), s3config)
filesystems = append(filesystems, s3fs)
for _, fs := range filesystems {
path = "/"
resolved, _ = fs.ResolvePath(filepath.ToSlash(path))
if resolved != fs.Join(user.GetHomeDir(), "/") {
t.Errorf("Unexpected resolved path: %v for: %v, fs: %v", resolved, path, fs.Name())
}
path = "."
resolved, _ = fs.ResolvePath(filepath.ToSlash(path))
if resolved != fs.Join(user.GetHomeDir(), "/") {
t.Errorf("Unexpected resolved path: %v for: %v, fs: %v", resolved, path, fs.Name())
}
path = "test/sub"
resolved, _ = fs.ResolvePath(filepath.ToSlash(path))
if resolved != fs.Join(user.GetHomeDir(), "/test/sub") {
t.Errorf("Unexpected resolved path: %v for: %v, fs: %v", resolved, path, fs.Name())
}
path = "../test/sub"
resolved, err = fs.ResolvePath(filepath.ToSlash(path))
if fs.Name() == "osfs" {
if err == nil {
t.Errorf("Unexpected resolved path: %v for: %v, fs: %v", resolved, path, fs.Name())
}
} else {
if resolved != fs.Join(user.GetHomeDir(), "/test/sub") && err == nil {
t.Errorf("Unexpected resolved path: %v for: %v, fs: %v", resolved, path, fs.Name())
}
}
path = "../../../test/../sub"
resolved, err = fs.ResolvePath(filepath.ToSlash(path))
if fs.Name() == "osfs" {
if err == nil {
t.Errorf("Unexpected resolved path: %v for: %v, fs: %v", resolved, path, fs.Name())
}
} else {
if resolved != fs.Join(user.GetHomeDir(), "/sub") && err == nil {
t.Errorf("Unexpected resolved path: %v for: %v, fs: %v", resolved, path, fs.Name())
}
}
}
os.RemoveAll(user.GetHomeDir())
}
func TestUserPerms(t *testing.T) {
user := getTestUser(true)


@ -243,6 +243,17 @@
</div>
</div>
<div class="form-group row">
<label for="idS3KeyPrefix" class="col-sm-2 col-form-label">S3 Key Prefix</label>
<div class="col-sm-10">
<input type="text" class="form-control" id="idS3KeyPrefix" name="s3_key_prefix" placeholder=""
value="{{.User.FsConfig.S3Config.KeyPrefix}}" maxlength="255" aria-describedby="S3KeyPrefixHelpBlock">
<small id="S3KeyPrefixHelpBlock" class="form-text text-muted">
Similar to a chroot directory for a local filesystem. Cannot start with "/". Example: "somedir/subdir/".
</small>
</div>
</div>
<input type="hidden" name="expiration_date" id="hidden_start_datetime" value="">
<button type="submit" class="btn btn-primary float-right mt-3 mb-5 px-5 px-3">Submit</button>
</form>


@ -180,8 +180,8 @@ func (OsFs) GetAtomicUploadPath(name string) string {
// GetRelativePath returns the path for a file relative to the user's home dir.
// This is the path as seen by SFTP users
func (OsFs) GetRelativePath(name, rootPath string) string {
rel, err := filepath.Rel(rootPath, filepath.Clean(name))
func (fs OsFs) GetRelativePath(name string) string {
rel, err := filepath.Rel(fs.rootDir, filepath.Clean(name))
if err != nil {
return ""
}


@ -23,6 +23,13 @@ import (
// S3FsConfig defines the configuration for S3fs
type S3FsConfig struct {
Bucket string `json:"bucket,omitempty"`
// KeyPrefix is similar to a chroot directory for a local filesystem.
// If specified, the SFTP user will only see contents that start with
// this prefix and so you can restrict access to a specific virtual
// folder. The prefix, if not empty, must not start with "/" and must
// end with "/".
// If empty, the whole bucket contents will be available
KeyPrefix string `json:"key_prefix,omitempty"`
Region string `json:"region,omitempty"`
AccessKey string `json:"access_key,omitempty"`
AccessSecret string `json:"access_secret,omitempty"`
@ -95,6 +102,9 @@ func (fs S3Fs) Stat(name string) (os.FileInfo, error) {
}
return NewS3FileInfo(name, true, 0, time.Time{}), nil
}
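// a stat on the virtual folder root (the key prefix itself) always reports an existing directory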
if "/"+fs.config.KeyPrefix == name+"/" {
return NewS3FileInfo(name, true, 0, time.Time{}), nil
}
prefix := path.Dir(name)
if prefix == "/" || prefix == "." {
prefix = ""
@ -403,7 +413,7 @@ func (fs S3Fs) ScanRootDirContents() (int, int64, error) {
defer cancelFn()
err := fs.svc.ListObjectsV2PagesWithContext(ctx, &s3.ListObjectsV2Input{
Bucket: aws.String(fs.config.Bucket),
Prefix: aws.String(""),
Prefix: aws.String(fs.config.KeyPrefix),
}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
for _, fileObject := range page.Contents {
numFiles++
@ -423,14 +433,20 @@ func (S3Fs) GetAtomicUploadPath(name string) string {
// GetRelativePath returns the path for a file relative to the user's home dir.
// This is the path as seen by SFTP users
func (S3Fs) GetRelativePath(name, rootPath string) string {
rel := name
if name == "." {
func (fs S3Fs) GetRelativePath(name string) string {
rel := path.Clean(name)
if rel == "." {
rel = ""
}
if !strings.HasPrefix(rel, "/") {
return "/" + rel
}
if len(fs.config.KeyPrefix) > 0 {
if !strings.HasPrefix(rel, "/"+fs.config.KeyPrefix) {
rel = "/"
}
rel = path.Clean("/" + strings.TrimPrefix(rel, "/"+fs.config.KeyPrefix))
}
return rel
}
@ -441,7 +457,10 @@ func (S3Fs) Join(elem ...string) string {
// ResolvePath returns the matching filesystem path for the specified sftp path
func (fs S3Fs) ResolvePath(sftpPath string) (string, error) {
return sftpPath, nil
if !path.IsAbs(sftpPath) {
sftpPath = path.Clean("/" + sftpPath)
}
return fs.Join("/", fs.config.KeyPrefix, sftpPath), nil
}
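
To make the mapping concrete, here is a minimal sketch (illustrative only, not part of this commit) of how `ResolvePath` and `GetRelativePath` behave together; it follows the same construction pattern as the tests above, and the bucket, home directory and key prefix values are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/drakkan/sftpgo/vfs"
)

func main() {
	// hypothetical virtual folder for a single user
	s3config := vfs.S3FsConfig{
		Bucket:    "test",
		Region:    "eu-west-1",
		KeyPrefix: "folder/subfolder/",
	}
	s3fs, _ := vfs.NewS3Fs("", "/tmp/test_home_dir", s3config)

	// ResolvePath prepends the key prefix to the SFTP path
	resolved, _ := s3fs.ResolvePath("somedir/file.txt")
	fmt.Println(resolved) // expected: /folder/subfolder/somedir/file.txt

	// GetRelativePath strips the prefix again, so the user only sees
	// paths relative to the virtual folder
	fmt.Println(s3fs.GetRelativePath("/folder/subfolder/somedir/file.txt")) // expected: /somedir/file.txt

	// anything outside the prefix collapses to the virtual root
	fmt.Println(s3fs.GetRelativePath("/other/file.txt")) // expected: /
}
```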
func (fs *S3Fs) resolve(name *string, prefix string) (string, bool) {


@ -4,7 +4,9 @@ package vfs
import (
"errors"
"os"
"path"
"runtime"
"strings"
"time"
"github.com/drakkan/sftpgo/logger"
@ -36,7 +38,7 @@ type Fs interface {
IsPermission(err error) bool
ScanRootDirContents() (int, int64, error)
GetAtomicUploadPath(name string) string
GetRelativePath(name, rootPath string) string
GetRelativePath(name string) string
Join(elem ...string) string
}
@ -80,6 +82,15 @@ func ValidateS3FsConfig(config *S3FsConfig) error {
if len(config.AccessSecret) == 0 {
return errors.New("access_secret cannot be empty")
}
if len(config.KeyPrefix) > 0 {
if strings.HasPrefix(config.KeyPrefix, "/") {
return errors.New("key_prefix cannot start with /")
}
config.KeyPrefix = path.Clean(config.KeyPrefix)
if !strings.HasSuffix(config.KeyPrefix, "/") {
config.KeyPrefix += "/"
}
}
return nil
}
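
A short sketch (illustrative only, not part of this commit) of the `key_prefix` normalization above; the config values are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/drakkan/sftpgo/vfs"
)

func main() {
	cfg := &vfs.S3FsConfig{
		Bucket:       "test",
		Region:       "eu-west-1",
		AccessKey:    "key",
		AccessSecret: "secret",
		KeyPrefix:    "somedir/subdir", // no trailing "/"
	}
	if err := vfs.ValidateS3FsConfig(cfg); err == nil {
		fmt.Println(cfg.KeyPrefix) // expected: somedir/subdir/ (trailing "/" appended)
	}

	cfg.KeyPrefix = "/somedir/subdir/"
	fmt.Println(vfs.ValidateS3FsConfig(cfg)) // expected error: key_prefix cannot start with /
}
```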