Browse Source

sftpfs: add buffering support

this way we improve performance over high latency networks
Nicola Murino 4 năm trước cách đây
mục cha
commit
ea26d7786c

+ 2 - 0
common/protocol_test.go

@@ -2144,6 +2144,8 @@ func TestSFTPLoopError(t *testing.T) {
 	assert.NoError(t, err)
 	err = os.RemoveAll(user2.GetHomeDir())
 	assert.NoError(t, err)
+	_, err = httpdtest.RemoveFolder(vfs.BaseVirtualFolder{Name: "sftp"}, http.StatusOK)
+	assert.NoError(t, err)
 }
 
 func TestNonLocalCrossRename(t *testing.T) {

+ 5 - 2
common/transfer.go

@@ -148,9 +148,12 @@ func (t *BaseTransfer) Truncate(fsPath string, size int64) (int64, error) {
 		}
 		if size == 0 && atomic.LoadInt64(&t.BytesSent) == 0 {
 			// for cloud providers the file is always truncated to zero, we don't support append/resume for uploads
-			return 0, nil
+			// for buffered SFTP we can have buffered bytes so we return an error
+			if !vfs.IsBufferedSFTPFs(t.Fs) {
+				return 0, nil
+			}
 		}
-		return 0, ErrOpUnsupported
+		return 0, vfs.ErrVfsUnsupported
 	}
 	return 0, errTransferMismatch
 }

+ 1 - 1
common/transfer_test.go

@@ -164,7 +164,7 @@ func TestTruncate(t *testing.T) {
 	_, err = transfer.Truncate(testFile, 0)
 	assert.NoError(t, err)
 	_, err = transfer.Truncate(testFile, 1)
-	assert.EqualError(t, err, ErrOpUnsupported.Error())
+	assert.EqualError(t, err, vfs.ErrVfsUnsupported.Error())
 
 	err = transfer.Close()
 	assert.NoError(t, err)

+ 1 - 1
dataprovider/user.go

@@ -230,7 +230,7 @@ func (u *User) getRootFs(connectionID string) (fs vfs.Fs, err error) {
 			return nil, err
 		}
 		forbiddenSelfUsers = append(forbiddenSelfUsers, u.Username)
-		return vfs.NewSFTPFs(connectionID, "", forbiddenSelfUsers, u.FsConfig.SFTPConfig)
+		return vfs.NewSFTPFs(connectionID, "", u.GetHomeDir(), forbiddenSelfUsers, u.FsConfig.SFTPConfig)
 	default:
 		return vfs.NewOsFs(connectionID, u.GetHomeDir(), ""), nil
 	}

+ 1 - 1
docs/dare.md

@@ -12,7 +12,7 @@ The passphrase is stored encrypted itself according to your [KMS configuration](
 
 The encrypted filesystem has some limitations compared to the local, unencrypted, one:
 
-- Upload resume is not supported.
+- Resuming uploads is not supported.
 - Opening a file for both reading and writing at the same time is not supported and so clients that require advanced filesystem-like features such as `sshfs` are not supported too.
 - Truncate is not supported.
 - System commands such as `git` or `rsync` are not supported: they will store data unencrypted.

+ 1 - 1
docs/s3.md

@@ -23,7 +23,7 @@ Some SFTP commands don't work over S3:
 - `chtimes`, `chown` and `chmod` will fail. If you want to silently ignore these method set `setstat_mode` to `1` or `2` in your configuration file
 - `truncate`, `symlink`, `readlink` are not supported
 - opening a file for both reading and writing at the same time is not supported
-- upload resume is not supported
+- resuming uploads is not supported
 - upload mode `atomic` is ignored since S3 uploads are already atomic
 
 Other notes:

+ 5 - 0
docs/sftpfs.md

@@ -10,6 +10,7 @@ Here are the supported configuration parameters:
 - `PrivateKey`
 - `Fingerprints`
 - `Prefix`
+- `BufferSize`
 
 The mandatory parameters are the endpoint, the username and a password or a private key. If you define both a password and a private key the key is tried first. The provided private key should be PEM encoded, something like this:
 
@@ -28,3 +29,7 @@ The password and the private key are stored as ciphertext according to your [KMS
 SHA256 fingerprints for remote server host keys are optional but highly recommended: if you provide one or more fingerprints the server host key will be verified against them and the connection will be denied if none of the fingerprints provided match that for the server host key.
 
 Specifying a prefix you can restrict all operations to a given path within the remote SFTP server.
+
+Buffering can be enabled by setting a buffer size (in MB) greater than 0. By enabling buffering, the reads and writes, from/to the remote SFTP server, are split into multiple concurrent requests and this allows data to be transferred at a faster rate, over high latency networks, by overlapping round-trip times. With buffering enabled, resuming uploads and truncate are not supported and a file cannot be opened for both reading and writing at the same time. 0 means disabled.
+
+Some SFTP servers (eg. AWS Transfer) do not support opening files read/write at the same time, you can enable buffering to work with them.

+ 1 - 1
ftpd/cryptfs_test.go

@@ -164,7 +164,7 @@ func TestResumeCryptFs(t *testing.T) {
 		assert.NoError(t, err)
 		err = ftpUploadFile(testFilePath, testFileName, int64(len(data)), client, 0)
 		assert.NoError(t, err)
-		// upload resume is not supported
+		// resuming uploads is not supported
 		err = ftpUploadFile(testFilePath, testFileName, int64(len(data)+5), client, 5)
 		assert.Error(t, err)
 		localDownloadPath := filepath.Join(homeBasePath, testDLFileName)

+ 82 - 0
ftpd/ftpd_test.go

@@ -987,6 +987,88 @@ func TestUploadErrors(t *testing.T) {
 	assert.NoError(t, err)
 }
 
+func TestSFTPBuffered(t *testing.T) {
+	u := getTestUser()
+	localUser, _, err := httpdtest.AddUser(u, http.StatusCreated)
+	assert.NoError(t, err)
+	u = getTestSFTPUser()
+	u.QuotaFiles = 100
+	u.FsConfig.SFTPConfig.BufferSize = 2
+	u.HomeDir = filepath.Join(os.TempDir(), u.Username)
+	sftpUser, _, err := httpdtest.AddUser(u, http.StatusCreated)
+	assert.NoError(t, err)
+	client, err := getFTPClient(sftpUser, true, nil)
+	if assert.NoError(t, err) {
+		testFilePath := filepath.Join(homeBasePath, testFileName)
+		testFileSize := int64(65535)
+		expectedQuotaSize := testFileSize
+		expectedQuotaFiles := 1
+		err = createTestFile(testFilePath, testFileSize)
+		assert.NoError(t, err)
+		err = checkBasicFTP(client)
+		assert.NoError(t, err)
+		err = ftpUploadFile(testFilePath, testFileName, testFileSize, client, 0)
+		assert.NoError(t, err)
+		// overwrite an existing file
+		err = ftpUploadFile(testFilePath, testFileName, testFileSize, client, 0)
+		assert.NoError(t, err)
+		localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
+		err = ftpDownloadFile(testFileName, localDownloadPath, testFileSize, client, 0)
+		assert.NoError(t, err)
+		user, _, err := httpdtest.GetUserByUsername(sftpUser.Username, http.StatusOK)
+		assert.NoError(t, err)
+		assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
+		assert.Equal(t, expectedQuotaSize, user.UsedQuotaSize)
+
+		data := []byte("test data")
+		err = os.WriteFile(testFilePath, data, os.ModePerm)
+		assert.NoError(t, err)
+		err = ftpUploadFile(testFilePath, testFileName, int64(len(data)), client, 0)
+		assert.NoError(t, err)
+		err = ftpUploadFile(testFilePath, testFileName, int64(len(data)+5), client, 5)
+		if assert.Error(t, err) {
+			assert.Contains(t, err.Error(), "operation unsupported")
+		}
+		err = ftpDownloadFile(testFileName, localDownloadPath, int64(4), client, 5)
+		assert.NoError(t, err)
+		readed, err := os.ReadFile(localDownloadPath)
+		assert.NoError(t, err)
+		assert.Equal(t, []byte("data"), readed)
+		// try to append to a file: it should fail because
+		// buffered SFTP does not support appending to files
+		srcFile, err := os.Open(testFilePath)
+		if assert.NoError(t, err) {
+			err = client.Append(testFileName, srcFile)
+			if assert.Error(t, err) {
+				assert.Contains(t, err.Error(), "operation unsupported")
+			}
+			err = srcFile.Close()
+			assert.NoError(t, err)
+			size, err := client.FileSize(testFileName)
+			assert.NoError(t, err)
+			assert.Equal(t, int64(len(data)), size)
+			err = ftpDownloadFile(testFileName, localDownloadPath, int64(len(data)), client, 0)
+			assert.NoError(t, err)
+		}
+
+		err = os.Remove(testFilePath)
+		assert.NoError(t, err)
+		err = os.Remove(localDownloadPath)
+		assert.NoError(t, err)
+		err = client.Quit()
+		assert.NoError(t, err)
+	}
+
+	_, err = httpdtest.RemoveUser(sftpUser, http.StatusOK)
+	assert.NoError(t, err)
+	_, err = httpdtest.RemoveUser(localUser, http.StatusOK)
+	assert.NoError(t, err)
+	err = os.RemoveAll(localUser.GetHomeDir())
+	assert.NoError(t, err)
+	err = os.RemoveAll(sftpUser.GetHomeDir())
+	assert.NoError(t, err)
+}
+
 func TestResume(t *testing.T) {
 	u := getTestUser()
 	localUser, _, err := httpdtest.AddUser(u, http.StatusCreated)

+ 3 - 3
ftpd/handler.go

@@ -415,12 +415,12 @@ func (c *Connection) handleFTPUploadToExistingFile(fs vfs.Fs, flags int, resolve
 
 	initialSize := int64(0)
 	if isResume {
-		c.Log(logger.LevelDebug, "upload resume requested, file path: %#v initial size: %v", filePath, fileSize)
+		c.Log(logger.LevelDebug, "resuming upload requested, file path: %#v initial size: %v", filePath, fileSize)
 		minWriteOffset = fileSize
 		initialSize = fileSize
-		if vfs.IsSFTPFs(fs) {
+		if vfs.IsSFTPFs(fs) && fs.IsUploadResumeSupported() {
 			// we need this since we don't allow resume with wrong offset, we should fix this in pkg/sftp
-			file.Seek(initialSize, io.SeekStart) //nolint:errcheck // for sftp seek cannot file, it simply set the offset
+			file.Seek(initialSize, io.SeekStart) //nolint:errcheck // for sftp seek simply sets the offset
 		}
 	} else {
 		if vfs.IsLocalOrSFTPFs(fs) {

+ 1 - 1
ftpd/internal_test.go

@@ -307,7 +307,7 @@ func (fs MockOsFs) Name() string {
 	return "mockOsFs"
 }
 
-// IsUploadResumeSupported returns true if upload resume is supported
+// IsUploadResumeSupported returns true if resuming uploads is supported
 func (MockOsFs) IsUploadResumeSupported() bool {
 	return false
 }

+ 18 - 0
httpd/httpd_test.go

@@ -690,6 +690,19 @@ func TestAddUserInvalidFsConfig(t *testing.T) {
 	u.FsConfig.SFTPConfig.PrivateKey = kms.NewSecret(kms.SecretStatusRedacted, "keyforpkey", "", "")
 	_, _, err = httpdtest.AddUser(u, http.StatusBadRequest)
 	assert.NoError(t, err)
+	u.FsConfig.SFTPConfig.PrivateKey = kms.NewPlainSecret("pk")
+	u.FsConfig.SFTPConfig.Endpoint = "127.1.1.1:22"
+	u.FsConfig.SFTPConfig.Username = defaultUsername
+	u.FsConfig.SFTPConfig.BufferSize = -1
+	_, resp, err := httpdtest.AddUser(u, http.StatusBadRequest)
+	if assert.NoError(t, err) {
+		assert.Contains(t, string(resp), "invalid buffer_size")
+	}
+	u.FsConfig.SFTPConfig.BufferSize = 1000
+	_, resp, err = httpdtest.AddUser(u, http.StatusBadRequest)
+	if assert.NoError(t, err) {
+		assert.Contains(t, string(resp), "invalid buffer_size")
+	}
 }
 
 func TestUserRedactedPassword(t *testing.T) {
@@ -1545,6 +1558,7 @@ func TestUserSFTPFs(t *testing.T) {
 	user.FsConfig.SFTPConfig.Password = kms.NewPlainSecret("sftp_pwd")
 	user.FsConfig.SFTPConfig.PrivateKey = kms.NewPlainSecret(sftpPrivateKey)
 	user.FsConfig.SFTPConfig.Fingerprints = []string{sftpPkeyFingerprint}
+	user.FsConfig.SFTPConfig.BufferSize = 2
 	_, resp, err := httpdtest.UpdateUser(user, http.StatusBadRequest, "")
 	assert.NoError(t, err)
 	assert.Contains(t, string(resp), "invalid endpoint")
@@ -1555,6 +1569,7 @@ func TestUserSFTPFs(t *testing.T) {
 	assert.NoError(t, err)
 	assert.Equal(t, "/", user.FsConfig.SFTPConfig.Prefix)
 	assert.True(t, user.FsConfig.SFTPConfig.DisableCouncurrentReads)
+	assert.Equal(t, int64(2), user.FsConfig.SFTPConfig.BufferSize)
 	initialPwdPayload := user.FsConfig.SFTPConfig.Password.GetPayload()
 	initialPkeyPayload := user.FsConfig.SFTPConfig.PrivateKey.GetPayload()
 	assert.Equal(t, kms.SecretStatusSecretBox, user.FsConfig.SFTPConfig.Password.GetStatus())
@@ -6079,6 +6094,7 @@ func TestWebUserSFTPFsMock(t *testing.T) {
 	user.FsConfig.SFTPConfig.Fingerprints = []string{sftpPkeyFingerprint}
 	user.FsConfig.SFTPConfig.Prefix = "/home/sftpuser"
 	user.FsConfig.SFTPConfig.DisableCouncurrentReads = true
+	user.FsConfig.SFTPConfig.BufferSize = 5
 	form := make(url.Values)
 	form.Set(csrfFormToken, csrfToken)
 	form.Set("username", user.Username)
@@ -6116,6 +6132,7 @@ func TestWebUserSFTPFsMock(t *testing.T) {
 	form.Set("sftp_fingerprints", user.FsConfig.SFTPConfig.Fingerprints[0])
 	form.Set("sftp_prefix", user.FsConfig.SFTPConfig.Prefix)
 	form.Set("sftp_disable_concurrent_reads", "true")
+	form.Set("sftp_buffer_size", strconv.FormatInt(user.FsConfig.SFTPConfig.BufferSize, 10))
 	b, contentType, _ = getMultipartFormData(form, "", "")
 	req, _ = http.NewRequest(http.MethodPost, path.Join(webUserPath, user.Username), &b)
 	setJWTCookieForReq(req, webToken)
@@ -6144,6 +6161,7 @@ func TestWebUserSFTPFsMock(t *testing.T) {
 	assert.Equal(t, updateUser.FsConfig.SFTPConfig.Endpoint, user.FsConfig.SFTPConfig.Endpoint)
 	assert.True(t, updateUser.FsConfig.SFTPConfig.DisableCouncurrentReads)
 	assert.Len(t, updateUser.FsConfig.SFTPConfig.Fingerprints, 1)
+	assert.Equal(t, user.FsConfig.SFTPConfig.BufferSize, updateUser.FsConfig.SFTPConfig.BufferSize)
 	assert.Contains(t, updateUser.FsConfig.SFTPConfig.Fingerprints, sftpPkeyFingerprint)
 	// now check that a redacted credentials are not saved
 	form.Set("sftp_password", redactedSecret+" ")

+ 3 - 0
httpd/schema/openapi.yaml

@@ -1613,6 +1613,9 @@ components:
         disable_concurrent_reads:
           type: boolean
           description: Concurrent reads are safe to use and disabling them will degrade performance. Some servers automatically delete files once they are downloaded. Using concurrent reads is problematic with such servers.
+        buffer_size:
+          type: integer
+          description: The size of the buffer (in MB) to use for transfers. By enabling buffering, the reads and writes, from/to the remote SFTP server, are split in multiple concurrent requests and this allows data to be transferred at a faster rate, over high latency networks, by overlapping round-trip times. With buffering enabled, resuming uploads is not supported and a file cannot be opened for both reading and writing at the same time. 0 means disabled.
     FilesystemConfig:
       type: object
       properties:

+ 9 - 3
httpd/web.go

@@ -727,7 +727,8 @@ func getGCSConfig(r *http.Request) (vfs.GCSFsConfig, error) {
 	return config, err
 }
 
-func getSFTPConfig(r *http.Request) vfs.SFTPFsConfig {
+func getSFTPConfig(r *http.Request) (vfs.SFTPFsConfig, error) {
+	var err error
 	config := vfs.SFTPFsConfig{}
 	config.Endpoint = r.Form.Get("sftp_endpoint")
 	config.Username = r.Form.Get("sftp_username")
@@ -737,7 +738,8 @@ func getSFTPConfig(r *http.Request) vfs.SFTPFsConfig {
 	config.Fingerprints = getSliceFromDelimitedValues(fingerprintsFormValue, "\n")
 	config.Prefix = r.Form.Get("sftp_prefix")
 	config.DisableCouncurrentReads = len(r.Form.Get("sftp_disable_concurrent_reads")) > 0
-	return config
+	config.BufferSize, err = strconv.ParseInt(r.Form.Get("sftp_buffer_size"), 10, 64)
+	return config, err
 }
 
 func getAzureConfig(r *http.Request) (vfs.AzBlobFsConfig, error) {
@@ -788,7 +790,11 @@ func getFsConfigFromPostFields(r *http.Request) (vfs.Filesystem, error) {
 	case vfs.CryptedFilesystemProvider:
 		fs.CryptConfig.Passphrase = getSecretFromFormField(r, "crypt_passphrase")
 	case vfs.SFTPFilesystemProvider:
-		fs.SFTPConfig = getSFTPConfig(r)
+		config, err := getSFTPConfig(r)
+		if err != nil {
+			return fs, err
+		}
+		fs.SFTPConfig = config
 	}
 	return fs, nil
 }

+ 3 - 0
httpdtest/httpdtest.go

@@ -1046,6 +1046,9 @@ func compareSFTPFsConfig(expected *vfs.Filesystem, actual *vfs.Filesystem) error
 	if expected.SFTPConfig.DisableCouncurrentReads != actual.SFTPConfig.DisableCouncurrentReads {
 		return errors.New("SFTPFs disable_concurrent_reads mismatch")
 	}
+	if expected.SFTPConfig.BufferSize != actual.SFTPConfig.BufferSize {
+		return errors.New("SFTPFs buffer_size mismatch")
+	}
 	if err := checkEncryptedSecret(expected.SFTPConfig.Password, actual.SFTPConfig.Password); err != nil {
 		return fmt.Errorf("SFTPFs password mismatch: %v", err)
 	}

+ 1 - 1
sftpd/cryptfs_test.go

@@ -159,7 +159,7 @@ func TestEmptyFile(t *testing.T) {
 }
 
 func TestUploadResumeCryptFs(t *testing.T) {
-	// upload resume is not supported
+	// resuming uploads is not supported
 	usePubKey := true
 	u := getTestUserWithCryptFs(usePubKey)
 	user, _, err := httpdtest.AddUser(u, http.StatusCreated)

+ 2 - 2
sftpd/handler.go

@@ -101,7 +101,7 @@ func (c *Connection) handleFilewrite(request *sftp.Request) (sftp.WriterAtReader
 	}
 
 	var errForRead error
-	if !vfs.IsLocalOrSFTPFs(fs) && request.Pflags().Read {
+	if !vfs.HasOpenRWSupport(fs) && request.Pflags().Read {
 		// read and write mode is supported only for filesystems that allow opening a file for both reading and writing at the same time
 		errForRead = sftp.ErrSSHFxOpUnsupported
 	}
@@ -383,7 +383,7 @@ func (c *Connection) handleSFTPUploadToExistingFile(fs vfs.Fs, pflags sftp.FileO
 
 	initialSize := int64(0)
 	if isResume {
-		c.Log(logger.LevelDebug, "upload resume requested, file path %#v initial size: %v", filePath, fileSize)
+		c.Log(logger.LevelDebug, "resuming upload requested, file path %#v initial size: %v", filePath, fileSize)
 		minWriteOffset = fileSize
 		initialSize = fileSize
 	} else {

+ 1 - 1
sftpd/internal_test.go

@@ -81,7 +81,7 @@ func (fs MockOsFs) Name() string {
 	return "mockOsFs"
 }
 
-// IsUploadResumeSupported returns true if upload resume is supported
+// IsUploadResumeSupported returns true if resuming uploads is supported
 func (MockOsFs) IsUploadResumeSupported() bool {
 	return false
 }

+ 118 - 5
sftpd/sftpd_test.go

@@ -745,6 +745,114 @@ func TestRealPath(t *testing.T) {
 	assert.NoError(t, err)
 }
 
+func TestBufferedSFTP(t *testing.T) {
+	usePubKey := false
+	u := getTestUser(usePubKey)
+	localUser, _, err := httpdtest.AddUser(u, http.StatusCreated)
+	assert.NoError(t, err)
+	err = os.RemoveAll(localUser.GetHomeDir())
+	assert.NoError(t, err)
+	u = getTestSFTPUser(usePubKey)
+	u.FsConfig.SFTPConfig.BufferSize = 2
+	u.HomeDir = filepath.Join(os.TempDir(), u.Username)
+	sftpUser, _, err := httpdtest.AddUser(u, http.StatusCreated)
+	assert.NoError(t, err)
+	client, err := getSftpClient(sftpUser, usePubKey)
+	if assert.NoError(t, err) {
+		defer client.Close()
+		testFilePath := filepath.Join(homeBasePath, testFileName)
+		testFileSize := int64(65535)
+		appendDataSize := int64(65535)
+		err = createTestFile(testFilePath, testFileSize)
+		assert.NoError(t, err)
+		initialHash, err := computeHashForFile(sha256.New(), testFilePath)
+		assert.NoError(t, err)
+
+		err = sftpUploadFile(testFilePath, testFileName, testFileSize, client)
+		assert.NoError(t, err)
+		err = appendToTestFile(testFilePath, appendDataSize)
+		assert.NoError(t, err)
+		err = sftpUploadResumeFile(testFilePath, testFileName, testFileSize+appendDataSize, false, client)
+		if assert.Error(t, err) {
+			assert.Contains(t, err.Error(), "SSH_FX_OP_UNSUPPORTED")
+		}
+		localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
+		err = sftpDownloadFile(testFileName, localDownloadPath, testFileSize, client)
+		assert.NoError(t, err)
+		downloadedFileHash, err := computeHashForFile(sha256.New(), localDownloadPath)
+		assert.NoError(t, err)
+		assert.Equal(t, initialHash, downloadedFileHash)
+		err = os.Remove(testFilePath)
+		assert.NoError(t, err)
+		err = os.Remove(localDownloadPath)
+		assert.NoError(t, err)
+
+		sftpFile, err := client.OpenFile(testFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC)
+		if assert.NoError(t, err) {
+			testData := []byte("sample test sftp data")
+			n, err := sftpFile.Write(testData)
+			assert.NoError(t, err)
+			assert.Equal(t, len(testData), n)
+			err = sftpFile.Truncate(0)
+			if assert.Error(t, err) {
+				assert.Contains(t, err.Error(), "SSH_FX_OP_UNSUPPORTED")
+			}
+			err = sftpFile.Truncate(4)
+			if assert.Error(t, err) {
+				assert.Contains(t, err.Error(), "SSH_FX_OP_UNSUPPORTED")
+			}
+			buffer := make([]byte, 128)
+			_, err = sftpFile.Read(buffer)
+			if assert.Error(t, err) {
+				assert.Contains(t, err.Error(), "SSH_FX_OP_UNSUPPORTED")
+			}
+			err = sftpFile.Close()
+			assert.NoError(t, err)
+			info, err := client.Stat(testFileName)
+			if assert.NoError(t, err) {
+				assert.Equal(t, int64(len(testData)), info.Size())
+			}
+		}
+		// test WriteAt
+		sftpFile, err = client.OpenFile(testFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC)
+		if assert.NoError(t, err) {
+			testData := []byte("hello world")
+			n, err := sftpFile.WriteAt(testData[:6], 0)
+			assert.NoError(t, err)
+			assert.Equal(t, 6, n)
+			n, err = sftpFile.WriteAt(testData[6:], 6)
+			assert.NoError(t, err)
+			assert.Equal(t, 5, n)
+			err = sftpFile.Close()
+			assert.NoError(t, err)
+			info, err := client.Stat(testFileName)
+			if assert.NoError(t, err) {
+				assert.Equal(t, int64(len(testData)), info.Size())
+			}
+		}
+		// test ReadAt
+		sftpFile, err = client.OpenFile(testFileName, os.O_RDONLY)
+		if assert.NoError(t, err) {
+			buffer := make([]byte, 128)
+			n, err := sftpFile.ReadAt(buffer, 6)
+			assert.ErrorIs(t, err, io.EOF)
+			assert.Equal(t, 5, n)
+			assert.Equal(t, []byte("world"), buffer[:n])
+			err = sftpFile.Close()
+			assert.NoError(t, err)
+		}
+	}
+
+	_, err = httpdtest.RemoveUser(sftpUser, http.StatusOK)
+	assert.NoError(t, err)
+	_, err = httpdtest.RemoveUser(localUser, http.StatusOK)
+	assert.NoError(t, err)
+	err = os.RemoveAll(localUser.GetHomeDir())
+	assert.NoError(t, err)
+	err = os.RemoveAll(sftpUser.GetHomeDir())
+	assert.NoError(t, err)
+}
+
 func TestUploadResume(t *testing.T) {
 	usePubKey := false
 	u := getTestUser(usePubKey)
@@ -779,7 +887,7 @@ func TestUploadResume(t *testing.T) {
 			assert.NoError(t, err)
 			assert.Equal(t, initialHash, downloadedFileHash)
 			err = sftpUploadResumeFile(testFilePath, testFileName, testFileSize+appendDataSize, true, client)
-			assert.Error(t, err, "file upload resume with invalid offset must fail")
+			assert.Error(t, err, "resume uploading file with invalid offset must fail")
 			err = os.Remove(testFilePath)
 			assert.NoError(t, err)
 			err = os.Remove(localDownloadPath)
@@ -3487,6 +3595,7 @@ func TestSFTPLoopSimple(t *testing.T) {
 
 func TestSFTPLoopVirtualFolders(t *testing.T) {
 	usePubKey := false
+	sftpFloderName := "sftp"
 	user1 := getTestUser(usePubKey)
 	user2 := getTestSFTPUser(usePubKey)
 	user3 := getTestSFTPUser(usePubKey)
@@ -3498,7 +3607,7 @@ func TestSFTPLoopVirtualFolders(t *testing.T) {
 	// user2 has user1 as SFTP fs
 	user1.VirtualFolders = append(user1.VirtualFolders, vfs.VirtualFolder{
 		BaseVirtualFolder: vfs.BaseVirtualFolder{
-			Name: "sftp",
+			Name: sftpFloderName,
 			FsConfig: vfs.Filesystem{
 				Provider: vfs.SFTPFilesystemProvider,
 				SFTPConfig: vfs.SFTPFsConfig{
@@ -3550,7 +3659,7 @@ func TestSFTPLoopVirtualFolders(t *testing.T) {
 	user2.FsConfig.SFTPConfig = vfs.SFTPFsConfig{}
 	user2.VirtualFolders = append(user2.VirtualFolders, vfs.VirtualFolder{
 		BaseVirtualFolder: vfs.BaseVirtualFolder{
-			Name: "sftp",
+			Name: sftpFloderName,
 			FsConfig: vfs.Filesystem{
 				Provider: vfs.SFTPFilesystemProvider,
 				SFTPConfig: vfs.SFTPFsConfig{
@@ -3588,6 +3697,8 @@ func TestSFTPLoopVirtualFolders(t *testing.T) {
 	assert.NoError(t, err)
 	err = os.RemoveAll(user3.GetHomeDir())
 	assert.NoError(t, err)
+	_, err = httpdtest.RemoveFolder(vfs.BaseVirtualFolder{Name: sftpFloderName}, http.StatusOK)
+	assert.NoError(t, err)
 }
 
 func TestNestedVirtualFolders(t *testing.T) {
@@ -6200,7 +6311,7 @@ func TestRelativePaths(t *testing.T) {
 		Password: kms.NewPlainSecret(defaultPassword),
 		Prefix:   keyPrefix,
 	}
-	sftpfs, _ := vfs.NewSFTPFs("", "", []string{user.Username}, sftpconfig)
+	sftpfs, _ := vfs.NewSFTPFs("", "", os.TempDir(), []string{user.Username}, sftpconfig)
 	if runtime.GOOS != osWindows {
 		filesystems = append(filesystems, s3fs, gcsfs, sftpfs)
 	}
@@ -6317,6 +6428,8 @@ func TestVirtualRelativePaths(t *testing.T) {
 	assert.Equal(t, "/vdir/file.txt", rel)
 	rel = fsRoot.GetRelativePath(filepath.Join(user.HomeDir, "vdir1/file.txt"))
 	assert.Equal(t, "/vdir1/file.txt", rel)
+	err = os.RemoveAll(mappedPath)
+	assert.NoError(t, err)
 }
 
 func TestUserPerms(t *testing.T) {
@@ -8922,7 +9035,7 @@ func sftpUploadFile(localSourcePath string, remoteDestPath string, expectedSize
 	return err
 }
 
-func sftpUploadResumeFile(localSourcePath string, remoteDestPath string, expectedSize int64, invalidOffset bool,
+func sftpUploadResumeFile(localSourcePath string, remoteDestPath string, expectedSize int64, invalidOffset bool, //nolint:unparam
 	client *sftp.Client) error {
 	srcFile, err := os.Open(localSourcePath)
 	if err != nil {

+ 1 - 1
templates/folders.html

@@ -239,7 +239,7 @@ function deleteAction() {
                 }
             ],
             "scrollX": false,
-            "scrollY": "50vh",
+            "scrollY": false,
             "responsive": true,
             "order": [[0, 'asc']]
         });

+ 13 - 4
templates/fsconfig.html

@@ -248,16 +248,25 @@
         </small>
     </div>
     <div class="col-sm-2"></div>
-    <label for="idSFTPUsername" class="col-sm-2 col-form-label">Username</label>
+    <label for="idSFTPUploadBufferSize" class="col-sm-2 col-form-label">Buffer size (MB)</label>
     <div class="col-sm-3">
-        <input type="text" class="form-control" id="idSFTPUsername" name="sftp_username" placeholder=""
-            value="{{.SFTPConfig.Username}}" maxlength="255">
+        <input type="number" class="form-control" id="idSFTPBufferSize" name="sftp_buffer_size" placeholder=""
+            value="{{.SFTPConfig.BufferSize}}" min="0" max="64" aria-describedby="SFTPBufferHelpBlock">
+        <small id="SFTPBufferHelpBlock" class="form-text text-muted">
+            A buffer size > 0 enables concurrent transfers
+        </small>
     </div>
 </div>
 
 <div class="form-group row sftp">
+    <label for="idSFTPUsername" class="col-sm-2 col-form-label">Username</label>
+    <div class="col-sm-3">
+        <input type="text" class="form-control" id="idSFTPUsername" name="sftp_username" placeholder=""
+            value="{{.SFTPConfig.Username}}" maxlength="255">
+    </div>
+    <div class="col-sm-2"></div>
     <label for="idSFTPPassword" class="col-sm-2 col-form-label">Password</label>
-    <div class="col-sm-10">
+    <div class="col-sm-3">
         <input type="password" class="form-control" id="idSFTPPassword" name="sftp_password" placeholder=""
             value="{{if .SFTPConfig.Password.IsEncrypted}}{{.RedactedSecret}}{{else}}{{.SFTPConfig.Password.GetPayload}}{{end}}"
             maxlength="1000">

+ 1 - 1
templates/users.html

@@ -260,7 +260,7 @@
                 }
             ],
             "scrollX": false,
-            "scrollY": "50vh",
+            "scrollY": false,
             "responsive": true,
             "order": [[1, 'asc']]
         });

+ 2 - 2
vfs/azblobfs.go

@@ -473,8 +473,8 @@ func (fs *AzureBlobFs) ReadDir(dirname string) ([]os.FileInfo, error) {
 	return result, nil
 }
 
-// IsUploadResumeSupported returns true if upload resume is supported.
-// Upload Resume is not supported on Azure Blob
+// IsUploadResumeSupported returns true if resuming uploads is supported.
+// Resuming uploads is not supported on Azure Blob
 func (*AzureBlobFs) IsUploadResumeSupported() bool {
 	return false
 }

+ 4 - 1
vfs/cryptfs.go

@@ -172,7 +172,10 @@ func (fs *CryptFs) Create(name string, flag int) (File, *PipeWriter, func(), err
 
 	go func() {
 		n, err := sio.Encrypt(f, r, fs.getSIOConfig(key))
-		f.Close()
+		errClose := f.Close()
+		if err == nil && errClose != nil {
+			err = errClose
+		}
 		r.CloseWithError(err) //nolint:errcheck
 		p.Done(err)
 		fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, readed bytes: %v, err: %v", name, n, err)

+ 1 - 0
vfs/filesystem.go

@@ -139,6 +139,7 @@ func (f *Filesystem) GetACopy() Filesystem {
 			PrivateKey:              f.SFTPConfig.PrivateKey.Clone(),
 			Prefix:                  f.SFTPConfig.Prefix,
 			DisableCouncurrentReads: f.SFTPConfig.DisableCouncurrentReads,
+			BufferSize:              f.SFTPConfig.BufferSize,
 		},
 	}
 	if len(f.SFTPConfig.Fingerprints) > 0 {

+ 1 - 1
vfs/folder.go

@@ -183,7 +183,7 @@ func (v *VirtualFolder) GetFilesystem(connectionID string, forbiddenSelfUsers []
 	case CryptedFilesystemProvider:
 		return NewCryptFs(connectionID, v.MappedPath, v.VirtualPath, v.FsConfig.CryptConfig)
 	case SFTPFilesystemProvider:
-		return NewSFTPFs(connectionID, v.VirtualPath, forbiddenSelfUsers, v.FsConfig.SFTPConfig)
+		return NewSFTPFs(connectionID, v.VirtualPath, v.MappedPath, forbiddenSelfUsers, v.FsConfig.SFTPConfig)
 	default:
 		return NewOsFs(connectionID, v.MappedPath, v.VirtualPath), nil
 	}

+ 2 - 2
vfs/gcsfs.go

@@ -396,8 +396,8 @@ func (fs *GCSFs) ReadDir(dirname string) ([]os.FileInfo, error) {
 	return result, nil
 }
 
-// IsUploadResumeSupported returns true if upload resume is supported.
-// SFTP Resume is not supported on S3
+// IsUploadResumeSupported returns true if resuming uploads is supported.
+// Resuming uploads is not supported on GCS
 func (*GCSFs) IsUploadResumeSupported() bool {
 	return false
 }

+ 1 - 1
vfs/osfs.go

@@ -151,7 +151,7 @@ func (*OsFs) ReadDir(dirname string) ([]os.FileInfo, error) {
 	return list, nil
 }
 
-// IsUploadResumeSupported returns true if upload resume is supported
+// IsUploadResumeSupported returns true if resuming uploads is supported
 func (*OsFs) IsUploadResumeSupported() bool {
 	return true
 }

+ 2 - 2
vfs/s3fs.go

@@ -415,8 +415,8 @@ func (fs *S3Fs) ReadDir(dirname string) ([]os.FileInfo, error) {
 	return result, err
 }
 
-// IsUploadResumeSupported returns true if upload resume is supported.
-// SFTP Resume is not supported on S3
+// IsUploadResumeSupported returns true if resuming uploads is supported.
+// Resuming uploads is not supported on S3
 func (*S3Fs) IsUploadResumeSupported() bool {
 	return false
 }

+ 155 - 25
vfs/sftpfs.go

@@ -1,6 +1,7 @@
 package vfs
 
 import (
+	"bufio"
 	"errors"
 	"fmt"
 	"io"
@@ -43,8 +44,14 @@ type SFTPFsConfig struct {
 	// Concurrent reads are safe to use and disabling them will degrade performance.
 	// Some servers automatically delete files once they are downloaded.
 	// Using concurrent reads is problematic with such servers.
-	DisableCouncurrentReads bool     `json:"disable_concurrent_reads,omitempty"`
-	forbiddenSelfUsernames  []string `json:"-"`
+	DisableCouncurrentReads bool `json:"disable_concurrent_reads,omitempty"`
+	// The buffer size (in MB) to use for transfers.
+	// Buffering could improve performance for high latency networks.
+	// With buffering enabled upload resume is not supported and a file
+	// cannot be opened for both reading and writing at the same time
+	// 0 means disabled.
+	BufferSize             int64    `json:"buffer_size,omitempty"`
+	forbiddenSelfUsernames []string `json:"-"`
 }
 
 func (c *SFTPFsConfig) isEqual(other *SFTPFsConfig) bool {
@@ -60,6 +67,9 @@ func (c *SFTPFsConfig) isEqual(other *SFTPFsConfig) bool {
 	if c.DisableCouncurrentReads != other.DisableCouncurrentReads {
 		return false
 	}
+	if c.BufferSize != other.BufferSize {
+		return false
+	}
 	if len(c.Fingerprints) != len(other.Fingerprints) {
 		return false
 	}
@@ -98,6 +108,21 @@ func (c *SFTPFsConfig) Validate() error {
 	if c.Username == "" {
 		return errors.New("username cannot be empty")
 	}
+	if c.BufferSize < 0 || c.BufferSize > 64 {
+		return errors.New("invalid buffer_size, valid range is 0-64")
+	}
+	if err := c.validateCredentials(); err != nil {
+		return err
+	}
+	if c.Prefix != "" {
+		c.Prefix = utils.CleanPath(c.Prefix)
+	} else {
+		c.Prefix = "/"
+	}
+	return nil
+}
+
+func (c *SFTPFsConfig) validateCredentials() error {
 	if c.Password.IsEmpty() && c.PrivateKey.IsEmpty() {
 		return errors.New("credentials cannot be empty")
 	}
@@ -113,11 +138,6 @@ func (c *SFTPFsConfig) Validate() error {
 	if !c.PrivateKey.IsEmpty() && !c.PrivateKey.IsValidInput() {
 		return errors.New("invalid private key")
 	}
-	if c.Prefix != "" {
-		c.Prefix = utils.CleanPath(c.Prefix)
-	} else {
-		c.Prefix = "/"
-	}
 	return nil
 }
 
@@ -143,15 +163,19 @@ type SFTPFs struct {
 	sync.Mutex
 	connectionID string
 	// if not empty this fs is mounted as a virtual folder in the specified path
-	mountPath  string
-	config     *SFTPFsConfig
-	sshClient  *ssh.Client
-	sftpClient *sftp.Client
-	err        chan error
+	mountPath    string
+	localTempDir string
+	config       *SFTPFsConfig
+	sshClient    *ssh.Client
+	sftpClient   *sftp.Client
+	err          chan error
 }
 
 // NewSFTPFs returns an SFTPFs object that allows interacting with an SFTP server
-func NewSFTPFs(connectionID, mountPath string, forbiddenSelfUsernames []string, config SFTPFsConfig) (Fs, error) {
+func NewSFTPFs(connectionID, mountPath, localTempDir string, forbiddenSelfUsernames []string, config SFTPFsConfig) (Fs, error) {
+	if localTempDir == "" {
+		localTempDir = filepath.Clean(os.TempDir())
+	}
 	if err := config.Validate(); err != nil {
 		return nil, err
 	}
@@ -169,6 +193,7 @@ func NewSFTPFs(connectionID, mountPath string, forbiddenSelfUsernames []string,
 	sftpFs := &SFTPFs{
 		connectionID: connectionID,
 		mountPath:    mountPath,
+		localTempDir: localTempDir,
 		config:       &config,
 		err:          make(chan error, 1),
 	}
@@ -220,7 +245,32 @@ func (fs *SFTPFs) Open(name string, offset int64) (File, *pipeat.PipeReaderAt, f
 		return nil, nil, nil, err
 	}
 	f, err := fs.sftpClient.Open(name)
-	return f, nil, nil, err
+	if err != nil {
+		// with buffering enabled we must not fall through with a nil file:
+		// f.Seek/f.Close below would panic on a nil *sftp.File
+		return nil, nil, nil, err
+	}
+	if fs.config.BufferSize == 0 {
+		return f, nil, nil, nil
+	}
+	if offset > 0 {
+		_, err = f.Seek(offset, io.SeekStart)
+		if err != nil {
+			f.Close()
+			return nil, nil, nil, err
+		}
+	}
+	r, w, err := pipeat.PipeInDir(fs.localTempDir)
+	if err != nil {
+		f.Close()
+		return nil, nil, nil, err
+	}
+	go func() {
+		br := bufio.NewReaderSize(f, int(fs.config.BufferSize)*1024*1024)
+		// we don't use io.Copy since bufio.Reader implements io.WriterTo and
+		// so it would call the sftp.File WriteTo method without buffering
+		n, err := fs.copy(w, br)
+		w.CloseWithError(err) //nolint:errcheck
+		f.Close()
+		fsLog(fs, logger.LevelDebug, "download completed, path: %#v size: %v, err: %v", name, n, err)
+	}()
+
+	return nil, r, nil, nil
 }
 
 // Create creates or opens the named file for writing
@@ -229,13 +279,51 @@ func (fs *SFTPFs) Create(name string, flag int) (File, *PipeWriter, func(), erro
 	if err != nil {
 		return nil, nil, nil, err
 	}
-	var f File
-	if flag == 0 {
-		f, err = fs.sftpClient.Create(name)
-	} else {
-		f, err = fs.sftpClient.OpenFile(name, flag)
+	if fs.config.BufferSize == 0 {
+		var f File
+		if flag == 0 {
+			f, err = fs.sftpClient.Create(name)
+		} else {
+			f, err = fs.sftpClient.OpenFile(name, flag)
+		}
+		return f, nil, nil, err
+	}
+	// buffering is enabled
+	f, err := fs.sftpClient.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	r, w, err := pipeat.PipeInDir(fs.localTempDir)
+	if err != nil {
+		f.Close()
+		return nil, nil, nil, err
 	}
-	return f, nil, nil, err
+	p := NewPipeWriter(w)
+
+	go func() {
+		bw := bufio.NewWriterSize(f, int(fs.config.BufferSize)*1024*1024)
+		// we don't use io.Copy since bufio.Writer implements io.ReaderFrom and
+		// so it would call the sftp.File ReadFrom method without buffering
+		n, err := fs.copy(bw, r)
+		errFlush := bw.Flush()
+		if err == nil && errFlush != nil {
+			err = errFlush
+		}
+		// on error truncate the remote file to the bytes actually copied.
+		// This must happen before Close: truncating an already closed
+		// handle always fails
+		var errTruncate error
+		if err != nil {
+			errTruncate = f.Truncate(n)
+		}
+		errClose := f.Close()
+		if err == nil && errClose != nil {
+			err = errClose
+		}
+		r.CloseWithError(err) //nolint:errcheck
+		p.Done(err)
+		fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, read bytes: %v, err: %v err truncate: %v",
+			name, n, err, errTruncate)
+	}()
+
+	return nil, p, nil, nil
 }
 
 // Rename renames (moves) source to target.
@@ -340,14 +428,14 @@ func (fs *SFTPFs) ReadDir(dirname string) ([]os.FileInfo, error) {
 	return result, nil
 }
 
-// IsUploadResumeSupported returns true if upload resume is supported.
-func (*SFTPFs) IsUploadResumeSupported() bool {
-	return true
+// IsUploadResumeSupported returns true if resuming uploads is supported.
+// Resume is disabled when buffering is enabled: bytes may still sit in the
+// local buffer, so the remote file size is not a reliable resume offset.
+func (fs *SFTPFs) IsUploadResumeSupported() bool {
+	return fs.config.BufferSize == 0
 }
 
 // IsAtomicUploadSupported returns true if atomic upload is supported.
-func (*SFTPFs) IsAtomicUploadSupported() bool {
-	return true
+// Like resume, atomic uploads are only available when buffering is disabled.
+func (fs *SFTPFs) IsAtomicUploadSupported() bool {
+	return fs.config.BufferSize == 0
 }
 
 // IsNotExist returns a boolean indicating whether the error is known to
@@ -372,6 +460,11 @@ func (*SFTPFs) IsNotSupported(err error) bool {
 
 // CheckRootPath creates the specified local root directory if it does not exists
 func (fs *SFTPFs) CheckRootPath(username string, uid int, gid int) bool {
+	if fs.config.BufferSize > 0 {
+		// we need a local directory for temporary files
+		osFs := NewOsFs(fs.ConnectionID(), fs.localTempDir, "")
+		osFs.CheckRootPath(username, uid, gid)
+	}
 	if fs.config.Prefix == "/" {
 		return true
 	}
@@ -595,6 +688,38 @@ func (fs *SFTPFs) Close() error {
 	return sshErr
 }
 
+// copy is a plain io.Copy-style loop with a fixed 32 KB chunk size.
+// It deliberately avoids io.Copy so the bufio wrappers used for buffered
+// transfers are not bypassed via the io.ReaderFrom/io.WriterTo fast paths.
+// It returns the number of bytes written and the first error encountered.
+func (fs *SFTPFs) copy(dst io.Writer, src io.Reader) (written int64, err error) {
+	buf := make([]byte, 32768)
+	for {
+		nr, er := src.Read(buf)
+		if nr > 0 {
+			nw, ew := dst.Write(buf[0:nr])
+			// defend against writers that report an invalid byte count
+			if nw < 0 || nr < nw {
+				nw = 0
+				if ew == nil {
+					ew = errors.New("invalid write")
+				}
+			}
+			written += int64(nw)
+			if ew != nil {
+				err = ew
+				break
+			}
+			if nr != nw {
+				err = io.ErrShortWrite
+				break
+			}
+		}
+		if er != nil {
+			// io.EOF is the normal termination condition, not an error
+			if er != io.EOF {
+				err = er
+			}
+			break
+		}
+	}
+	return written, err
+}
+
 func (fs *SFTPFs) checkConnection() error {
 	err := fs.closed()
 	if err == nil {
@@ -659,6 +784,11 @@ func (fs *SFTPFs) createConnection() error {
 		opt := sftp.UseConcurrentReads(false)
 		opt(fs.sftpClient) //nolint:errcheck
 	}
+	if fs.config.BufferSize > 0 {
+		fsLog(fs, logger.LevelDebug, "enabling concurrent writes")
+		opt := sftp.UseConcurrentWrites(true)
+		opt(fs.sftpClient) //nolint:errcheck
+	}
 	go fs.wait()
 	return nil
 }

+ 31 - 0
vfs/vfs.go

@@ -553,11 +553,42 @@ func IsSFTPFs(fs Fs) bool {
 	return strings.HasPrefix(fs.Name(), sftpFsName)
 }
 
+// IsBufferedSFTPFs returns true if this is a buffered SFTP filesystem.
+// Buffering is detected indirectly: an SFTP fs reports upload resume as
+// unsupported only when a buffer size is configured.
+func IsBufferedSFTPFs(fs Fs) bool {
+	if !IsSFTPFs(fs) {
+		return false
+	}
+	return !fs.IsUploadResumeSupported()
+}
+
+// IsLocalOrUnbufferedSFTPFs returns true if fs is local or SFTP with no buffer
+func IsLocalOrUnbufferedSFTPFs(fs Fs) bool {
+	if IsLocalOsFs(fs) {
+		return true
+	}
+	if IsSFTPFs(fs) {
+		// an SFTP fs supports upload resume only when buffering is disabled
+		return fs.IsUploadResumeSupported()
+	}
+	return false
+}
+
 // IsLocalOrSFTPFs returns true if fs is local or SFTP
 func IsLocalOrSFTPFs(fs Fs) bool {
 	return IsLocalOsFs(fs) || IsSFTPFs(fs)
 }
 
+// HasOpenRWSupport returns true if the fs can open a file
+// for reading and writing at the same time.
+// Local filesystems always support this; SFTP supports it only when
+// buffering is disabled (buffering implies resume unsupported).
+func HasOpenRWSupport(fs Fs) bool {
+	return IsLocalOsFs(fs) || (IsSFTPFs(fs) && fs.IsUploadResumeSupported())
+}
+
 // IsLocalOrCryptoFs returns true if fs is local or local encrypted
 func IsLocalOrCryptoFs(fs Fs) bool {
 	return IsLocalOsFs(fs) || IsCryptOsFs(fs)

+ 1 - 1
webdavd/handler.go

@@ -141,7 +141,7 @@ func (c *Connection) getFile(fs vfs.Fs, fsPath, virtualPath string) (webdav.File
 
 	// for cloud fs we open the file when we receive the first read to avoid downloading the first part of
 	// the file if it was opened only to do a stat or a readdir, so it is not a real download
-	if vfs.IsLocalOrSFTPFs(fs) {
+	if vfs.IsLocalOrUnbufferedSFTPFs(fs) {
 		file, r, cancelFn, err = fs.Open(fsPath, 0)
 		if err != nil {
 			c.Log(logger.LevelWarn, "could not open file %#v for reading: %+v", fsPath, err)

+ 1 - 1
webdavd/internal_test.go

@@ -278,7 +278,7 @@ func (fs *MockOsFs) Open(name string, offset int64) (vfs.File, *pipeat.PipeReade
 	return nil, fs.reader, nil, nil
 }
 
-// IsUploadResumeSupported returns true if upload resume is supported
+// IsUploadResumeSupported returns true if resuming uploads is supported
 func (*MockOsFs) IsUploadResumeSupported() bool {
 	return false
 }

+ 78 - 3
webdavd/webdavd_test.go

@@ -1114,9 +1114,9 @@ func TestQuotaLimits(t *testing.T) {
 		client := getWebDavClient(user, true, nil)
 		// test quota files
 		err = uploadFile(testFilePath, testFileName+".quota", testFileSize, client)
-		assert.NoError(t, err)
+		assert.NoError(t, err, "username: %v", user.Username)
 		err = uploadFile(testFilePath, testFileName+".quota1", testFileSize, client)
-		assert.Error(t, err)
+		assert.Error(t, err, "username: %v", user.Username)
 		err = client.Rename(testFileName+".quota", testFileName, false)
 		assert.NoError(t, err)
 		files, err := client.ReadDir("/")
@@ -1385,6 +1385,80 @@ func TestLoginInvalidFs(t *testing.T) {
 	assert.NoError(t, err)
 }
 
+// TestSFTPBuffered exercises WebDAV transfers against an SFTP backend with a
+// 2 MB buffer: upload, overwrite, download, quota accounting and Range requests.
+func TestSFTPBuffered(t *testing.T) {
+	u := getTestUser()
+	localUser, _, err := httpdtest.AddUser(u, http.StatusCreated)
+	assert.NoError(t, err)
+	u = getTestSFTPUser()
+	u.QuotaFiles = 1000
+	u.HomeDir = filepath.Join(os.TempDir(), u.Username)
+	// enable buffering for the SFTP backed user, size is in MB
+	u.FsConfig.SFTPConfig.BufferSize = 2
+	sftpUser, _, err := httpdtest.AddUser(u, http.StatusCreated)
+	assert.NoError(t, err)
+
+	client := getWebDavClient(sftpUser, true, nil)
+	assert.NoError(t, checkBasicFunc(client))
+	testFilePath := filepath.Join(homeBasePath, testFileName)
+	testFileSize := int64(65535)
+	expectedQuotaSize := testFileSize
+	expectedQuotaFiles := 1
+	err = createTestFile(testFilePath, testFileSize)
+	assert.NoError(t, err)
+	err = uploadFile(testFilePath, testFileName, testFileSize, client)
+	assert.NoError(t, err)
+	// overwrite an existing file
+	err = uploadFile(testFilePath, testFileName, testFileSize, client)
+	assert.NoError(t, err)
+	localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
+	err = downloadFile(testFileName, localDownloadPath, testFileSize, client)
+	assert.NoError(t, err)
+
+	// the overwrite must not be counted twice in the quota
+	user, _, err := httpdtest.GetUserByUsername(sftpUser.Username, http.StatusOK)
+	assert.NoError(t, err)
+	assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
+	assert.Equal(t, expectedQuotaSize, user.UsedQuotaSize)
+
+	// partial (Range) downloads must work with the buffered backend too
+	fileContent := []byte("test file contents")
+	err = os.WriteFile(testFilePath, fileContent, os.ModePerm)
+	assert.NoError(t, err)
+	err = uploadFile(testFilePath, testFileName, int64(len(fileContent)), client)
+	assert.NoError(t, err)
+	remotePath := fmt.Sprintf("http://%v/%v", webDavServerAddr, testFileName)
+	req, err := http.NewRequest(http.MethodGet, remotePath, nil)
+	assert.NoError(t, err)
+	httpClient := httpclient.GetHTTPClient()
+	req.SetBasicAuth(user.Username, defaultPassword)
+	req.Header.Set("Range", "bytes=5-")
+	resp, err := httpClient.Do(req)
+	if assert.NoError(t, err) {
+		defer resp.Body.Close()
+		assert.Equal(t, http.StatusPartialContent, resp.StatusCode)
+		bodyBytes, err := io.ReadAll(resp.Body)
+		assert.NoError(t, err)
+		assert.Equal(t, "file contents", string(bodyBytes))
+	}
+	req.Header.Set("Range", "bytes=5-8")
+	resp, err = httpClient.Do(req)
+	if assert.NoError(t, err) {
+		defer resp.Body.Close()
+		assert.Equal(t, http.StatusPartialContent, resp.StatusCode)
+		bodyBytes, err := io.ReadAll(resp.Body)
+		assert.NoError(t, err)
+		assert.Equal(t, "file", string(bodyBytes))
+	}
+
+	err = os.Remove(testFilePath)
+	assert.NoError(t, err)
+	_, err = httpdtest.RemoveUser(sftpUser, http.StatusOK)
+	assert.NoError(t, err)
+	_, err = httpdtest.RemoveUser(localUser, http.StatusOK)
+	assert.NoError(t, err)
+	err = os.RemoveAll(localUser.GetHomeDir())
+	assert.NoError(t, err)
+	err = os.RemoveAll(sftpUser.GetHomeDir())
+	assert.NoError(t, err)
+}
+
 func TestBytesRangeRequests(t *testing.T) {
 	u := getTestUser()
 	u.Username = u.Username + "1"
@@ -1429,7 +1503,6 @@ func TestBytesRangeRequests(t *testing.T) {
 			}
 		}
 
-		assert.NoError(t, err)
 		err = os.Remove(testFilePath)
 		assert.NoError(t, err)
 		_, err = httpdtest.RemoveUser(user, http.StatusOK)
@@ -2125,6 +2198,8 @@ func TestSFTPLoopVirtualFolders(t *testing.T) {
 	assert.NoError(t, err)
 	err = os.RemoveAll(user2.GetHomeDir())
 	assert.NoError(t, err)
+	_, err = httpdtest.RemoveFolder(vfs.BaseVirtualFolder{Name: "sftp"}, http.StatusOK)
+	assert.NoError(t, err)
 }
 
 func TestNestedVirtualFolders(t *testing.T) {