Mirror of https://github.com/drakkan/sftpgo.git, synced 2024-11-25 09:00:27 +00:00
add a maximum allowed size for a single upload
This commit is contained in: parent 0dbf0cc81f, commit fa5333784b
21 changed files with 373 additions and 66 deletions
@@ -614,6 +614,36 @@ func (c *BaseConnection) hasSpaceForCrossRename(quotaResult vfs.QuotaCheckResult
 	return true
 }
 
+// GetMaxWriteSize returns the allowed size for an upload or an error
+// if not enough size is available for a resume/append
+func (c *BaseConnection) GetMaxWriteSize(quotaResult vfs.QuotaCheckResult, isResume bool, fileSize int64) (int64, error) {
+	maxWriteSize := quotaResult.GetRemainingSize()
+
+	if isResume {
+		if !c.Fs.IsUploadResumeSupported() {
+			return 0, c.GetOpUnsupportedError()
+		}
+		if c.User.Filters.MaxUploadFileSize > 0 && c.User.Filters.MaxUploadFileSize <= fileSize {
+			return 0, ErrQuotaExceeded
+		}
+		if c.User.Filters.MaxUploadFileSize > 0 {
+			maxUploadSize := c.User.Filters.MaxUploadFileSize - fileSize
+			if maxUploadSize < maxWriteSize || maxWriteSize == 0 {
+				maxWriteSize = maxUploadSize
+			}
+		}
+	} else {
+		if maxWriteSize > 0 {
+			maxWriteSize += fileSize
+		}
+		if c.User.Filters.MaxUploadFileSize > 0 && (c.User.Filters.MaxUploadFileSize < maxWriteSize || maxWriteSize == 0) {
+			maxWriteSize = c.User.Filters.MaxUploadFileSize
+		}
+	}
+
+	return maxWriteSize, nil
+}
+
 // HasSpace checks user's quota usage
 func (c *BaseConnection) HasSpace(checkFiles bool, requestPath string) vfs.QuotaCheckResult {
 	result := vfs.QuotaCheckResult{
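For a new upload the non-resume branch above reduces to "take the smaller positive bound, where 0 means unlimited". A minimal standalone sketch of that decision, re-implemented for illustration only (the helper name and sample values are hypothetical, not SFTPGo's actual method):

```go
package main

import "fmt"

// maxWriteSizeNewFile mirrors the non-resume branch of GetMaxWriteSize:
// start from the remaining quota (0 = unlimited) and tighten it with the
// per-file limit when that limit is positive and smaller, or when the
// quota alone imposes no bound.
func maxWriteSizeNewFile(remainingQuota, maxUploadFileSize int64) int64 {
	maxWriteSize := remainingQuota
	if maxUploadFileSize > 0 && (maxUploadFileSize < maxWriteSize || maxWriteSize == 0) {
		maxWriteSize = maxUploadFileSize
	}
	return maxWriteSize
}

func main() {
	fmt.Println(maxWriteSizeNewFile(0, 0))     // 0: no quota, no per-file limit -> unlimited
	fmt.Println(maxWriteSizeNewFile(0, 100))   // 100: only the per-file limit applies
	fmt.Println(maxWriteSizeNewFile(60, 100))  // 60: the remaining quota is the tighter bound
	fmt.Println(maxWriteSizeNewFile(500, 100)) // 100: the per-file limit is tighter
}
```

The TestMaxWriteSize unit test added below exercises exactly these combinations, including the resume branch.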
@@ -32,6 +32,10 @@ func (fs MockOsFs) HasVirtualFolders() bool {
 	return fs.hasVirtualFolders
 }
 
+func (fs MockOsFs) IsUploadResumeSupported() bool {
+	return !fs.hasVirtualFolders
+}
+
 func newMockOsFs(hasVirtualFolders bool, connectionID, rootDir string) vfs.Fs {
 	return &MockOsFs{
 		Fs: vfs.NewOsFs(connectionID, rootDir, nil),
@@ -536,7 +540,7 @@ func TestSpaceForCrossRename(t *testing.T) {
 	user := dataprovider.User{
 		Username:    userTestUsername,
 		Permissions: permissions,
-		HomeDir:     os.TempDir(),
+		HomeDir:     filepath.Clean(os.TempDir()),
 	}
 	fs, err := user.GetFilesystem("123")
 	assert.NoError(t, err)
@@ -1062,3 +1066,53 @@ func TestErrorsMapping(t *testing.T) {
 		}
 	}
 }
+
+func TestMaxWriteSize(t *testing.T) {
+	permissions := make(map[string][]string)
+	permissions["/"] = []string{dataprovider.PermAny}
+	user := dataprovider.User{
+		Username:    userTestUsername,
+		Permissions: permissions,
+		HomeDir:     filepath.Clean(os.TempDir()),
+	}
+	fs, err := user.GetFilesystem("123")
+	assert.NoError(t, err)
+	conn := NewBaseConnection("", ProtocolFTP, user, fs)
+	quotaResult := vfs.QuotaCheckResult{
+		HasSpace: true,
+	}
+	size, err := conn.GetMaxWriteSize(quotaResult, false, 0)
+	assert.NoError(t, err)
+	assert.Equal(t, int64(0), size)
+
+	conn.User.Filters.MaxUploadFileSize = 100
+	size, err = conn.GetMaxWriteSize(quotaResult, false, 0)
+	assert.NoError(t, err)
+	assert.Equal(t, int64(100), size)
+
+	quotaResult.QuotaSize = 1000
+	size, err = conn.GetMaxWriteSize(quotaResult, false, 50)
+	assert.NoError(t, err)
+	assert.Equal(t, int64(100), size)
+
+	quotaResult.QuotaSize = 1000
+	quotaResult.UsedSize = 990
+	size, err = conn.GetMaxWriteSize(quotaResult, false, 50)
+	assert.NoError(t, err)
+	assert.Equal(t, int64(60), size)
+
+	quotaResult.QuotaSize = 0
+	quotaResult.UsedSize = 0
+	size, err = conn.GetMaxWriteSize(quotaResult, true, 100)
+	assert.EqualError(t, err, ErrQuotaExceeded.Error())
+	assert.Equal(t, int64(0), size)
+
+	size, err = conn.GetMaxWriteSize(quotaResult, true, 10)
+	assert.NoError(t, err)
+	assert.Equal(t, int64(90), size)
+
+	conn.Fs = newMockOsFs(true, fs.ConnectionID(), user.GetHomeDir())
+	size, err = conn.GetMaxWriteSize(quotaResult, true, 100)
+	assert.EqualError(t, err, ErrOpUnsupported.Error())
+	assert.Equal(t, int64(0), size)
+}
@@ -97,6 +97,8 @@ type UserFilters struct {
 	// filters based on file extensions.
 	// Please note that these restrictions can be easily bypassed.
 	FileExtensions []ExtensionsFilter `json:"file_extensions,omitempty"`
+	// max size allowed for a single upload, 0 means unlimited
+	MaxUploadFileSize int64 `json:"max_upload_file_size,omitempty"`
 }
 
 // Filesystem defines cloud storage filesystem details
@@ -664,6 +666,7 @@ func (u *User) getACopy() User {
 		permissions[k] = perms
 	}
 	filters := UserFilters{}
+	filters.MaxUploadFileSize = u.Filters.MaxUploadFileSize
 	filters.AllowedIP = make([]string, len(u.Filters.AllowedIP))
 	copy(filters.AllowedIP, u.Filters.AllowedIP)
 	filters.DeniedIP = make([]string, len(u.Filters.DeniedIP))
@@ -30,6 +30,7 @@ For each account, the following properties can be configured:
 - `download_bandwidth` maximum download bandwidth as KB/s, 0 means unlimited.
 - `allowed_ip`, List of IP/Mask allowed to login. Any IP address not contained in this list cannot login. IP/Mask must be in CIDR notation as defined in RFC 4632 and RFC 4291, for example "192.0.2.0/24" or "2001:db8::/32"
 - `denied_ip`, List of IP/Mask not allowed to login. If an IP address is both allowed and denied then login will be denied
+- `max_upload_file_size`, max allowed size, as bytes, for a single file upload. The upload will be aborted if/when the size of the file being sent exceeds this limit. 0 means unlimited. This restriction does not apply to SSH system commands such as `git` and `rsync` (a short sketch follows this hunk)
 - `denied_login_methods`, List of login methods not allowed. To enable multi-step authentication you have to allow only multi-step login methods. The following login methods are supported:
   - `publickey`
   - `password`
|
|||
- system commands work only on local filyestem
|
||||
- we cannot avoid to leak real filesystem paths
|
||||
- quota check is suboptimal
|
||||
- maximum size restriction on single file is not respected
|
||||
|
||||
If quota is enabled and SFTPGO receives a system command, the used size and number of files are checked at the command start and not while new files are created/deleted. While the command is running the number of files is not checked, the remaining size is calculated as the difference between the max allowed quota and the used one, and it is checked against the bytes transferred via SSH. The command is aborted if it uploads more bytes than the remaining allowed size calculated at the command start. Anyway, we only see the bytes that the remote command sends to the local one via SSH. These bytes contain both protocol commands and files, and so the size of the files is different from the size trasferred via SSH: for example, a command can send compressed files, or a protocol command (few bytes) could delete a big file. To mitigate these issues, quotas are recalculated at the command end with a full scan of the directory specified for the system command. This could be heavy for big directories. If you need system commands and quotas you could consider disabling quota restrictions and periodically update quota usage yourself using the REST API.
|
||||
If quota is enabled and SFTPGo receives a system command, the used size and number of files are checked at the command start and not while new files are created/deleted. While the command is running the number of files is not checked, the remaining size is calculated as the difference between the max allowed quota and the used one, and it is checked against the bytes transferred via SSH. The command is aborted if it uploads more bytes than the remaining allowed size calculated at the command start. Anyway, we only see the bytes that the remote command sends to the local one via SSH. These bytes contain both protocol commands and files, and so the size of the files is different from the size trasferred via SSH: for example, a command can send compressed files, or a protocol command (few bytes) could delete a big file. To mitigate these issues, quotas are recalculated at the command end with a full scan of the directory specified for the system command. This could be heavy for big directories. If you need system commands and quotas you could consider disabling quota restrictions and periodically update quota usage yourself using the REST API.
|
||||
|
||||
For these reasons we should limit system commands usage as much as possible, we currently support the following system commands:
|
||||
|
||||
|
|
|
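The remaining-size check described in the paragraph above is computed once, at command start, and then compared against the raw SSH byte stream. A minimal hedged sketch of that bookkeeping (type and method names are illustrative, not SFTPGo's code):

```go
package main

import (
	"errors"
	"fmt"
)

var errQuotaExceeded = errors.New("quota exceeded")

// sshCommandQuota models the scheme described above: the remaining size is
// snapshotted when the command starts and compared against bytes seen on the
// SSH channel, not against actual file sizes.
type sshCommandQuota struct {
	remaining   int64 // quotaSize - usedSize, fixed at command start
	transferred int64
}

func (q *sshCommandQuota) addBytes(n int64) error {
	q.transferred += n
	if q.remaining > 0 && q.transferred > q.remaining {
		return errQuotaExceeded // abort the running command
	}
	return nil
}

func main() {
	q := sshCommandQuota{remaining: 1000}
	fmt.Println(q.addBytes(600)) // <nil>
	fmt.Println(q.addBytes(600)) // quota exceeded: 1200 > 1000
}
```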
@@ -140,7 +140,7 @@ Output:
 Command:
 
 ```console
-python sftpgo_api_cli.py update-user 9576 test_username --password "test_pwd" --home-dir="/tmp/test_home_dir" --uid 0 --gid 33 --max-sessions 3 --quota-size 0 --quota-files 4 --permissions "*" --subdirs-permissions "/dir1::list,download,create_symlinks" --upload-bandwidth 90 --download-bandwidth 80 --status 1 --expiration-date "" --allowed-ip "" --denied-ip "192.168.1.0/24" --denied-login-methods "" --fs local --virtual-folders "/vdir1::/tmp/mapped1::-1::-1" "/vdir2::/tmp/mapped2::100::104857600" --allowed-extensions "" --denied-extensions ""
+python sftpgo_api_cli.py update-user 9576 test_username --password "test_pwd" --home-dir="/tmp/test_home_dir" --uid 0 --gid 33 --max-sessions 3 --quota-size 0 --quota-files 4 --permissions "*" --subdirs-permissions "/dir1::list,download,create_symlinks" --upload-bandwidth 90 --download-bandwidth 80 --status 1 --expiration-date "" --allowed-ip "" --denied-ip "192.168.1.0/24" --denied-login-methods "" --fs local --virtual-folders "/vdir1::/tmp/mapped1::-1::-1" "/vdir2::/tmp/mapped2::100::104857600" --allowed-extensions "" --denied-extensions "" --max-upload-file-size 104857600
 ```
 
 Output:
@@ -175,7 +175,31 @@ Output:
   "filters": {
     "denied_ip": [
       "192.168.1.0/24"
-    ]
+    ],
+    "file_extensions": [
+      {
+        "allowed_extensions": [
+          ".jpg",
+          ".png"
+        ],
+        "path": "/dir1"
+      },
+      {
+        "allowed_extensions": [
+          ".rar",
+          ".png"
+        ],
+        "path": "/dir2"
+      },
+      {
+        "denied_extensions": [
+          ".zip",
+          ".rar"
+        ],
+        "path": "/dir3"
+      }
+    ],
+    "max_upload_file_size": 104857600
   },
   "gid": 33,
   "home_dir": "/tmp/test_home_dir",
@@ -81,7 +81,8 @@ class SFTPGoApiRequests:
                         s3_region='', s3_access_key='', s3_access_secret='', s3_endpoint='', s3_storage_class='',
                         s3_key_prefix='', gcs_bucket='', gcs_key_prefix='', gcs_storage_class='', gcs_credentials_file='',
                         gcs_automatic_credentials='automatic', denied_login_methods=[], virtual_folders=[],
-                        denied_extensions=[], allowed_extensions=[], s3_upload_part_size=0, s3_upload_concurrency=0):
+                        denied_extensions=[], allowed_extensions=[], s3_upload_part_size=0, s3_upload_concurrency=0,
+                        max_upload_file_size=0):
         user = {'id':user_id, 'username':username, 'uid':uid, 'gid':gid,
                 'max_sessions':max_sessions, 'quota_size':quota_size, 'quota_files':quota_files,
                 'upload_bandwidth':upload_bandwidth, 'download_bandwidth':download_bandwidth,
@@ -99,9 +100,9 @@ class SFTPGoApiRequests:
             user.update({'permissions':permissions})
         if virtual_folders:
             user.update({'virtual_folders':self.buildVirtualFolders(virtual_folders)})
         if allowed_ip or denied_ip or denied_login_methods or allowed_extensions or denied_extensions:
-            user.update({'filters':self.buildFilters(allowed_ip, denied_ip, denied_login_methods, denied_extensions,
-                                                     allowed_extensions)})
+            user.update({'filters':self.buildFilters(allowed_ip, denied_ip, denied_login_methods, denied_extensions,
+                                                     allowed_extensions, max_upload_file_size)})
         user.update({'filesystem':self.buildFsConfig(fs_provider, s3_bucket, s3_region, s3_access_key, s3_access_secret,
                                                      s3_endpoint, s3_storage_class, s3_key_prefix, gcs_bucket,
                                                      gcs_key_prefix, gcs_storage_class, gcs_credentials_file,
@@ -152,8 +153,9 @@ class SFTPGoApiRequests:
             permissions.update({directory:values})
         return permissions
 
-    def buildFilters(self, allowed_ip, denied_ip, denied_login_methods, denied_extensions, allowed_extensions):
-        filters = {}
+    def buildFilters(self, allowed_ip, denied_ip, denied_login_methods, denied_extensions, allowed_extensions,
+                     max_upload_file_size):
+        filters = {"max_upload_file_size":max_upload_file_size}
         if allowed_ip:
             if len(allowed_ip) == 1 and not allowed_ip[0]:
                 filters.update({'allowed_ip':[]})
@@ -256,13 +258,13 @@ class SFTPGoApiRequests:
                 s3_access_key='', s3_access_secret='', s3_endpoint='', s3_storage_class='', s3_key_prefix='', gcs_bucket='',
                 gcs_key_prefix='', gcs_storage_class='', gcs_credentials_file='', gcs_automatic_credentials='automatic',
                 denied_login_methods=[], virtual_folders=[], denied_extensions=[], allowed_extensions=[],
-                s3_upload_part_size=0, s3_upload_concurrency=0):
+                s3_upload_part_size=0, s3_upload_concurrency=0, max_upload_file_size=0):
         u = self.buildUserObject(0, username, password, public_keys, home_dir, uid, gid, max_sessions,
             quota_size, quota_files, self.buildPermissions(perms, subdirs_permissions), upload_bandwidth, download_bandwidth,
             status, expiration_date, allowed_ip, denied_ip, fs_provider, s3_bucket, s3_region, s3_access_key,
             s3_access_secret, s3_endpoint, s3_storage_class, s3_key_prefix, gcs_bucket, gcs_key_prefix, gcs_storage_class,
             gcs_credentials_file, gcs_automatic_credentials, denied_login_methods, virtual_folders, denied_extensions,
-            allowed_extensions, s3_upload_part_size, s3_upload_concurrency)
+            allowed_extensions, s3_upload_part_size, s3_upload_concurrency, max_upload_file_size)
         r = requests.post(self.userPath, json=u, auth=self.auth, verify=self.verify)
         self.printResponse(r)
 
@@ -272,13 +274,13 @@ class SFTPGoApiRequests:
                    s3_bucket='', s3_region='', s3_access_key='', s3_access_secret='', s3_endpoint='', s3_storage_class='',
                    s3_key_prefix='', gcs_bucket='', gcs_key_prefix='', gcs_storage_class='', gcs_credentials_file='',
                    gcs_automatic_credentials='automatic', denied_login_methods=[], virtual_folders=[], denied_extensions=[],
-                   allowed_extensions=[], s3_upload_part_size=0, s3_upload_concurrency=0):
+                   allowed_extensions=[], s3_upload_part_size=0, s3_upload_concurrency=0, max_upload_file_size=0):
         u = self.buildUserObject(user_id, username, password, public_keys, home_dir, uid, gid, max_sessions,
             quota_size, quota_files, self.buildPermissions(perms, subdirs_permissions), upload_bandwidth, download_bandwidth,
             status, expiration_date, allowed_ip, denied_ip, fs_provider, s3_bucket, s3_region, s3_access_key,
             s3_access_secret, s3_endpoint, s3_storage_class, s3_key_prefix, gcs_bucket, gcs_key_prefix, gcs_storage_class,
             gcs_credentials_file, gcs_automatic_credentials, denied_login_methods, virtual_folders, denied_extensions,
-            allowed_extensions, s3_upload_part_size, s3_upload_concurrency)
+            allowed_extensions, s3_upload_part_size, s3_upload_concurrency, max_upload_file_size)
         r = requests.put(urlparse.urljoin(self.userPath, 'user/' + str(user_id)), json=u, auth=self.auth, verify=self.verify)
         self.printResponse(r)
 
@@ -567,6 +569,8 @@ def addCommonUserArguments(parser):
                         help='Maximum download bandwidth as KB/s, 0 means unlimited. Default: %(default)s')
     parser.add_argument('--status', type=int, choices=[0, 1], default=1,
                         help='User\'s status. 1 enabled, 0 disabled. Default: %(default)s')
+    parser.add_argument('--max-upload-file-size', type=int, default=0,
+                        help='Maximum allowed size, as bytes, for a single file upload, 0 means unlimited. Default: %(default)s')
     parser.add_argument('-E', '--expiration-date', type=validDate, default='',
                         help='Expiration date as YYYY-MM-DD, empty string means no expiration. Default: %(default)s')
     parser.add_argument('-Y', '--allowed-ip', type=str, nargs='+', default=[],
@@ -750,7 +754,7 @@ if __name__ == '__main__':
                     args.s3_endpoint, args.s3_storage_class, args.s3_key_prefix, args.gcs_bucket, args.gcs_key_prefix,
                     args.gcs_storage_class, args.gcs_credentials_file, args.gcs_automatic_credentials,
                     args.denied_login_methods, args.virtual_folders, args.denied_extensions, args.allowed_extensions,
-                    args.s3_upload_part_size, args.s3_upload_concurrency)
+                    args.s3_upload_part_size, args.s3_upload_concurrency, args.max_upload_file_size)
     elif args.command == 'update-user':
         api.updateUser(args.id, args.username, args.password, args.public_keys, args.home_dir, args.uid, args.gid,
                        args.max_sessions, args.quota_size, args.quota_files, args.permissions, args.upload_bandwidth,
@@ -760,7 +764,7 @@ if __name__ == '__main__':
                        args.s3_key_prefix, args.gcs_bucket, args.gcs_key_prefix, args.gcs_storage_class,
                        args.gcs_credentials_file, args.gcs_automatic_credentials, args.denied_login_methods,
                        args.virtual_folders, args.denied_extensions, args.allowed_extensions, args.s3_upload_part_size,
-                       args.s3_upload_concurrency)
+                       args.s3_upload_concurrency, args.max_upload_file_size)
     elif args.command == 'delete-user':
         api.deleteUser(args.id)
     elif args.command == 'get-users':
@@ -761,6 +761,39 @@ func TestQuotaLimits(t *testing.T) {
 	assert.NoError(t, err)
 }
 
+func TestUploadMaxSize(t *testing.T) {
+	testFileSize := int64(65535)
+	u := getTestUser()
+	u.Filters.MaxUploadFileSize = testFileSize + 1
+	user, _, err := httpd.AddUser(u, http.StatusOK)
+	assert.NoError(t, err)
+	testFilePath := filepath.Join(homeBasePath, testFileName)
+	err = createTestFile(testFilePath, testFileSize)
+	assert.NoError(t, err)
+	testFileSize1 := int64(131072)
+	testFileName1 := "test_file1.dat"
+	testFilePath1 := filepath.Join(homeBasePath, testFileName1)
+	err = createTestFile(testFilePath1, testFileSize1)
+	assert.NoError(t, err)
+	client, err := getFTPClient(user, false)
+	if assert.NoError(t, err) {
+		err = ftpUploadFile(testFilePath1, testFileName1, testFileSize1, client, 0)
+		assert.Error(t, err)
+		err = ftpUploadFile(testFilePath, testFileName, testFileSize, client, 0)
+		assert.NoError(t, err)
+		err = client.Quit()
+		assert.NoError(t, err)
+	}
+	err = os.Remove(testFilePath)
+	assert.NoError(t, err)
+	err = os.Remove(testFilePath1)
+	assert.NoError(t, err)
+	_, err = httpd.RemoveUser(user, http.StatusOK)
+	assert.NoError(t, err)
+	err = os.RemoveAll(user.GetHomeDir())
+	assert.NoError(t, err)
+}
+
 func TestLoginWithIPilters(t *testing.T) {
 	u := getTestUser()
 	u.Filters.DeniedIP = []string{"192.167.0.0/24", "172.18.0.0/16"}
@@ -1103,6 +1136,25 @@ func TestAllocate(t *testing.T) {
 		assert.NoError(t, err)
 	}
 
+	user.Filters.MaxUploadFileSize = 100
+	user.QuotaSize = 0
+	user, _, err = httpd.UpdateUser(user, http.StatusOK)
+	assert.NoError(t, err)
+	client, err = getFTPClient(user, false)
+	if assert.NoError(t, err) {
+		code, response, err := client.SendCustomCommand("allo 99")
+		assert.NoError(t, err)
+		assert.Equal(t, ftp.StatusCommandOK, code)
+		assert.Equal(t, "Done !", response)
+		code, response, err = client.SendCustomCommand("allo 150")
+		assert.NoError(t, err)
+		assert.Equal(t, ftp.StatusFileUnavailable, code)
+		assert.Contains(t, response, common.ErrQuotaExceeded.Error())
+
+		err = client.Quit()
+		assert.NoError(t, err)
+	}
+
 	_, err = httpd.RemoveUser(user, http.StatusOK)
 	assert.NoError(t, err)
 	_, err = httpd.RemoveFolder(vfs.BaseVirtualFolder{MappedPath: mappedPath}, http.StatusOK)
@@ -195,6 +195,11 @@ func (c *Connection) Chtimes(name string, atime time.Time, mtime time.Time) erro
 // AllocateSpace implements ClientDriverExtensionAllocate
 func (c *Connection) AllocateSpace(size int) error {
 	c.UpdateLastActivity()
+	// check the max allowed file size first
+	if c.User.Filters.MaxUploadFileSize > 0 && int64(size) > c.User.Filters.MaxUploadFileSize {
+		return common.ErrQuotaExceeded
+	}
+
 	// we don't have a path here so we check home dir and any virtual folders
 	// we return no error if there is space in any folder
 	folders := []string{"/"}
@@ -344,9 +349,12 @@ func (c *Connection) handleFTPUploadToNewFile(resolvedPath, filePath, requestPat
 
 	vfs.SetPathPermissions(c.Fs, filePath, c.User.GetUID(), c.User.GetGID())
 
+	// we can get an error only for resume
+	maxWriteSize, _ := c.GetMaxWriteSize(quotaResult, false, 0)
+
 	baseTransfer := common.NewBaseTransfer(file, c.BaseConnection, cancelFn, resolvedPath, requestPath,
 		common.TransferUpload, 0, 0, true)
-	t := newTransfer(baseTransfer, w, nil, quotaResult.GetRemainingSize(), 0)
+	t := newTransfer(baseTransfer, w, nil, maxWriteSize, 0)
 
 	return t, nil
 }
@@ -360,10 +368,13 @@ func (c *Connection) handleFTPUploadToExistingFile(flags int, resolvedPath, file
 		return nil, common.ErrQuotaExceeded
 	}
 	minWriteOffset := int64(0)
 
-	if flags&os.O_APPEND != 0 && flags&os.O_TRUNC == 0 && !c.Fs.IsUploadResumeSupported() {
-		c.Log(logger.LevelInfo, "upload resume requested for path: %#v but not supported in fs implementation", resolvedPath)
-		return nil, c.GetOpUnsupportedError()
+	isResume := flags&os.O_APPEND != 0 && flags&os.O_TRUNC == 0
+	// if there is a size limit remaining size cannot be 0 here, since quotaResult.HasSpace
+	// will return false in this case and we deny the upload before
+	maxWriteSize, err := c.GetMaxWriteSize(quotaResult, isResume, fileSize)
+	if err != nil {
+		c.Log(logger.LevelDebug, "unable to get max write size: %v", err)
+		return nil, err
 	}
 
 	if common.Config.IsAtomicUploadEnabled() && c.Fs.IsAtomicUploadSupported() {
@@ -382,10 +393,7 @@ func (c *Connection) handleFTPUploadToExistingFile(flags int, resolvedPath, file
 	}
 
 	initialSize := int64(0)
-	// if there is a size limit remaining size cannot be 0 here, since quotaResult.HasSpace
-	// will return false in this case and we deny the upload before
-	maxWriteSize := quotaResult.GetRemainingSize()
-	if flags&os.O_APPEND != 0 && flags&os.O_TRUNC == 0 {
+	if isResume {
 		c.Log(logger.LevelDebug, "upload resume requested, file path: %#v initial size: %v", filePath, fileSize)
 		minWriteOffset = fileSize
 	} else {
@@ -402,9 +410,6 @@ func (c *Connection) handleFTPUploadToExistingFile(flags int, resolvedPath, file
 		} else {
 			initialSize = fileSize
 		}
-		if maxWriteSize > 0 {
-			maxWriteSize += fileSize
-		}
 	}
 
 	vfs.SetPathPermissions(c.Fs, filePath, c.User.GetUID(), c.User.GetGID())
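In the handlers above the computed maxWriteSize is handed to the protocol transfer, which fails the upload once more bytes than allowed arrive. A hedged sketch of such a size-capped writer (an illustration of the mechanism only, not SFTPGo's actual transfer type):

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
)

var errQuotaExceeded = errors.New("quota exceeded")

// limitedWriter fails once written bytes would exceed maxWriteSize;
// a maxWriteSize of 0 disables the cap, mirroring GetMaxWriteSize's contract.
type limitedWriter struct {
	dst          io.Writer
	maxWriteSize int64
	written      int64
}

func (w *limitedWriter) Write(p []byte) (int, error) {
	if w.maxWriteSize > 0 && w.written+int64(len(p)) > w.maxWriteSize {
		return 0, errQuotaExceeded
	}
	n, err := w.dst.Write(p)
	w.written += int64(n)
	return n, err
}

func main() {
	var buf bytes.Buffer
	w := &limitedWriter{dst: &buf, maxWriteSize: 10}
	fmt.Println(w.Write([]byte("hello")))       // 5 <nil>
	fmt.Println(w.Write([]byte("world!!!!!!"))) // 0 quota exceeded
}
```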
@@ -106,13 +106,11 @@ func updateUser(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 	currentPermissions := user.Permissions
-	currentFileExtensions := user.Filters.FileExtensions
 	currentS3AccessSecret := ""
 	if user.FsConfig.Provider == 1 {
 		currentS3AccessSecret = user.FsConfig.S3Config.AccessSecret
 	}
 	user.Permissions = make(map[string][]string)
-	user.Filters.FileExtensions = []dataprovider.ExtensionsFilter{}
 	err = render.DecodeJSON(r.Body, &user)
 	if err != nil {
 		sendAPIResponse(w, r, err, "", http.StatusBadRequest)
@@ -122,10 +120,6 @@ func updateUser(w http.ResponseWriter, r *http.Request) {
 	if len(user.Permissions) == 0 {
 		user.Permissions = currentPermissions
 	}
-	// we use new file extensions if passed otherwise the old ones
-	if len(user.Filters.FileExtensions) == 0 {
-		user.Filters.FileExtensions = currentFileExtensions
-	}
 	// we use the new access secret if different from the old one and not empty
 	if user.FsConfig.Provider == 1 {
 		if utils.RemoveDecryptionKey(currentS3AccessSecret) == user.FsConfig.S3Config.AccessSecret ||
@@ -708,6 +708,9 @@ func compareUserFilters(expected *dataprovider.User, actual *dataprovider.User)
 	if len(expected.Filters.DeniedLoginMethods) != len(actual.Filters.DeniedLoginMethods) {
 		return errors.New("Denied login methods mismatch")
 	}
+	if expected.Filters.MaxUploadFileSize != actual.Filters.MaxUploadFileSize {
+		return errors.New("Max upload file size mismatch")
+	}
 	for _, IPMask := range expected.Filters.AllowedIP {
 		if !utils.IsStringInSlice(IPMask, actual.Filters.AllowedIP) {
 			return errors.New("AllowedIP contents mismatch")
@@ -628,6 +628,7 @@ func TestUpdateUser(t *testing.T) {
 		AllowedExtensions: []string{".zip", ".rar"},
 		DeniedExtensions:  []string{".jpg", ".png"},
 	})
+	user.Filters.MaxUploadFileSize = 4096
 	user.UploadBandwidth = 1024
 	user.DownloadBandwidth = 512
 	user.VirtualFolders = nil
@@ -2329,6 +2330,14 @@ func TestWebUserAddMock(t *testing.T) {
 	rr = executeRequest(req)
 	checkResponseCode(t, http.StatusOK, rr.Code)
 	form.Set("denied_ip", "")
+	// test invalid max file upload size
+	form.Set("max_upload_file_size", "a")
+	b, contentType, _ = getMultipartFormData(form, "", "")
+	req, _ = http.NewRequest(http.MethodPost, webUserPath, &b)
+	req.Header.Set("Content-Type", contentType)
+	rr = executeRequest(req)
+	checkResponseCode(t, http.StatusOK, rr.Code)
+	form.Set("max_upload_file_size", "1000")
 	b, contentType, _ = getMultipartFormData(form, "", "")
 	req, _ = http.NewRequest(http.MethodPost, webUserPath, &b)
 	req.Header.Set("Content-Type", contentType)
@@ -2351,6 +2360,7 @@ func TestWebUserAddMock(t *testing.T) {
 	assert.Equal(t, user.UID, newUser.UID)
 	assert.Equal(t, user.UploadBandwidth, newUser.UploadBandwidth)
 	assert.Equal(t, user.DownloadBandwidth, newUser.DownloadBandwidth)
+	assert.Equal(t, int64(1000), newUser.Filters.MaxUploadFileSize)
 	assert.True(t, utils.IsStringInSlice(testPubKey, newUser.PublicKeys))
 	if val, ok := newUser.Permissions["/subdir"]; ok {
 		assert.True(t, utils.IsStringInSlice(dataprovider.PermListItems, val))
@@ -2410,6 +2420,7 @@ func TestWebUserUpdateMock(t *testing.T) {
 	form.Set("denied_ip", " 10.0.0.2/32 ")
 	form.Set("denied_extensions", "/dir1::.zip")
 	form.Set("ssh_login_methods", dataprovider.SSHLoginMethodKeyboardInteractive)
+	form.Set("max_upload_file_size", "100")
 	b, contentType, _ := getMultipartFormData(form, "", "")
 	req, _ = http.NewRequest(http.MethodPost, webUserPath+"/"+strconv.FormatInt(user.ID, 10), &b)
 	req.Header.Set("Content-Type", contentType)
@@ -2429,6 +2440,7 @@ func TestWebUserUpdateMock(t *testing.T) {
 	assert.Equal(t, user.QuotaSize, updateUser.QuotaSize)
 	assert.Equal(t, user.UID, updateUser.UID)
 	assert.Equal(t, user.GID, updateUser.GID)
+	assert.Equal(t, int64(100), updateUser.Filters.MaxUploadFileSize)
 
 	if val, ok := updateUser.Permissions["/otherdir"]; ok {
 		assert.True(t, utils.IsStringInSlice(dataprovider.PermListItems, val))
@@ -2491,6 +2503,7 @@ func TestWebUserS3Mock(t *testing.T) {
 	form.Set("s3_key_prefix", user.FsConfig.S3Config.KeyPrefix)
 	form.Set("allowed_extensions", "/dir1::.jpg,.png")
 	form.Set("denied_extensions", "/dir2::.zip")
+	form.Set("max_upload_file_size", "0")
 	// test invalid s3_upload_part_size
 	form.Set("s3_upload_part_size", "a")
 	b, contentType, _ := getMultipartFormData(form, "", "")
@@ -2576,6 +2589,7 @@ func TestWebUserGCSMock(t *testing.T) {
 	form.Set("gcs_storage_class", user.FsConfig.GCSConfig.StorageClass)
 	form.Set("gcs_key_prefix", user.FsConfig.GCSConfig.KeyPrefix)
 	form.Set("allowed_extensions", "/dir1::.jpg,.png")
+	form.Set("max_upload_file_size", "0")
 	b, contentType, _ := getMultipartFormData(form, "", "")
 	req, _ = http.NewRequest(http.MethodPost, webUserPath+"/"+strconv.FormatInt(user.ID, 10), &b)
 	req.Header.Set("Content-Type", contentType)
@@ -170,6 +170,11 @@ func TestCompareUserFilters(t *testing.T) {
 	assert.Error(t, err)
 	expected.Filters.DeniedLoginMethods = []string{}
 	actual.Filters.DeniedLoginMethods = []string{}
+	expected.Filters.MaxUploadFileSize = 0
+	actual.Filters.MaxUploadFileSize = 100
+	err = checkUser(expected, actual)
+	assert.Error(t, err)
+	actual.Filters.MaxUploadFileSize = 0
 	expected.Filters.FileExtensions = append(expected.Filters.FileExtensions, dataprovider.ExtensionsFilter{
 		Path:              "/",
 		AllowedExtensions: []string{".jpg", ".png"},
@@ -2,7 +2,7 @@ openapi: 3.0.1
 info:
   title: SFTPGo
   description: 'SFTPGo REST API'
-  version: 1.9.3
+  version: 1.9.4
 
 servers:
   - url: /api/v1
@@ -1528,6 +1528,11 @@ components:
           $ref: '#/components/schemas/ExtensionsFilter'
         nullable: true
         description: filters based on file extensions. These restrictions do not apply to files listing for performance reasons, so a denied file cannot be downloaded/overwritten/renamed but it will still be listed in the list of files. Please note that these restrictions can be easily bypassed
+      max_upload_file_size:
+        type: integer
+        format: int64
+        nullable: true
+        description: maximum allowed size, as bytes, for a single file upload. The upload will be aborted if/when the size of the file being sent exceeds this limit. 0 means unlimited. This restriction does not apply to SSH system commands such as `git` and `rsync`
     description: Additional restrictions
   S3Config:
     type: object
@@ -504,6 +504,8 @@ func getUserFromPostFields(r *http.Request) (dataprovider.User, error) {
 		Filters:  getFiltersFromUserPostFields(r),
 		FsConfig: fsConfig,
 	}
+	maxFileSize, err := strconv.ParseInt(r.Form.Get("max_upload_file_size"), 10, 64)
+	user.Filters.MaxUploadFileSize = maxFileSize
 	return user, err
 }
 
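The web form value arrives as a string; `strconv.ParseInt` rejects empty or non-numeric input, which is what the web admin tests above exercise with `max_upload_file_size=a`. A small standalone sketch of that parsing behavior (the sample values are hypothetical form inputs):

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// ParseInt fails on anything that is not a base-10 integer, including
	// the empty string, so a missing or malformed field surfaces as an error.
	for _, v := range []string{"104857600", "0", "", "a"} {
		size, err := strconv.ParseInt(v, 10, 64)
		fmt.Printf("%q -> size=%d err=%v\n", v, size, err)
	}
}
```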
@@ -266,9 +266,12 @@ func (c *Connection) handleSFTPUploadToNewFile(resolvedPath, filePath, requestPa
 
 	vfs.SetPathPermissions(c.Fs, filePath, c.User.GetUID(), c.User.GetGID())
 
+	// we can get an error only for resume
+	maxWriteSize, _ := c.GetMaxWriteSize(quotaResult, false, 0)
+
 	baseTransfer := common.NewBaseTransfer(file, c.BaseConnection, cancelFn, resolvedPath, requestPath,
 		common.TransferUpload, 0, 0, true)
-	t := newTransfer(baseTransfer, w, nil, quotaResult.GetRemainingSize())
+	t := newTransfer(baseTransfer, w, nil, maxWriteSize)
 
 	return t, nil
 }
@@ -284,10 +287,14 @@ func (c *Connection) handleSFTPUploadToExistingFile(pflags sftp.FileOpenFlags, r
 
 	minWriteOffset := int64(0)
 	osFlags := getOSOpenFlags(pflags)
+	isResume := pflags.Append && osFlags&os.O_TRUNC == 0
 
-	if pflags.Append && osFlags&os.O_TRUNC == 0 && !c.Fs.IsUploadResumeSupported() {
-		c.Log(logger.LevelInfo, "upload resume requested for path: %#v but not supported in fs implementation", resolvedPath)
-		return nil, sftp.ErrSSHFxOpUnsupported
+	// if there is a size limit remaining size cannot be 0 here, since quotaResult.HasSpace
+	// will return false in this case and we deny the upload before
+	maxWriteSize, err := c.GetMaxWriteSize(quotaResult, isResume, fileSize)
+	if err != nil {
+		c.Log(logger.LevelDebug, "unable to get max write size: %v", err)
+		return nil, err
 	}
 
 	if common.Config.IsAtomicUploadEnabled() && c.Fs.IsAtomicUploadSupported() {
@@ -306,11 +313,8 @@ func (c *Connection) handleSFTPUploadToExistingFile(pflags sftp.FileOpenFlags, r
 	}
 
 	initialSize := int64(0)
-	// if there is a size limit remaining size cannot be 0 here, since quotaResult.HasSpace
-	// will return false in this case and we deny the upload before
-	maxWriteSize := quotaResult.GetRemainingSize()
-	if pflags.Append && osFlags&os.O_TRUNC == 0 {
-		c.Log(logger.LevelDebug, "upload resume requested, file path: %#v initial size: %v", filePath, fileSize)
+	if isResume {
+		c.Log(logger.LevelDebug, "upload resume requested, file path %#v initial size: %v", filePath, fileSize)
 		minWriteOffset = fileSize
 	} else {
 		if vfs.IsLocalOsFs(c.Fs) {
@@ -194,6 +194,8 @@ func (c *scpCommand) handleUploadFile(resolvedPath, filePath string, sizeToRead
 		return err
 	}
 
+	maxWriteSize, _ := c.connection.GetMaxWriteSize(quotaResult, false, fileSize)
+
 	file, w, cancelFn, err := c.connection.Fs.Create(filePath, 0)
 	if err != nil {
 		c.connection.Log(logger.LevelError, "error creating file %#v: %v", resolvedPath, err)
@@ -202,7 +204,6 @@ func (c *scpCommand) handleUploadFile(resolvedPath, filePath string, sizeToRead
 	}
 
 	initialSize := int64(0)
-	maxWriteSize := quotaResult.GetRemainingSize()
 	if !isNewFile {
 		if vfs.IsLocalOsFs(c.connection.Fs) {
 			vfolder, err := c.connection.User.GetVirtualFolderForPath(path.Dir(requestPath))
@@ -2131,6 +2131,39 @@ func TestQuotaLimits(t *testing.T) {
 	assert.NoError(t, err)
 }
 
+func TestUploadMaxSize(t *testing.T) {
+	testFileSize := int64(65535)
+	usePubKey := false
+	u := getTestUser(usePubKey)
+	u.Filters.MaxUploadFileSize = testFileSize + 1
+	user, _, err := httpd.AddUser(u, http.StatusOK)
+	assert.NoError(t, err)
+	testFilePath := filepath.Join(homeBasePath, testFileName)
+	err = createTestFile(testFilePath, testFileSize)
+	assert.NoError(t, err)
+	testFileSize1 := int64(131072)
+	testFileName1 := "test_file1.dat"
+	testFilePath1 := filepath.Join(homeBasePath, testFileName1)
+	err = createTestFile(testFilePath1, testFileSize1)
+	assert.NoError(t, err)
+	client, err := getSftpClient(user, usePubKey)
+	if assert.NoError(t, err) {
+		defer client.Close()
+		err = sftpUploadFile(testFilePath1, testFileName1, testFileSize1, client)
+		assert.Error(t, err)
+		err = sftpUploadFile(testFilePath, testFileName, testFileSize, client)
+		assert.NoError(t, err)
+	}
+	err = os.Remove(testFilePath)
+	assert.NoError(t, err)
+	err = os.Remove(testFilePath1)
+	assert.NoError(t, err)
+	_, err = httpd.RemoveUser(user, http.StatusOK)
+	assert.NoError(t, err)
+	err = os.RemoveAll(user.GetHomeDir())
+	assert.NoError(t, err)
+}
+
 func TestBandwidthAndConnections(t *testing.T) {
 	usePubKey := false
 	testFileSize := int64(524288)
@@ -6375,6 +6408,36 @@ func TestSCPExtensionsFilter(t *testing.T) {
 	assert.NoError(t, err)
 }
 
+func TestSCPUploadMaxSize(t *testing.T) {
+	testFileSize := int64(65535)
+	usePubKey := true
+	u := getTestUser(usePubKey)
+	u.Filters.MaxUploadFileSize = testFileSize + 1
+	user, _, err := httpd.AddUser(u, http.StatusOK)
+	assert.NoError(t, err)
+	testFilePath := filepath.Join(homeBasePath, testFileName)
+	err = createTestFile(testFilePath, testFileSize)
+	assert.NoError(t, err)
+	testFileSize1 := int64(131072)
+	testFileName1 := "test_file1.dat"
+	testFilePath1 := filepath.Join(homeBasePath, testFileName1)
+	err = createTestFile(testFilePath1, testFileSize1)
+	assert.NoError(t, err)
+	remoteUpPath := fmt.Sprintf("%v@127.0.0.1:%v", user.Username, "/")
+	err = scpUpload(testFilePath1, remoteUpPath, false, false)
+	assert.Error(t, err)
+	err = scpUpload(testFilePath, remoteUpPath, false, false)
+	assert.NoError(t, err)
+	err = os.Remove(testFilePath)
+	assert.NoError(t, err)
+	err = os.Remove(testFilePath1)
+	assert.NoError(t, err)
+	_, err = httpd.RemoveUser(user, http.StatusOK)
+	assert.NoError(t, err)
+	err = os.RemoveAll(user.GetHomeDir())
+	assert.NoError(t, err)
+}
+
 func TestSCPVirtualFolders(t *testing.T) {
 	if len(scpPath) == 0 {
 		t.Skip("scp command not found, unable to execute this test")
@@ -152,6 +152,26 @@
         </div>
     </div>
 
+    <div class="form-group row">
+        <label for="idMaxUploadSize" class="col-sm-2 col-form-label">Max file upload size (bytes)</label>
+        <div class="col-sm-3">
+            <input type="number" class="form-control" id="idMaxUploadSize" name="max_upload_file_size" placeholder=""
+                value="{{.User.Filters.MaxUploadFileSize}}" min="0" aria-describedby="fqsHelpBlock">
+            <small id="fqsHelpBlock" class="form-text text-muted">
+                0 means no limit
+            </small>
+        </div>
+        <div class="col-sm-2"></div>
+        <label for="idMaxSessions" class="col-sm-2 col-form-label">Max sessions</label>
+        <div class="col-sm-3">
+            <input type="number" class="form-control" id="idMaxSessions" name="max_sessions" placeholder=""
+                value="{{.User.MaxSessions}}" min="0" aria-describedby="sessionsHelpBlock">
+            <small id="sessionsHelpBlock" class="form-text text-muted">
+                0 means no limit
+            </small>
+        </div>
+    </div>
+
     <div class="form-group row">
         <label for="idUploadBandwidth" class="col-sm-2 col-form-label">Bandwidth UL (KB/s)</label>
         <div class="col-sm-3">
@@ -173,23 +193,14 @@
     </div>
 
     <div class="form-group row">
-        <label for="idMaxSessions" class="col-sm-2 col-form-label">Max sessions</label>
-        <div class="col-sm-2">
-            <input type="number" class="form-control" id="idMaxSessions" name="max_sessions" placeholder=""
-                value="{{.User.MaxSessions}}" min="0" aria-describedby="sessionsHelpBlock">
-            <small id="sessionsHelpBlock" class="form-text text-muted">
-                0 means no limit
-            </small>
-        </div>
-        <div class="col-sm-1"></div>
-        <label for="idUID" class="col-sm-1 col-form-label">UID</label>
-        <div class="col-sm-2">
+        <label for="idUID" class="col-sm-2 col-form-label">UID</label>
+        <div class="col-sm-3">
             <input type="number" class="form-control" id="idUID" name="uid" placeholder="" value="{{.User.UID}}" min="0"
                 max="65535">
         </div>
-        <div class="col-sm-1"></div>
-        <label for="idGID" class="col-sm-1 col-form-label">GID</label>
-        <div class="col-sm-2">
+        <div class="col-sm-2"></div>
+        <label for="idGID" class="col-sm-2 col-form-label">GID</label>
+        <div class="col-sm-3">
             <input type="number" class="form-control" id="idGID" name="gid" placeholder="" value="{{.User.GID}}" min="0"
                 max="65535">
         </div>
@@ -258,10 +258,13 @@ func (c *Connection) handleUploadToNewFile(resolvedPath, filePath, requestPath s
 
 	vfs.SetPathPermissions(c.Fs, filePath, c.User.GetUID(), c.User.GetGID())
 
+	// we can get an error only for resume
+	maxWriteSize, _ := c.GetMaxWriteSize(quotaResult, false, 0)
+
 	baseTransfer := common.NewBaseTransfer(file, c.BaseConnection, cancelFn, resolvedPath, requestPath,
 		common.TransferUpload, 0, 0, true)
 
-	return newWebDavFile(baseTransfer, w, nil, quotaResult.GetRemainingSize(), nil, c.Fs), nil
+	return newWebDavFile(baseTransfer, w, nil, maxWriteSize, nil, c.Fs), nil
 }
 
 func (c *Connection) handleUploadToExistingFile(resolvedPath, filePath string, fileSize int64,
@@ -273,6 +276,10 @@ func (c *Connection) handleUploadToExistingFile(resolvedPath, filePath string, f
 		return nil, common.ErrQuotaExceeded
 	}
 
+	// if there is a size limit remaining size cannot be 0 here, since quotaResult.HasSpace
+	// will return false in this case and we deny the upload before
+	maxWriteSize, _ := c.GetMaxWriteSize(quotaResult, false, fileSize)
+
 	if common.Config.IsAtomicUploadEnabled() && c.Fs.IsAtomicUploadSupported() {
 		err = c.Fs.Rename(resolvedPath, filePath)
 		if err != nil {
@@ -288,9 +295,6 @@ func (c *Connection) handleUploadToExistingFile(resolvedPath, filePath string, f
 		return nil, c.GetFsError(err)
 	}
 	initialSize := int64(0)
-	// if there is a size limit remaining size cannot be 0 here, since quotaResult.HasSpace
-	// will return false in this case and we deny the upload before
-	maxWriteSize := quotaResult.GetRemainingSize()
 	if vfs.IsLocalOsFs(c.Fs) {
 		vfolder, err := c.User.GetVirtualFolderForPath(path.Dir(requestPath))
 		if err == nil {
@@ -304,9 +308,6 @@ func (c *Connection) handleUploadToExistingFile(resolvedPath, filePath string, f
 	} else {
 		initialSize = fileSize
 	}
-	if maxWriteSize > 0 {
-		maxWriteSize += fileSize
-	}
 
 	vfs.SetPathPermissions(c.Fs, filePath, c.User.GetUID(), c.User.GetGID())
@@ -635,6 +635,36 @@ func TestQuotaLimits(t *testing.T) {
 	assert.NoError(t, err)
 }
 
+func TestUploadMaxSize(t *testing.T) {
+	testFileSize := int64(65535)
+	u := getTestUser()
+	u.Filters.MaxUploadFileSize = testFileSize + 1
+	user, _, err := httpd.AddUser(u, http.StatusOK)
+	assert.NoError(t, err)
+	testFilePath := filepath.Join(homeBasePath, testFileName)
+	err = createTestFile(testFilePath, testFileSize)
+	assert.NoError(t, err)
+	testFileSize1 := int64(131072)
+	testFileName1 := "test_file_dav1.dat"
+	testFilePath1 := filepath.Join(homeBasePath, testFileName1)
+	err = createTestFile(testFilePath1, testFileSize1)
+	assert.NoError(t, err)
+	client := getWebDavClient(user)
+	err = uploadFile(testFilePath1, testFileName1, testFileSize1, client)
+	assert.Error(t, err)
+	err = uploadFile(testFilePath, testFileName, testFileSize, client)
+	assert.NoError(t, err)
+
+	err = os.Remove(testFilePath)
+	assert.NoError(t, err)
+	err = os.Remove(testFilePath1)
+	assert.NoError(t, err)
+	_, err = httpd.RemoveUser(user, http.StatusOK)
+	assert.NoError(t, err)
+	err = os.RemoveAll(user.GetHomeDir())
+	assert.NoError(t, err)
+}
+
 func TestClientClose(t *testing.T) {
 	u := getTestUser()
 	u.UploadBandwidth = 64