Nicola Murino 2021-02-11 19:45:52 +01:00
parent 4ddfe41f23
commit 51f110bc7b
No known key found for this signature in database
GPG key ID: 2F1FB59433D5A8CB
31 changed files with 428 additions and 77 deletions


@@ -325,7 +325,7 @@ func parsePatternsFilesFilters() []dataprovider.PatternsFilter {
     var patterns []dataprovider.PatternsFilter
     for _, val := range portableAllowedPatterns {
         p, exts := getPatternsFilterValues(strings.TrimSpace(val))
-        if len(p) > 0 {
+        if p != "" {
             patterns = append(patterns, dataprovider.PatternsFilter{
                 Path:            path.Clean(p),
                 AllowedPatterns: exts,
@@ -335,7 +335,7 @@ func parsePatternsFilesFilters() []dataprovider.PatternsFilter {
     }
     for _, val := range portableDeniedPatterns {
         p, exts := getPatternsFilterValues(strings.TrimSpace(val))
-        if len(p) > 0 {
+        if p != "" {
             found := false
             for index, e := range patterns {
                 if path.Clean(e.Path) == path.Clean(p) {
@@ -364,7 +364,7 @@ func getPatternsFilterValues(value string) (string, []string) {
         exts := []string{}
         for _, e := range strings.Split(dirExts[1], ",") {
             cleanedExt := strings.TrimSpace(e)
-            if len(cleanedExt) > 0 {
+            if cleanedExt != "" {
                 exts = append(exts, cleanedExt)
             }
         }


@@ -705,7 +705,7 @@ func (c ConnectionStatus) GetConnectionInfo() string {
 func (c ConnectionStatus) GetTransfersAsString() string {
     result := ""
     for _, t := range c.Transfers {
-        if len(result) > 0 {
+        if result != "" {
             result += ". "
         }
         result += t.getConnectionTransferAsString()


@@ -683,7 +683,7 @@ func (c *BaseConnection) hasSpaceForRename(virtualSourcePath, virtualTargetPath
         // rename between user root dir and a virtual folder included in user quota
         return true
     }
-    quotaResult := c.HasSpace(true, virtualTargetPath)
+    quotaResult := c.HasSpace(true, false, virtualTargetPath)
     return c.hasSpaceForCrossRename(quotaResult, initialSize, fsSourcePath)
 }
@@ -774,7 +774,7 @@ func (c *BaseConnection) GetMaxWriteSize(quotaResult vfs.QuotaCheckResult, isRes
 }

 // HasSpace checks user's quota usage
-func (c *BaseConnection) HasSpace(checkFiles bool, requestPath string) vfs.QuotaCheckResult {
+func (c *BaseConnection) HasSpace(checkFiles, getUsage bool, requestPath string) vfs.QuotaCheckResult {
     result := vfs.QuotaCheckResult{
         HasSpace:    true,
         AllowedSize: 0,
@@ -792,14 +792,14 @@ func (c *BaseConnection) HasSpace(checkFiles bool, requestPath string) vfs.Quota
     var vfolder vfs.VirtualFolder
     vfolder, err = c.User.GetVirtualFolderForPath(path.Dir(requestPath))
     if err == nil && !vfolder.IsIncludedInUserQuota() {
-        if vfolder.HasNoQuotaRestrictions(checkFiles) {
+        if vfolder.HasNoQuotaRestrictions(checkFiles) && !getUsage {
             return result
         }
         result.QuotaSize = vfolder.QuotaSize
         result.QuotaFiles = vfolder.QuotaFiles
         result.UsedFiles, result.UsedSize, err = dataprovider.GetUsedVirtualFolderQuota(vfolder.Name)
     } else {
-        if c.User.HasNoQuotaRestrictions(checkFiles) {
+        if c.User.HasNoQuotaRestrictions(checkFiles) && !getUsage {
             return result
         }
         result.QuotaSize = c.User.QuotaSize
@@ -981,9 +981,13 @@ func (c *BaseConnection) GetOpUnsupportedError() error {
 func (c *BaseConnection) GetGenericError(err error) error {
     switch c.protocol {
     case ProtocolSFTP:
+        if err == vfs.ErrStorageSizeUnavailable {
+            return sftp.ErrSSHFxOpUnsupported
+        }
         return sftp.ErrSSHFxFailure
     default:
-        if err == ErrPermissionDenied || err == ErrNotExist || err == ErrOpUnsupported || err == ErrQuotaExceeded {
+        if err == ErrPermissionDenied || err == ErrNotExist || err == ErrOpUnsupported ||
+            err == ErrQuotaExceeded || err == vfs.ErrStorageSizeUnavailable {
             return err
         }
         return ErrGenericFailure


@@ -917,7 +917,7 @@ func TestHasSpaceForRename(t *testing.T) {
     c := NewBaseConnection("", ProtocolSFTP, user, fs)
     // with quota tracking disabled hasSpaceForRename will always return true
     assert.True(t, c.hasSpaceForRename("", "", 0, ""))
-    quotaResult := c.HasSpace(true, "")
+    quotaResult := c.HasSpace(true, false, "")
     assert.True(t, quotaResult.HasSpace)

     err = closeDataprovider()
@@ -1028,7 +1028,7 @@ func TestHasSpace(t *testing.T) {
     fs, err := user.GetFilesystem("id")
     assert.NoError(t, err)
     c := NewBaseConnection("", ProtocolSFTP, user, fs)
-    quotaResult := c.HasSpace(true, "/")
+    quotaResult := c.HasSpace(true, false, "/")
     assert.True(t, quotaResult.HasSpace)

     user.VirtualFolders[0].QuotaFiles = 0
@@ -1038,7 +1038,7 @@ func TestHasSpace(t *testing.T) {
     user, err = dataprovider.UserExists(user.Username)
     assert.NoError(t, err)
     c.User = user
-    quotaResult = c.HasSpace(true, "/vdir/file")
+    quotaResult = c.HasSpace(true, false, "/vdir/file")
     assert.True(t, quotaResult.HasSpace)

     user.VirtualFolders[0].QuotaFiles = 10
@@ -1046,17 +1046,17 @@ func TestHasSpace(t *testing.T) {
     err = dataprovider.UpdateUser(&user)
     assert.NoError(t, err)
     c.User = user
-    quotaResult = c.HasSpace(true, "/vdir/file1")
+    quotaResult = c.HasSpace(true, false, "/vdir/file1")
     assert.True(t, quotaResult.HasSpace)

-    quotaResult = c.HasSpace(true, "/file")
+    quotaResult = c.HasSpace(true, false, "/file")
     assert.True(t, quotaResult.HasSpace)

     folder, err := dataprovider.GetFolderByName(folderName)
     assert.NoError(t, err)
     err = dataprovider.UpdateVirtualFolderQuota(folder, 10, 1048576, true)
     assert.NoError(t, err)
-    quotaResult = c.HasSpace(true, "/vdir/file1")
+    quotaResult = c.HasSpace(true, false, "/vdir/file1")
     assert.False(t, quotaResult.HasSpace)

     err = dataprovider.DeleteUser(user.Username)
@@ -1199,6 +1199,12 @@ func TestErrorsMapping(t *testing.T) {
         } else {
             assert.EqualError(t, err, ErrOpUnsupported.Error())
         }
+        err = conn.GetFsError(vfs.ErrStorageSizeUnavailable)
+        if protocol == ProtocolSFTP {
+            assert.EqualError(t, err, sftp.ErrSSHFxOpUnsupported.Error())
+        } else {
+            assert.EqualError(t, err, vfs.ErrStorageSizeUnavailable.Error())
+        }
         err = conn.GetFsError(nil)
         assert.NoError(t, err)
         err = conn.GetOpUnsupportedError()


@@ -400,7 +400,7 @@ func LoadConfig(configDir, configFile string) error {
     if strings.TrimSpace(globalConf.FTPD.Banner) == "" {
         globalConf.FTPD.Banner = defaultFTPDBanner
     }
-    if len(globalConf.ProviderConf.UsersBaseDir) > 0 && !utils.IsFileInputValid(globalConf.ProviderConf.UsersBaseDir) {
+    if globalConf.ProviderConf.UsersBaseDir != "" && !utils.IsFileInputValid(globalConf.ProviderConf.UsersBaseDir) {
         err = fmt.Errorf("invalid users base dir %#v will be ignored", globalConf.ProviderConf.UsersBaseDir)
         globalConf.ProviderConf.UsersBaseDir = ""
         logger.Warn(logSender, "", "Configuration error: %v", err)
@@ -455,7 +455,7 @@ func checkCommonParamsCompatibility() {
         logger.WarnToConsole("sftpd.idle_timeout is deprecated, please use common.idle_timeout")
         globalConf.Common.IdleTimeout = globalConf.SFTPD.IdleTimeout //nolint:staticcheck
     }
-    if len(globalConf.SFTPD.Actions.Hook) > 0 && len(globalConf.Common.Actions.Hook) == 0 { //nolint:staticcheck
+    if globalConf.SFTPD.Actions.Hook != "" && len(globalConf.Common.Actions.Hook) == 0 { //nolint:staticcheck
         logger.Warn(logSender, "", "sftpd.actions is deprecated, please use common.actions")
         logger.WarnToConsole("sftpd.actions is deprecated, please use common.actions")
         globalConf.Common.Actions.ExecuteOn = globalConf.SFTPD.Actions.ExecuteOn //nolint:staticcheck


@@ -2015,7 +2015,7 @@ func executePreLoginHook(username, loginMethod, ip, protocol string) (User, erro
     if err != nil {
         return u, fmt.Errorf("Pre-login hook error: %v", err)
     }
-    if len(strings.TrimSpace(string(out))) == 0 {
+    if strings.TrimSpace(string(out)) == "" {
         providerLog(logger.LevelDebug, "empty response from pre-login hook, no modification requested for user %#v id: %v",
             username, u.ID)
         if u.ID == 0 {
@@ -2182,13 +2182,13 @@ func doExternalAuth(username, password string, pubKey []byte, keyboardInteractiv
     if err != nil {
         return user, fmt.Errorf("Invalid external auth response: %v", err)
     }
-    if len(user.Username) == 0 {
+    if user.Username == "" {
         return user, ErrInvalidCredentials
     }
-    if len(password) > 0 {
+    if password != "" {
         user.Password = password
     }
-    if len(pkey) > 0 && !utils.IsStringPrefixInSlice(pkey, user.PublicKeys) {
+    if pkey != "" && !utils.IsStringPrefixInSlice(pkey, user.PublicKeys) {
         user.PublicKeys = append(user.PublicKeys, pkey)
     }
     // some users want to map multiple login usernames with a single SFTPGo account


@@ -718,22 +718,16 @@ func (u *User) GetQuotaSummary() string {
 func (u *User) GetPermissionsAsString() string {
     result := ""
     for dir, perms := range u.Permissions {
-        var dirPerms string
-        for _, p := range perms {
-            if len(dirPerms) > 0 {
-                dirPerms += ", "
-            }
-            dirPerms += p
-        }
+        dirPerms := strings.Join(perms, ", ")
         dp := fmt.Sprintf("%#v: %#v", dir, dirPerms)
         if dir == "/" {
-            if len(result) > 0 {
+            if result != "" {
                 result = dp + ", " + result
             } else {
                 result = dp
             }
         } else {
-            if len(result) > 0 {
+            if result != "" {
                 result += ", "
             }
             result += dp


@@ -62,7 +62,7 @@ docker run --name some-sftpgo \
   -p 2022:2022 \
   --mount type=bind,source=/my/own/sftpgodata,target=/srv/sftpgo \
   --mount type=bind,source=/my/own/sftpgohome,target=/var/lib/sftpgo \
-  -e SFTPGO_HTTPD__BIND_PORT=8090 \
+  -e SFTPGO_HTTPD__BINDINGS__0__PORT=8090 \
   -d "drakkan/sftpgo:tag"
 ```


@@ -16,6 +16,7 @@ import (
     "path"
     "path/filepath"
     "runtime"
+    "strconv"
     "testing"
     "time"
@@ -1585,7 +1586,7 @@ func TestAllocateAvailable(t *testing.T) {
     assert.NoError(t, err)
 }

-func TestAvailableUnsupportedFs(t *testing.T) {
+func TestAvailableSFTPFs(t *testing.T) {
     u := getTestUser()
     localUser, _, err := httpdtest.AddUser(u, http.StatusCreated)
     assert.NoError(t, err)
@@ -1593,10 +1594,12 @@ func TestAvailableUnsupportedFs(t *testing.T) {
     assert.NoError(t, err)
     client, err := getFTPClient(sftpUser, false)
     if assert.NoError(t, err) {
-        code, response, err := client.SendCustomCommand("AVBL")
+        code, response, err := client.SendCustomCommand("AVBL /")
         assert.NoError(t, err)
-        assert.Equal(t, ftp.StatusFileUnavailable, code)
-        assert.Contains(t, response, "unable to get available size for this storage backend")
+        assert.Equal(t, ftp.StatusFile, code)
+        avblSize, err := strconv.ParseInt(response, 10, 64)
+        assert.NoError(t, err)
+        assert.Greater(t, avblSize, int64(0))
         err = client.Quit()
         assert.NoError(t, err)


@@ -213,8 +213,7 @@ func (c *Connection) Chtimes(name string, atime time.Time, mtime time.Time) erro
 func (c *Connection) GetAvailableSpace(dirName string) (int64, error) {
     c.UpdateLastActivity()
-    quotaResult := c.HasSpace(false, path.Join(dirName, "fakefile.txt"))
+    quotaResult := c.HasSpace(false, false, path.Join(dirName, "fakefile.txt"))
     if !quotaResult.HasSpace {
         return 0, nil
     }
@@ -230,7 +229,11 @@ func (c *Connection) GetAvailableSpace(dirName string) (int64, error) {
         return 0, c.GetFsError(err)
     }
-    return c.Fs.GetAvailableDiskSize(p)
+    statVFS, err := c.Fs.GetAvailableDiskSize(p)
+    if err != nil {
+        return 0, c.GetFsError(err)
+    }
+    return int64(statVFS.FreeSpace()), nil
 }

 // the available space is the minimum between MaxUploadFileSize, if setted,
@@ -260,7 +263,7 @@ func (c *Connection) AllocateSpace(size int) error {
         folders = append(folders, path.Join(v.VirtualPath, "fakefile.txt"))
     }
     for _, f := range folders {
-        quotaResult := c.HasSpace(false, f)
+        quotaResult := c.HasSpace(false, false, f)
         if quotaResult.HasSpace {
             if quotaResult.QuotaSize == 0 {
                 // unlimited size is allowed
@@ -393,7 +396,7 @@ func (c *Connection) uploadFile(fsPath, ftpPath string, flags int) (ftpserver.Fi
 }

 func (c *Connection) handleFTPUploadToNewFile(resolvedPath, filePath, requestPath string) (ftpserver.FileTransfer, error) {
-    quotaResult := c.HasSpace(true, requestPath)
+    quotaResult := c.HasSpace(true, false, requestPath)
     if !quotaResult.HasSpace {
         c.Log(logger.LevelInfo, "denying file write due to quota limits")
         return nil, common.ErrQuotaExceeded
@@ -419,7 +422,7 @@ func (c *Connection) handleFTPUploadToNewFile(resolvedPath, filePath, requestPat
 func (c *Connection) handleFTPUploadToExistingFile(flags int, resolvedPath, filePath string, fileSize int64,
     requestPath string) (ftpserver.FileTransfer, error) {
     var err error
-    quotaResult := c.HasSpace(false, requestPath)
+    quotaResult := c.HasSpace(false, false, requestPath)
     if !quotaResult.HasSpace {
         c.Log(logger.LevelInfo, "denying file write due to quota limits")
         return nil, common.ErrQuotaExceeded


@@ -616,6 +616,27 @@ func TestUploadFileStatError(t *testing.T) {
     assert.NoError(t, err)
 }

+func TestAVBLErrors(t *testing.T) {
+    user := dataprovider.User{
+        Username: "user",
+        HomeDir:  filepath.Clean(os.TempDir()),
+    }
+    user.Permissions = make(map[string][]string)
+    user.Permissions["/"] = []string{dataprovider.PermAny}
+    mockCC := mockFTPClientContext{}
+    connID := fmt.Sprintf("%v", mockCC.ID())
+    fs := newMockOsFs(nil, nil, false, connID, user.GetHomeDir())
+    connection := &Connection{
+        BaseConnection: common.NewBaseConnection(connID, common.ProtocolFTP, user, fs),
+        clientContext:  mockCC,
+    }
+    _, err := connection.GetAvailableSpace("/")
+    assert.NoError(t, err)
+    _, err = connection.GetAvailableSpace("/missing-path")
+    assert.Error(t, err)
+    assert.True(t, os.IsNotExist(err))
+}
+
 func TestUploadOverwriteErrors(t *testing.T) {
     user := dataprovider.User{
         Username: "user",

go.mod

@@ -71,7 +71,7 @@ require (
 replace (
     github.com/jlaffaye/ftp => github.com/drakkan/ftp v0.0.0-20201114075148-9b9adce499a9
-    github.com/pkg/sftp => github.com/drakkan/sftp v0.0.0-20201211115031-0b6bbc64f191
+    github.com/pkg/sftp => github.com/drakkan/sftp v0.0.0-20210210202350-a2b46fc9c0d5
     golang.org/x/crypto => github.com/drakkan/crypto v0.0.0-20201217113543-470e61ed2598
     golang.org/x/net => github.com/drakkan/net v0.0.0-20210201075003-5fb2b186574d
 )

go.sum

@@ -173,8 +173,8 @@ github.com/drakkan/ftp v0.0.0-20201114075148-9b9adce499a9 h1:LPH1dEblAOO/LoG7yHP
 github.com/drakkan/ftp v0.0.0-20201114075148-9b9adce499a9/go.mod h1:2lmrmq866uF2tnje75wQHzmPXhmSWUt7Gyx2vgK1RCU=
 github.com/drakkan/net v0.0.0-20210201075003-5fb2b186574d h1:h2rU/lTUkEYB3y4k6+FgQNMajf4uE+sbMRn85kT+VTQ=
 github.com/drakkan/net v0.0.0-20210201075003-5fb2b186574d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-github.com/drakkan/sftp v0.0.0-20201211115031-0b6bbc64f191 h1:c+RLqMs6Aqc8IDc5MWTf+zqNlO4+5WfiJqZzHFlr4a8=
-github.com/drakkan/sftp v0.0.0-20201211115031-0b6bbc64f191/go.mod h1:fUqqXB5vEgVCZ131L+9say31RAri6aF6KDViawhxKK8=
+github.com/drakkan/sftp v0.0.0-20210210202350-a2b46fc9c0d5 h1:jVxjoPrGY9Ypw65tTHRdDvumOE3ys2fLZfvFT6+gFPU=
+github.com/drakkan/sftp v0.0.0-20210210202350-a2b46fc9c0d5/go.mod h1:fUqqXB5vEgVCZ131L+9say31RAri6aF6KDViawhxKK8=
 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=


@@ -1,6 +1,6 @@
 #!/bin/bash
-NFPM_VERSION=2.2.3
+NFPM_VERSION=2.2.4
 NFPM_ARCH=${NFPM_ARCH:-amd64}
 if [ -z ${SFTPGO_VERSION} ]
 then


@@ -272,7 +272,7 @@ func (s *Service) loadInitialData() error {
 func (s *Service) restoreDump(dump dataprovider.BackupData) error {
     err := httpd.RestoreAdmins(dump.Admins, s.LoadDataFrom, s.LoadDataMode)
     if err != nil {
-        return fmt.Errorf("unable to restore folders from file %#v: %v", s.LoadDataFrom, err)
+        return fmt.Errorf("unable to restore admins from file %#v: %v", s.LoadDataFrom, err)
     }
     err = httpd.RestoreFolders(dump.Folders, s.LoadDataFrom, s.LoadDataMode, s.LoadDataQuotaScan)
     if err != nil {


@@ -253,11 +253,43 @@ func (c *Connection) Lstat(request *sftp.Request) (sftp.ListerAt, error) {
     return listerAt([]os.FileInfo{s}), nil
 }

+// StatVFS implements StatVFSFileCmder interface
+func (c *Connection) StatVFS(r *sftp.Request) (*sftp.StatVFS, error) {
+    c.UpdateLastActivity()
+
+    // we are assuming that r.Filepath is a dir, this could be wrong but should
+    // not produce any side effect here.
+    // we don't consider c.User.Filters.MaxUploadFileSize, we return disk stats here
+    // not the limit for a single file upload
+    quotaResult := c.HasSpace(true, true, path.Join(r.Filepath, "fakefile.txt"))
+
+    p, err := c.Fs.ResolvePath(r.Filepath)
+    if err != nil {
+        return nil, c.GetFsError(err)
+    }
+
+    if !quotaResult.HasSpace {
+        return c.getStatVFSFromQuotaResult(p, quotaResult), nil
+    }
+
+    if quotaResult.QuotaSize == 0 && quotaResult.QuotaFiles == 0 {
+        // no quota restrictions
+        statvfs, err := c.Fs.GetAvailableDiskSize(p)
+        if err == vfs.ErrStorageSizeUnavailable {
+            return c.getStatVFSFromQuotaResult(p, quotaResult), nil
+        }
+        return statvfs, err
+    }
+
+    // there is free space but some limits are configured
+    return c.getStatVFSFromQuotaResult(p, quotaResult), nil
+}
+
 func (c *Connection) getSFTPCmdTargetPath(requestTarget string) (string, error) {
     var target string
     // If a target is provided in this request validate that it is going to the correct
     // location for the server. If it is not, return an error
-    if len(requestTarget) > 0 {
+    if requestTarget != "" {
         var err error
         target, err = c.Fs.ResolvePath(requestTarget)
         if err != nil {
@@ -309,7 +341,7 @@ func (c *Connection) handleSFTPRemove(filePath string, request *sftp.Request) er
 }

 func (c *Connection) handleSFTPUploadToNewFile(resolvedPath, filePath, requestPath string, errForRead error) (sftp.WriterAtReaderAt, error) {
-    quotaResult := c.HasSpace(true, requestPath)
+    quotaResult := c.HasSpace(true, false, requestPath)
     if !quotaResult.HasSpace {
         c.Log(logger.LevelInfo, "denying file write due to quota limits")
         return nil, sftp.ErrSSHFxFailure
@@ -336,7 +368,7 @@ func (c *Connection) handleSFTPUploadToNewFile(resolvedPath, filePath, requestPa
 func (c *Connection) handleSFTPUploadToExistingFile(pflags sftp.FileOpenFlags, resolvedPath, filePath string,
     fileSize int64, requestPath string, errForRead error) (sftp.WriterAtReaderAt, error) {
     var err error
-    quotaResult := c.HasSpace(false, requestPath)
+    quotaResult := c.HasSpace(false, false, requestPath)
     if !quotaResult.HasSpace {
         c.Log(logger.LevelInfo, "denying file write due to quota limits")
         return nil, sftp.ErrSSHFxFailure
@@ -406,6 +438,52 @@ func (c *Connection) Disconnect() error {
     return c.channel.Close()
 }

+func (c *Connection) getStatVFSFromQuotaResult(name string, quotaResult vfs.QuotaCheckResult) *sftp.StatVFS {
+    if quotaResult.QuotaSize == 0 || quotaResult.QuotaFiles == 0 {
+        s, err := c.Fs.GetAvailableDiskSize(name)
+        if err == nil {
+            if quotaResult.QuotaSize == 0 {
+                quotaResult.QuotaSize = int64(s.TotalSpace())
+            }
+            if quotaResult.QuotaFiles == 0 {
+                quotaResult.QuotaFiles = int(s.Files)
+            }
+        }
+    }
+    // if we are unable to get quota size or quota files we add some arbitrary values
+    if quotaResult.QuotaSize == 0 {
+        quotaResult.QuotaSize = quotaResult.UsedSize + 8*1024*1024*1024*1024 // 8TB
+    }
+    if quotaResult.QuotaFiles == 0 {
+        quotaResult.QuotaFiles = quotaResult.UsedFiles + 1000000 // 1 million
+    }
+
+    bsize := uint64(4096)
+    for bsize > uint64(quotaResult.QuotaSize) {
+        bsize = bsize / 4
+    }
+    blocks := uint64(quotaResult.QuotaSize) / bsize
+    bfree := uint64(quotaResult.QuotaSize-quotaResult.UsedSize) / bsize
+    files := uint64(quotaResult.QuotaFiles)
+    ffree := uint64(quotaResult.QuotaFiles - quotaResult.UsedFiles)
+    if !quotaResult.HasSpace {
+        bfree = 0
+        ffree = 0
+    }
+
+    return &sftp.StatVFS{
+        Bsize:   bsize,
+        Frsize:  bsize,
+        Blocks:  blocks,
+        Bfree:   bfree,
+        Bavail:  bfree,
+        Files:   files,
+        Ffree:   ffree,
+        Favail:  ffree,
+        Namemax: 255,
+    }
+}
+
 func getOSOpenFlags(requestFlags sftp.FileOpenFlags) (flags int) {
     var osFlags int
     if requestFlags.Read && requestFlags.Write {
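For context, a minimal client-side sketch of what the statvfs@openssh.com support added above enables. This is an illustration only: the address and credentials are placeholders for a local SFTPGo instance, and it uses the github.com/pkg/sftp client API already exercised by the tests in this commit (ssh.Dial, sftp.NewClient, Client.StatVFS and the StatVFS TotalSpace/FreeSpace helpers).

```go
package main

import (
	"fmt"
	"log"

	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
)

func main() {
	// Placeholder connection details for a local test instance.
	config := &ssh.ClientConfig{
		User:            "testuser",
		Auth:            []ssh.AuthMethod{ssh.Password("password")},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // acceptable only for local testing
	}
	conn, err := ssh.Dial("tcp", "127.0.0.1:2022", config)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client, err := sftp.NewClient(conn)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// StatVFS sends statvfs@openssh.com; the server answers with real disk
	// stats or with the quota-derived values built in getStatVFSFromQuotaResult.
	st, err := client.StatVFS("/")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("total: %d bytes, free: %d bytes, free inodes: %d\n",
		st.TotalSpace(), st.FreeSpace(), st.Ffree)
}
```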


@@ -376,6 +376,11 @@ func TestWithInvalidHome(t *testing.T) {
     }
     _, err = c.Fs.ResolvePath("../upper_path")
     assert.Error(t, err, "tested path is not a home subdir")
+    _, err = c.StatVFS(&sftp.Request{
+        Method:   "StatVFS",
+        Filepath: "../unresolvable-path",
+    })
+    assert.Error(t, err)
 }

 func TestSFTPCmdTargetPath(t *testing.T) {
@@ -408,7 +413,7 @@ func TestSFTPGetUsedQuota(t *testing.T) {
     connection := Connection{
         BaseConnection: common.NewBaseConnection("", common.ProtocolSFTP, u, nil),
     }
-    quotaResult := connection.HasSpace(false, "/")
+    quotaResult := connection.HasSpace(false, false, "/")
     assert.False(t, quotaResult.HasSpace)
 }


@@ -191,7 +191,7 @@ func (c *scpCommand) getUploadFileData(sizeToRead int64, transfer *transfer) err
 }

 func (c *scpCommand) handleUploadFile(resolvedPath, filePath string, sizeToRead int64, isNewFile bool, fileSize int64, requestPath string) error {
-    quotaResult := c.connection.HasSpace(isNewFile, requestPath)
+    quotaResult := c.connection.HasSpace(isNewFile, false, requestPath)
     if !quotaResult.HasSpace {
         err := fmt.Errorf("denying file write due to quota limits")
         c.connection.Log(logger.LevelWarn, "error uploading file: %#v, err: %v", filePath, err)


@@ -33,7 +33,7 @@ const (
 )

 var (
-    sftpExtensions = []string{"posix-rename@openssh.com"}
+    sftpExtensions = []string{"statvfs@openssh.com"}
 )

 // Binding defines the configuration for a network listener


@@ -456,6 +456,12 @@ func TestBasicSFTPFsHandling(t *testing.T) {
         assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
         assert.Equal(t, expectedQuotaSize, user.UsedQuotaSize)
+        stat, err := client.StatVFS("/")
+        assert.NoError(t, err)
+        assert.Equal(t, uint64(u.QuotaSize/4096), stat.Blocks)
+        assert.Equal(t, uint64((u.QuotaSize-testFileSize)/4096), stat.Bfree)
+        assert.Equal(t, uint64(1), stat.Files-stat.Ffree)
         err = os.Remove(testFilePath)
         assert.NoError(t, err)
         err = os.Remove(localDownloadPath)
@@ -6371,6 +6377,136 @@ func TestGetVirtualFolderForPath(t *testing.T) {
     assert.NoError(t, err)
 }

+func TestStatVFS(t *testing.T) {
+    usePubKey := false
+    user, _, err := httpdtest.AddUser(getTestUser(usePubKey), http.StatusCreated)
+    assert.NoError(t, err)
+    testFileSize := int64(65535)
+    client, err := getSftpClient(user, usePubKey)
+    if assert.NoError(t, err) {
+        defer client.Close()
+
+        stat, err := client.StatVFS("/")
+        assert.NoError(t, err)
+        assert.Greater(t, stat.ID, uint32(0))
+        assert.Greater(t, stat.Blocks, uint64(0))
+        assert.Greater(t, stat.Bsize, uint64(0))
+
+        _, err = client.StatVFS("missing-path")
+        assert.Error(t, err)
+        assert.True(t, os.IsNotExist(err))
+    }
+
+    user.QuotaFiles = 100
+    user, _, err = httpdtest.UpdateUser(user, http.StatusOK, "")
+    assert.NoError(t, err)
+    client, err = getSftpClient(user, usePubKey)
+    if assert.NoError(t, err) {
+        defer client.Close()
+
+        testFilePath := filepath.Join(homeBasePath, testFileName)
+        err = createTestFile(testFilePath, testFileSize)
+        assert.NoError(t, err)
+        err = sftpUploadFile(testFilePath, testFileName, testFileSize, client)
+        assert.NoError(t, err)
+        err = os.Remove(testFilePath)
+        assert.NoError(t, err)
+
+        stat, err := client.StatVFS("/")
+        assert.NoError(t, err)
+        assert.Greater(t, stat.ID, uint32(0))
+        assert.Greater(t, stat.Blocks, uint64(0))
+        assert.Greater(t, stat.Bsize, uint64(0))
+        assert.Equal(t, uint64(100), stat.Files)
+        assert.Equal(t, uint64(99), stat.Ffree)
+    }
+
+    user.QuotaSize = 8192
+    user, _, err = httpdtest.UpdateUser(user, http.StatusOK, "")
+    assert.NoError(t, err)
+    client, err = getSftpClient(user, usePubKey)
+    if assert.NoError(t, err) {
+        defer client.Close()
+
+        stat, err := client.StatVFS("/")
+        assert.NoError(t, err)
+        assert.Greater(t, stat.ID, uint32(0))
+        assert.Greater(t, stat.Blocks, uint64(0))
+        assert.Greater(t, stat.Bsize, uint64(0))
+        assert.Equal(t, uint64(100), stat.Files)
+        assert.Equal(t, uint64(0), stat.Ffree)
+        assert.Equal(t, uint64(2), stat.Blocks)
+        assert.Equal(t, uint64(0), stat.Bfree)
+    }
+
+    user.QuotaFiles = 0
+    user, _, err = httpdtest.UpdateUser(user, http.StatusOK, "")
+    assert.NoError(t, err)
+    client, err = getSftpClient(user, usePubKey)
+    if assert.NoError(t, err) {
+        defer client.Close()
+
+        stat, err := client.StatVFS("/")
+        assert.NoError(t, err)
+        assert.Greater(t, stat.ID, uint32(0))
+        assert.Greater(t, stat.Blocks, uint64(0))
+        assert.Greater(t, stat.Bsize, uint64(0))
+        assert.Greater(t, stat.Files, uint64(0))
+        assert.Equal(t, uint64(0), stat.Ffree)
+        assert.Equal(t, uint64(2), stat.Blocks)
+        assert.Equal(t, uint64(0), stat.Bfree)
+    }
+
+    user.QuotaSize = 1
+    user, _, err = httpdtest.UpdateUser(user, http.StatusOK, "")
+    assert.NoError(t, err)
+    client, err = getSftpClient(user, usePubKey)
+    if assert.NoError(t, err) {
+        defer client.Close()
+
+        stat, err := client.StatVFS("/")
+        assert.NoError(t, err)
+        assert.Greater(t, stat.ID, uint32(0))
+        assert.Equal(t, uint64(1), stat.Blocks)
+        assert.Equal(t, uint64(1), stat.Bsize)
+        assert.Greater(t, stat.Files, uint64(0))
+        assert.Equal(t, uint64(0), stat.Ffree)
+        assert.Equal(t, uint64(1), stat.Blocks)
+        assert.Equal(t, uint64(0), stat.Bfree)
+    }
+
+    _, err = httpdtest.RemoveUser(user, http.StatusOK)
+    assert.NoError(t, err)
+    err = os.RemoveAll(user.GetHomeDir())
+    assert.NoError(t, err)
+}
+
+func TestStatVFSCloudBackend(t *testing.T) {
+    usePubKey := true
+    u := getTestUser(usePubKey)
+    u.FsConfig.Provider = dataprovider.AzureBlobFilesystemProvider
+    u.FsConfig.AzBlobConfig.SASURL = "https://myaccount.blob.core.windows.net/sasurl"
+    user, _, err := httpdtest.AddUser(u, http.StatusCreated)
+    assert.NoError(t, err)
+    client, err := getSftpClient(user, usePubKey)
+    if assert.NoError(t, err) {
+        defer client.Close()
+
+        err = dataprovider.UpdateUserQuota(user, 100, 8192, true)
+        assert.NoError(t, err)
+        stat, err := client.StatVFS("/")
+        assert.NoError(t, err)
+        assert.Greater(t, stat.ID, uint32(0))
+        assert.Greater(t, stat.Blocks, uint64(0))
+        assert.Greater(t, stat.Bsize, uint64(0))
+        assert.Equal(t, uint64(1000000+100), stat.Files)
+        assert.Equal(t, uint64(2147483648+2), stat.Blocks)
+        assert.Equal(t, uint64(1000000), stat.Ffree)
+        assert.Equal(t, uint64(2147483648), stat.Bfree)
+    }
+
+    _, err = httpdtest.RemoveUser(user, http.StatusOK)
+    assert.NoError(t, err)
+}
+
 func TestSSHCommands(t *testing.T) {
     usePubKey := false
     user, _, err := httpdtest.AddUser(getTestUser(usePubKey), http.StatusCreated)
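For reference, the expected values in TestStatVFSCloudBackend follow from the fallbacks in getStatVFSFromQuotaResult above: the Azure backend cannot report a disk size and no quota is configured, so QuotaSize becomes UsedSize + 8*1024^4 bytes and QuotaFiles becomes UsedFiles + 1000000. With the recorded usage of 100 files / 8192 bytes and the 4096-byte block size, Blocks = (8*1024^4 + 8192)/4096 = 2147483648 + 2, Bfree = 8*1024^4/4096 = 2147483648, Files = 1000100 and Ffree = 1000000, which is exactly what the assertions check.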


@@ -309,7 +309,7 @@ func (c *sshCommand) executeSystemCommand(command systemCommand) error {
         return c.sendErrorResponse(errUnsupportedConfig)
     }
     sshDestPath := c.getDestPath()
-    quotaResult := c.connection.HasSpace(true, command.quotaCheckPath)
+    quotaResult := c.connection.HasSpace(true, false, command.quotaCheckPath)
     if !quotaResult.HasSpace {
         return c.sendErrorResponse(common.ErrQuotaExceeded)
     }
@@ -640,7 +640,7 @@ func (c *sshCommand) checkCopyDestination(fsDestPath string) error {
 }

 func (c *sshCommand) checkCopyQuota(numFiles int, filesSize int64, requestPath string) error {
-    quotaResult := c.connection.HasSpace(true, requestPath)
+    quotaResult := c.connection.HasSpace(true, false, requestPath)
     if !quotaResult.HasSpace {
         return common.ErrQuotaExceeded
     }


@@ -21,6 +21,7 @@ import (
     "github.com/Azure/azure-storage-blob-go/azblob"
     "github.com/eikenb/pipeat"
+    "github.com/pkg/sftp"

     "github.com/drakkan/sftpgo/logger"
     "github.com/drakkan/sftpgo/metrics"
@@ -702,8 +703,8 @@ func (*AzureBlobFs) Close() error {
 }

 // GetAvailableDiskSize return the available size for the specified path
-func (*AzureBlobFs) GetAvailableDiskSize(dirName string) (int64, error) {
-    return 0, errStorageSizeUnavailable
+func (*AzureBlobFs) GetAvailableDiskSize(dirName string) (*sftp.StatVFS, error) {
+    return nil, ErrStorageSizeUnavailable
 }

 func (fs *AzureBlobFs) isEqual(key string, virtualName string) bool {


@@ -18,6 +18,7 @@ import (
     "cloud.google.com/go/storage"
     "github.com/eikenb/pipeat"
+    "github.com/pkg/sftp"
     "google.golang.org/api/googleapi"
     "google.golang.org/api/iterator"
     "google.golang.org/api/option"
@@ -696,6 +697,6 @@ func (fs *GCSFs) Close() error {
 }

 // GetAvailableDiskSize return the available size for the specified path
-func (*GCSFs) GetAvailableDiskSize(dirName string) (int64, error) {
-    return 0, errStorageSizeUnavailable
+func (*GCSFs) GetAvailableDiskSize(dirName string) (*sftp.StatVFS, error) {
+    return nil, ErrStorageSizeUnavailable
 }


@@ -11,8 +11,8 @@ import (
     "time"

     "github.com/eikenb/pipeat"
+    "github.com/pkg/sftp"
     "github.com/rs/xid"
-    "github.com/shirou/gopsutil/v3/disk"

     "github.com/drakkan/sftpgo/logger"
     "github.com/drakkan/sftpgo/utils"
@@ -480,10 +480,6 @@ func (*OsFs) Close() error {
 }

 // GetAvailableDiskSize return the available size for the specified path
-func (*OsFs) GetAvailableDiskSize(dirName string) (int64, error) {
-    usage, err := disk.Usage(dirName)
-    if err != nil {
-        return 0, err
-    }
-    return int64(usage.Free), nil
+func (*OsFs) GetAvailableDiskSize(dirName string) (*sftp.StatVFS, error) {
+    return getStatFS(dirName)
 }


@@ -20,6 +20,7 @@ import (
     "github.com/aws/aws-sdk-go/service/s3"
     "github.com/aws/aws-sdk-go/service/s3/s3manager"
     "github.com/eikenb/pipeat"
+    "github.com/pkg/sftp"

     "github.com/drakkan/sftpgo/logger"
     "github.com/drakkan/sftpgo/metrics"
@@ -661,6 +662,6 @@ func (*S3Fs) Close() error {
 }

 // GetAvailableDiskSize return the available size for the specified path
-func (*S3Fs) GetAvailableDiskSize(dirName string) (int64, error) {
-    return 0, errStorageSizeUnavailable
+func (*S3Fs) GetAvailableDiskSize(dirName string) (*sftp.StatVFS, error) {
+    return nil, ErrStorageSizeUnavailable
 }


@@ -503,6 +503,17 @@ func (fs *SFTPFs) GetMimeType(name string) (string, error) {
     return ctype, err
 }

+// GetAvailableDiskSize return the available size for the specified path
+func (fs *SFTPFs) GetAvailableDiskSize(dirName string) (*sftp.StatVFS, error) {
+    if err := fs.checkConnection(); err != nil {
+        return nil, err
+    }
+    if _, ok := fs.sftpClient.HasExtension("statvfs@openssh.com"); !ok {
+        return nil, ErrStorageSizeUnavailable
+    }
+    return fs.sftpClient.StatVFS(dirName)
+}
+
 // Close the connection
 func (fs *SFTPFs) Close() error {
     fs.Lock()
@@ -521,11 +532,6 @@ func (fs *SFTPFs) Close() error {
     return sshErr
 }

-// GetAvailableDiskSize return the available size for the specified path
-func (*SFTPFs) GetAvailableDiskSize(dirName string) (int64, error) {
-    return 0, errStorageSizeUnavailable
-}
-
 func (fs *SFTPFs) checkConnection() error {
     err := fs.closed()
     if err == nil {

vfs/statvfs_fallback.go (new file)

@@ -0,0 +1,38 @@
+// +build !darwin,!linux,!freebsd
+
+package vfs
+
+import (
+    "github.com/pkg/sftp"
+    "github.com/shirou/gopsutil/v3/disk"
+)
+
+const bsize = uint64(4096)
+
+func getStatFS(path string) (*sftp.StatVFS, error) {
+    usage, err := disk.Usage(path)
+    if err != nil {
+        return nil, err
+    }
+    // we assume block size = 4096
+    blocks := usage.Total / bsize
+    bfree := usage.Free / bsize
+    files := usage.InodesTotal
+    ffree := usage.InodesFree
+    if files == 0 {
+        // these assumptions are wrong but still better than returning 0
+        files = blocks / 4
+        ffree = bfree / 4
+    }
+    return &sftp.StatVFS{
+        Bsize:   bsize,
+        Frsize:  bsize,
+        Blocks:  blocks,
+        Bfree:   bfree,
+        Bavail:  bfree,
+        Files:   files,
+        Ffree:   ffree,
+        Favail:  ffree,
+        Namemax: 255,
+    }, nil
+}

vfs/statvfs_linux.go (new file)

@@ -0,0 +1,28 @@
+// +build linux
+
+package vfs
+
+import (
+    "github.com/pkg/sftp"
+    "golang.org/x/sys/unix"
+)
+
+func getStatFS(path string) (*sftp.StatVFS, error) {
+    stat := unix.Statfs_t{}
+    err := unix.Statfs(path, &stat)
+    if err != nil {
+        return nil, err
+    }
+    return &sftp.StatVFS{
+        Bsize:   uint64(stat.Bsize),
+        Frsize:  uint64(stat.Frsize),
+        Blocks:  stat.Blocks,
+        Bfree:   stat.Bfree,
+        Bavail:  stat.Bavail,
+        Files:   stat.Files,
+        Ffree:   stat.Ffree,
+        Favail:  stat.Ffree, // not sure how to calculate Favail
+        Flag:    uint64(stat.Flags),
+        Namemax: uint64(stat.Namelen),
+    }, nil
+}

vfs/statvfs_unix.go (new file)

@@ -0,0 +1,28 @@
+// +build freebsd darwin
+
+package vfs
+
+import (
+    "github.com/pkg/sftp"
+    "golang.org/x/sys/unix"
+)
+
+func getStatFS(path string) (*sftp.StatVFS, error) {
+    stat := unix.Statfs_t{}
+    err := unix.Statfs(path, &stat)
+    if err != nil {
+        return nil, err
+    }
+    return &sftp.StatVFS{
+        Bsize:   uint64(stat.Bsize),
+        Frsize:  uint64(stat.Bsize),
+        Blocks:  stat.Blocks,
+        Bfree:   stat.Bfree,
+        Bavail:  uint64(stat.Bavail),
+        Files:   stat.Files,
+        Ffree:   uint64(stat.Ffree),
+        Favail:  uint64(stat.Ffree), // not sure how to calculate Favail
+        Flag:    uint64(stat.Flags),
+        Namemax: 255, // we use a conservative value here
+    }, nil
+}


@@ -14,6 +14,7 @@ import (
     "time"

     "github.com/eikenb/pipeat"
+    "github.com/pkg/sftp"

     "github.com/drakkan/sftpgo/kms"
     "github.com/drakkan/sftpgo/logger"
@@ -23,8 +24,9 @@
 const dirMimeType = "inode/directory"

 var (
     validAzAccessTier = []string{"", "Archive", "Hot", "Cool"}
-    errStorageSizeUnavailable = errors.New("unable to get available size for this storage backend")
+    // ErrStorageSizeUnavailable is returned if the storage backend does not support getting the size
+    ErrStorageSizeUnavailable = errors.New("unable to get available size for this storage backend")
 )

 // Fs defines the interface for filesystem backends
@@ -60,7 +62,7 @@ type Fs interface {
     Join(elem ...string) string
     HasVirtualFolders() bool
     GetMimeType(name string) (string, error)
-    GetAvailableDiskSize(dirName string) (int64, error)
+    GetAvailableDiskSize(dirName string) (*sftp.StatVFS, error)
     Close() error
 }


@@ -211,7 +211,7 @@ func (c *Connection) putFile(fsPath, virtualPath string) (webdav.File, error) {
 }

 func (c *Connection) handleUploadToNewFile(resolvedPath, filePath, requestPath string) (webdav.File, error) {
-    quotaResult := c.HasSpace(true, requestPath)
+    quotaResult := c.HasSpace(true, false, requestPath)
     if !quotaResult.HasSpace {
         c.Log(logger.LevelInfo, "denying file write due to quota limits")
         return nil, common.ErrQuotaExceeded
@@ -236,7 +236,7 @@ func (c *Connection) handleUploadToNewFile(resolvedPath, filePath, requestPath s
 func (c *Connection) handleUploadToExistingFile(resolvedPath, filePath string, fileSize int64,
     requestPath string) (webdav.File, error) {
     var err error
-    quotaResult := c.HasSpace(false, requestPath)
+    quotaResult := c.HasSpace(false, false, requestPath)
     if !quotaResult.HasSpace {
         c.Log(logger.LevelInfo, "denying file write due to quota limits")
         return nil, common.ErrQuotaExceeded