Mirror of https://github.com/drakkan/sftpgo.git (synced 2024-11-22 15:40:23 +00:00)

commit adad8e658b (parent e10487ad57)

osfs: add optional buffering

Signed-off-by: Nicola Murino <nicola.murino@gmail.com>

32 changed files with 895 additions and 170 deletions
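For orientation (this note is not part of the commit): the updated sftpgo/sdk dependency brings an OSFsConfig with ReadBufferSize and WriteBufferSize fields, used by the local and crypt filesystems in the hunks below. A minimal, hedged sketch of how a user definition enables the new buffering, based only on field names visible in the test changes of this diff; the surrounding setup is illustrative:

	// Sketch only: field names come from this diff, the values are examples.
	// A zero value appears to keep the previous, unbuffered behaviour
	// (the group-merge code below treats 0 as "not set").
	u := getTestUser() // test helper used throughout this diff
	u.FsConfig.OSConfig = sdk.OSFsConfig{
		ReadBufferSize:  1,
		WriteBufferSize: 1,
	}
	// The crypt filesystem gets the same knobs through its embedded OSFsConfig.
	u.FsConfig.CryptConfig.OSFsConfig = sdk.OSFsConfig{
		ReadBufferSize:  2,
		WriteBufferSize: 3,
	}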
go.mod (4 changes)
@@ -53,7 +53,7 @@ require (
 	github.com/rs/cors v1.9.0
 	github.com/rs/xid v1.5.0
 	github.com/rs/zerolog v1.29.1
-	github.com/sftpgo/sdk v0.1.4-0.20230512160325-38e59551f700
+	github.com/sftpgo/sdk v0.1.4-0.20230514135418-8b5d36c556e0
 	github.com/shirou/gopsutil/v3 v3.23.4
 	github.com/spf13/afero v1.9.5
 	github.com/spf13/cobra v1.7.0
@@ -148,7 +148,7 @@ require (
 	github.com/prometheus/procfs v0.9.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/shoenig/go-m1cpu v0.1.6 // indirect
-	github.com/spf13/cast v1.5.0 // indirect
+	github.com/spf13/cast v1.5.1 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/tklauser/go-sysconf v0.3.11 // indirect
go.sum (9 changes)
@@ -935,8 +935,8 @@ github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHqu
 github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo=
 github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
 github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
-github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
 github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
+github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
@@ -1842,8 +1842,8 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg
 github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
 github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
 github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4 h1:PT+ElG/UUFMfqy5HrxJxNzj3QBOf7dZwupeVC+mG1Lo=
-github.com/sftpgo/sdk v0.1.4-0.20230512160325-38e59551f700 h1:jL6mfKAaFv862AnBUxIfTH9wmnuPjbWyjHQUGDo+Xt0=
-github.com/sftpgo/sdk v0.1.4-0.20230512160325-38e59551f700/go.mod h1:gDxDaU3rhp9Y92ddsE7SbQ8jdBNNWK1DKlp5eHXrsb8=
+github.com/sftpgo/sdk v0.1.4-0.20230514135418-8b5d36c556e0 h1:qUyCryFuF7zKUpNTLgn2KuQp/uYT3hKi4XU/ChJUmpo=
+github.com/sftpgo/sdk v0.1.4-0.20230514135418-8b5d36c556e0/go.mod h1:TjeoMWS0JEXt9RukJveTnaiHj4+MVLtUiDC+mY++Odk=
 github.com/shirou/gopsutil/v3 v3.23.4 h1:hZwmDxZs7Ewt75DV81r4pFMqbq+di2cbt9FsQBqLD2o=
 github.com/shirou/gopsutil/v3 v3.23.4/go.mod h1:ZcGxyfzAMRevhUR2+cfhXDH6gQdFYE/t8j1nsU4mPI8=
 github.com/shoenig/go-m1cpu v0.1.5/go.mod h1:Wwvst4LR89UxjeFtLRMrpgRiyY4xPsejnVZym39dbAQ=
@@ -1883,8 +1883,9 @@ github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
 github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
 github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
+github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
+github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
 github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
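For the code hunks that follow, the central API change is that vfs.NewOsFs takes a fourth argument for the optional buffering configuration; passing nil keeps the previous behaviour. A hedged sketch of the two call forms that appear in this diff (identifiers are placeholders):

	// Sketch of the two NewOsFs call patterns used in this commit.
	fs := vfs.NewOsFs("connID", os.TempDir(), "", nil)                   // no buffering config
	fs = vfs.NewOsFs("connID", u.GetHomeDir(), "", &u.FsConfig.OSConfig) // user-supplied buffer sizes
	_ = fs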
@@ -262,7 +262,7 @@ func TestPreDeleteAction(t *testing.T) {
 	}
 	user.Permissions = make(map[string][]string)
 	user.Permissions["/"] = []string{dataprovider.PermAny}
-	fs := vfs.NewOsFs("id", homeDir, "")
+	fs := vfs.NewOsFs("id", homeDir, "", nil)
 	c := NewBaseConnection("id", ProtocolSFTP, "", "", user)

 	testfile := filepath.Join(user.HomeDir, "testfile")
@@ -861,7 +861,7 @@ func TestConnectionStatus(t *testing.T) {
 			Username: username,
 		},
 	}
-	fs := vfs.NewOsFs("", os.TempDir(), "")
+	fs := vfs.NewOsFs("", os.TempDir(), "", nil)
 	c1 := NewBaseConnection("id1", ProtocolSFTP, "", "", user)
 	fakeConn1 := &fakeConnection{
 		BaseConnection: c1,
@@ -86,7 +86,7 @@ func (fs *MockOsFs) Walk(_ string, walkFn filepath.WalkFunc) error {

 func newMockOsFs(hasVirtualFolders bool, connectionID, rootDir, name string, err error) vfs.Fs {
 	return &MockOsFs{
-		Fs: vfs.NewOsFs(connectionID, rootDir, ""),
+		Fs: vfs.NewOsFs(connectionID, rootDir, "", nil),
 		name: name,
 		hasVirtualFolders: hasVirtualFolders,
 		err: err,
@@ -114,7 +114,7 @@ func TestRemoveErrors(t *testing.T) {
 	}
 	user.Permissions = make(map[string][]string)
 	user.Permissions["/"] = []string{dataprovider.PermAny}
-	fs := vfs.NewOsFs("", os.TempDir(), "")
+	fs := vfs.NewOsFs("", os.TempDir(), "", nil)
 	conn := NewBaseConnection("", ProtocolFTP, "", "", user)
 	err := conn.IsRemoveDirAllowed(fs, mappedPath, "/virtualpath1")
 	if assert.Error(t, err) {
@@ -159,7 +159,7 @@ func TestSetStatMode(t *testing.T) {
 }

 func TestRecursiveRenameWalkError(t *testing.T) {
-	fs := vfs.NewOsFs("", filepath.Clean(os.TempDir()), "")
+	fs := vfs.NewOsFs("", filepath.Clean(os.TempDir()), "", nil)
 	conn := NewBaseConnection("", ProtocolWebDAV, "", "", dataprovider.User{
 		BaseUser: sdk.BaseUser{
 			Permissions: map[string][]string{
@@ -193,7 +193,7 @@ func TestRecursiveRenameWalkError(t *testing.T) {
 }

 func TestCrossRenameFsErrors(t *testing.T) {
-	fs := vfs.NewOsFs("", os.TempDir(), "")
+	fs := vfs.NewOsFs("", os.TempDir(), "", nil)
 	conn := NewBaseConnection("", ProtocolWebDAV, "", "", dataprovider.User{})
 	res := conn.hasSpaceForCrossRename(fs, vfs.QuotaCheckResult{}, 1, "missingsource")
 	assert.False(t, res)
@@ -224,7 +224,7 @@ func TestRenameVirtualFolders(t *testing.T) {
 		},
 		VirtualPath: vdir,
 	})
-	fs := vfs.NewOsFs("", os.TempDir(), "")
+	fs := vfs.NewOsFs("", os.TempDir(), "", nil)
 	conn := NewBaseConnection("", ProtocolFTP, "", "", u)
 	res := conn.isRenamePermitted(fs, fs, "source", "target", vdir, "vdirtarget", nil)
 	assert.False(t, res)
@@ -376,7 +376,7 @@ func TestUpdateQuotaAfterRename(t *testing.T) {
 }

 func TestErrorsMapping(t *testing.T) {
-	fs := vfs.NewOsFs("", os.TempDir(), "")
+	fs := vfs.NewOsFs("", os.TempDir(), "", nil)
 	conn := NewBaseConnection("", ProtocolSFTP, "", "", dataprovider.User{BaseUser: sdk.BaseUser{HomeDir: os.TempDir()}})
 	osErrorsProtocols := []string{ProtocolWebDAV, ProtocolFTP, ProtocolHTTP, ProtocolHTTPShare,
 		ProtocolDataRetention, ProtocolOIDC, protocolEventAction}
@@ -198,8 +198,8 @@ func (t *BaseTransfer) SetTimes(fsPath string, atime time.Time, mtime time.Time)
 // If atomic uploads are enabled this differ from fsPath
 func (t *BaseTransfer) GetRealFsPath(fsPath string) string {
 	if fsPath == t.GetFsPath() {
-		if t.File != nil {
-			return t.File.Name()
+		if t.File != nil || vfs.IsLocalOsFs(t.Fs) {
+			return t.effectiveFsPath
 		}
 		return t.fsPath
 	}
@@ -289,9 +289,9 @@ func (t *BaseTransfer) Truncate(fsPath string, size int64) (int64, error) {
 			return initialSize, err
 		}
 		if size == 0 && t.BytesSent.Load() == 0 {
-			// for cloud providers the file is always truncated to zero, we don't support append/resume for uploads
-			// for buffered SFTP we can have buffered bytes so we returns an error
-			if !vfs.IsBufferedSFTPFs(t.Fs) {
+			// for cloud providers the file is always truncated to zero, we don't support append/resume for uploads.
+			// For buffered SFTP and local fs we can have buffered bytes so we returns an error
+			if !vfs.IsBufferedLocalOrSFTPFs(t.Fs) {
 				return 0, nil
 			}
 		}
@@ -373,16 +373,16 @@ func (t *BaseTransfer) Close() error {
 		dataprovider.UpdateUserTransferQuota(&t.Connection.User, t.BytesReceived.Load(), //nolint:errcheck
 			t.BytesSent.Load(), false)
 	}
-	if t.File != nil && t.Connection.IsQuotaExceededError(t.ErrTransfer) {
+	if (t.File != nil || vfs.IsLocalOsFs(t.Fs)) && t.Connection.IsQuotaExceededError(t.ErrTransfer) {
 		// if quota is exceeded we try to remove the partial file for uploads to local filesystem
-		err = t.Fs.Remove(t.File.Name(), false)
+		err = t.Fs.Remove(t.effectiveFsPath, false)
 		if err == nil {
 			t.BytesReceived.Store(0)
 			t.MinWriteOffset = 0
 		}
 		t.Connection.Log(logger.LevelWarn, "upload denied due to space limit, delete temporary file: %q, deletion error: %v",
-			t.File.Name(), err)
-	} else if t.transferType == TransferUpload && t.effectiveFsPath != t.fsPath {
+			t.effectiveFsPath, err)
+	} else if t.isAtomicUpload() {
 		if t.ErrTransfer == nil || Config.UploadMode == UploadModeAtomicWithResume {
 			_, _, err = t.Fs.Rename(t.effectiveFsPath, t.fsPath)
 			t.Connection.Log(logger.LevelDebug, "atomic upload completed, rename: %q -> %q, error: %v",
@@ -436,6 +436,10 @@ func (t *BaseTransfer) Close() error {
 	return err
 }

+func (t *BaseTransfer) isAtomicUpload() bool {
+	return t.transferType == TransferUpload && t.effectiveFsPath != t.fsPath
+}
+
 func (t *BaseTransfer) updateTransferTimestamps(uploadFileSize, elapsed int64) {
 	if t.ErrTransfer != nil {
 		return
@@ -36,7 +36,7 @@ func TestTransferUpdateQuota(t *testing.T) {
 	transfer := BaseTransfer{
 		Connection: conn,
 		transferType: TransferUpload,
-		Fs: vfs.NewOsFs("", os.TempDir(), ""),
+		Fs: vfs.NewOsFs("", os.TempDir(), "", nil),
 	}
 	transfer.BytesReceived.Store(123)
 	errFake := errors.New("fake error")
@@ -75,7 +75,7 @@ func TestTransferThrottling(t *testing.T) {
 			DownloadBandwidth: 40,
 		},
 	}
-	fs := vfs.NewOsFs("", os.TempDir(), "")
+	fs := vfs.NewOsFs("", os.TempDir(), "", nil)
 	testFileSize := int64(131072)
 	wantedUploadElapsed := 1000 * (testFileSize / 1024) / u.UploadBandwidth
 	wantedDownloadElapsed := 1000 * (testFileSize / 1024) / u.DownloadBandwidth
@@ -107,7 +107,7 @@ func TestTransferThrottling(t *testing.T) {

 func TestRealPath(t *testing.T) {
 	testFile := filepath.Join(os.TempDir(), "afile.txt")
-	fs := vfs.NewOsFs("123", os.TempDir(), "")
+	fs := vfs.NewOsFs("123", os.TempDir(), "", nil)
 	u := dataprovider.User{
 		BaseUser: sdk.BaseUser{
 			Username: "user",
@@ -141,7 +141,7 @@ func TestRealPath(t *testing.T) {

 func TestTruncate(t *testing.T) {
 	testFile := filepath.Join(os.TempDir(), "transfer_test_file")
-	fs := vfs.NewOsFs("123", os.TempDir(), "")
+	fs := vfs.NewOsFs("123", os.TempDir(), "", nil)
 	u := dataprovider.User{
 		BaseUser: sdk.BaseUser{
 			Username: "user",
@@ -210,7 +210,7 @@ func TestTransferErrors(t *testing.T) {
 		isCancelled = true
 	}
 	testFile := filepath.Join(os.TempDir(), "transfer_test_file")
-	fs := vfs.NewOsFs("id", os.TempDir(), "")
+	fs := vfs.NewOsFs("id", os.TempDir(), "", nil)
 	u := dataprovider.User{
 		BaseUser: sdk.BaseUser{
 			Username: "test",
@@ -321,7 +321,7 @@ func TestFTPMode(t *testing.T) {
 	transfer := BaseTransfer{
 		Connection: conn,
 		transferType: TransferUpload,
-		Fs: vfs.NewOsFs("", os.TempDir(), ""),
+		Fs: vfs.NewOsFs("", os.TempDir(), "", nil),
 	}
 	transfer.BytesReceived.Store(123)
 	assert.Empty(t, transfer.ftpMode)
@@ -399,7 +399,7 @@ func TestTransferQuota(t *testing.T) {

 	conn := NewBaseConnection("", ProtocolSFTP, "", "", user)
 	transfer := NewBaseTransfer(nil, conn, nil, "file.txt", "file.txt", "/transfer_test_file", TransferUpload,
-		0, 0, 0, 0, true, vfs.NewOsFs("", os.TempDir(), ""), dataprovider.TransferQuota{})
+		0, 0, 0, 0, true, vfs.NewOsFs("", os.TempDir(), "", nil), dataprovider.TransferQuota{})
 	err := transfer.CheckRead()
 	assert.NoError(t, err)
 	err = transfer.CheckWrite()
@@ -453,7 +453,7 @@ func TestUploadOutsideHomeRenameError(t *testing.T) {
 	transfer := BaseTransfer{
 		Connection: conn,
 		transferType: TransferUpload,
-		Fs: vfs.NewOsFs("", filepath.Join(os.TempDir(), "home"), ""),
+		Fs: vfs.NewOsFs("", filepath.Join(os.TempDir(), "home"), "", nil),
 	}
 	transfer.BytesReceived.Store(123)

@@ -2622,11 +2622,12 @@ func getVirtualFolderIfInvalid(folder *vfs.BaseVirtualFolder) *vfs.BaseVirtualFo
 	if err := ValidateFolder(folder); err == nil {
 		return folder
 	}
-	// we try to get the folder from the data provider if only the Name is populated
-	if folder.MappedPath != "" {
+	if folder.Name == "" {
 		return folder
 	}
-	if folder.Name == "" {
+	// we try to get the folder from the data provider if only the Name is populated
+	// so if MappedPath or Provider are set just return
+	if folder.MappedPath != "" {
 		return folder
 	}
 	if folder.FsConfig.Provider != sdk.LocalFilesystemProvider {
@@ -173,7 +173,7 @@ func (u *User) getRootFs(connectionID string) (fs vfs.Fs, err error) {
 	case sdk.HTTPFilesystemProvider:
 		return vfs.NewHTTPFs(connectionID, u.GetHomeDir(), "", u.FsConfig.HTTPConfig)
 	default:
-		return vfs.NewOsFs(connectionID, u.GetHomeDir(), ""), nil
+		return vfs.NewOsFs(connectionID, u.GetHomeDir(), "", &u.FsConfig.OSConfig), nil
 	}
 }

@@ -218,7 +218,7 @@ func (u *User) checkLocalHomeDir(connectionID string) {
 	case sdk.LocalFilesystemProvider, sdk.CryptedFilesystemProvider:
 		return
 	default:
-		osFs := vfs.NewOsFs(connectionID, u.GetHomeDir(), "")
+		osFs := vfs.NewOsFs(connectionID, u.GetHomeDir(), "", nil)
 		osFs.CheckRootPath(u.Username, u.GetUID(), u.GetGID())
 	}
 }
@@ -1631,7 +1631,7 @@ func (u *User) applyGroupSettings(groupsMapping map[string]Group) {
 	for _, g := range u.Groups {
 		if g.Type == sdk.GroupTypePrimary {
 			if group, ok := groupsMapping[g.Name]; ok {
-				u.mergeWithPrimaryGroup(group, replacer)
+				u.mergeWithPrimaryGroup(&group, replacer)
 			} else {
 				providerLog(logger.LevelError, "mapping not found for user %s, group %s", u.Username, g.Name)
 			}
@@ -1641,7 +1641,7 @@ func (u *User) applyGroupSettings(groupsMapping map[string]Group) {
 	for _, g := range u.Groups {
 		if g.Type == sdk.GroupTypeSecondary {
 			if group, ok := groupsMapping[g.Name]; ok {
-				u.mergeAdditiveProperties(group, sdk.GroupTypeSecondary, replacer)
+				u.mergeAdditiveProperties(&group, sdk.GroupTypeSecondary, replacer)
 			} else {
 				providerLog(logger.LevelError, "mapping not found for user %s, group %s", u.Username, g.Name)
 			}
@@ -1674,17 +1674,19 @@ func (u *User) LoadAndApplyGroupSettings() error {
 	}
 	replacer := u.getGroupPlacehodersReplacer()
 	// make sure to always merge with the primary group first
-	for idx, g := range groups {
+	for idx := range groups {
+		g := groups[idx]
 		if g.Name == primaryGroupName {
-			u.mergeWithPrimaryGroup(g, replacer)
+			u.mergeWithPrimaryGroup(&g, replacer)
 			lastIdx := len(groups) - 1
 			groups[idx] = groups[lastIdx]
 			groups = groups[:lastIdx]
 			break
 		}
 	}
-	for _, g := range groups {
-		u.mergeAdditiveProperties(g, sdk.GroupTypeSecondary, replacer)
+	for idx := range groups {
+		g := groups[idx]
+		u.mergeAdditiveProperties(&g, sdk.GroupTypeSecondary, replacer)
 	}
 	u.removeDuplicatesAfterGroupMerge()
 	return nil
@@ -1718,12 +1720,31 @@ func (u *User) replaceFsConfigPlaceholders(fsConfig vfs.Filesystem, replacer *st
 	return fsConfig
 }

-func (u *User) mergeWithPrimaryGroup(group Group, replacer *strings.Replacer) {
+func (u *User) mergeCryptFsConfig(group *Group) {
+	if group.UserSettings.FsConfig.Provider == sdk.CryptedFilesystemProvider {
+		if u.FsConfig.CryptConfig.ReadBufferSize == 0 {
+			u.FsConfig.CryptConfig.ReadBufferSize = group.UserSettings.FsConfig.CryptConfig.ReadBufferSize
+		}
+		if u.FsConfig.CryptConfig.WriteBufferSize == 0 {
+			u.FsConfig.CryptConfig.WriteBufferSize = group.UserSettings.FsConfig.CryptConfig.WriteBufferSize
+		}
+	}
+}
+
+func (u *User) mergeWithPrimaryGroup(group *Group, replacer *strings.Replacer) {
 	if group.UserSettings.HomeDir != "" {
 		u.HomeDir = u.replacePlaceholder(group.UserSettings.HomeDir, replacer)
 	}
 	if group.UserSettings.FsConfig.Provider != 0 {
 		u.FsConfig = u.replaceFsConfigPlaceholders(group.UserSettings.FsConfig, replacer)
+		u.mergeCryptFsConfig(group)
+	} else {
+		if u.FsConfig.OSConfig.ReadBufferSize == 0 {
+			u.FsConfig.OSConfig.ReadBufferSize = group.UserSettings.FsConfig.OSConfig.ReadBufferSize
+		}
+		if u.FsConfig.OSConfig.WriteBufferSize == 0 {
+			u.FsConfig.OSConfig.WriteBufferSize = group.UserSettings.FsConfig.OSConfig.WriteBufferSize
+		}
 	}
 	if u.MaxSessions == 0 {
 		u.MaxSessions = group.UserSettings.MaxSessions
|
||||||
if u.ExpirationDate == 0 && group.UserSettings.ExpiresIn > 0 {
|
if u.ExpirationDate == 0 && group.UserSettings.ExpiresIn > 0 {
|
||||||
u.ExpirationDate = u.CreatedAt + int64(group.UserSettings.ExpiresIn)*86400000
|
u.ExpirationDate = u.CreatedAt + int64(group.UserSettings.ExpiresIn)*86400000
|
||||||
}
|
}
|
||||||
u.mergePrimaryGroupFilters(group.UserSettings.Filters, replacer)
|
u.mergePrimaryGroupFilters(&group.UserSettings.Filters, replacer)
|
||||||
u.mergeAdditiveProperties(group, sdk.GroupTypePrimary, replacer)
|
u.mergeAdditiveProperties(group, sdk.GroupTypePrimary, replacer)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (u *User) mergePrimaryGroupFilters(filters sdk.BaseUserFilters, replacer *strings.Replacer) {
|
func (u *User) mergePrimaryGroupFilters(filters *sdk.BaseUserFilters, replacer *strings.Replacer) {
|
||||||
if u.Filters.MaxUploadFileSize == 0 {
|
if u.Filters.MaxUploadFileSize == 0 {
|
||||||
u.Filters.MaxUploadFileSize = filters.MaxUploadFileSize
|
u.Filters.MaxUploadFileSize = filters.MaxUploadFileSize
|
||||||
}
|
}
|
||||||
|
@@ -1797,7 +1818,7 @@ func (u *User) mergePrimaryGroupFilters(filters sdk.BaseUserFilters, replacer *s
 	}
 }

-func (u *User) mergeAdditiveProperties(group Group, groupType int, replacer *strings.Replacer) {
+func (u *User) mergeAdditiveProperties(group *Group, groupType int, replacer *strings.Replacer) {
 	u.mergeVirtualFolders(group, groupType, replacer)
 	u.mergePermissions(group, groupType, replacer)
 	u.mergeFilePatterns(group, groupType, replacer)
@@ -1811,7 +1832,7 @@ func (u *User) mergeAdditiveProperties(group Group, groupType int, replacer *str
 	u.Filters.TwoFactorAuthProtocols = append(u.Filters.TwoFactorAuthProtocols, group.UserSettings.Filters.TwoFactorAuthProtocols...)
 }

-func (u *User) mergeVirtualFolders(group Group, groupType int, replacer *strings.Replacer) {
+func (u *User) mergeVirtualFolders(group *Group, groupType int, replacer *strings.Replacer) {
 	if len(group.VirtualFolders) > 0 {
 		folderPaths := make(map[string]bool)
 		for _, folder := range u.VirtualFolders {
@@ -1831,7 +1852,7 @@ func (u *User) mergeVirtualFolders(group Group, groupType int, replacer *strings
 	}
 }

-func (u *User) mergePermissions(group Group, groupType int, replacer *strings.Replacer) {
+func (u *User) mergePermissions(group *Group, groupType int, replacer *strings.Replacer) {
 	for k, v := range group.UserSettings.Permissions {
 		if k == "/" {
 			if groupType == sdk.GroupTypePrimary {
@@ -1847,7 +1868,7 @@ func (u *User) mergePermissions(group Group, groupType int, replacer *strings.Re
 	}
 }

-func (u *User) mergeFilePatterns(group Group, groupType int, replacer *strings.Replacer) {
+func (u *User) mergeFilePatterns(group *Group, groupType int, replacer *strings.Replacer) {
 	if len(group.UserSettings.Filters.FilePatterns) > 0 {
 		patternPaths := make(map[string]bool)
 		for _, pattern := range u.Filters.FilePatterns {
@@ -136,6 +136,51 @@ func TestBasicFTPHandlingCryptFs(t *testing.T) {
 		50*time.Millisecond)
 }

+func TestBufferedCryptFs(t *testing.T) {
+	u := getTestUserWithCryptFs()
+	u.FsConfig.CryptConfig.OSFsConfig = sdk.OSFsConfig{
+		ReadBufferSize:  1,
+		WriteBufferSize: 1,
+	}
+	user, _, err := httpdtest.AddUser(u, http.StatusCreated)
+	assert.NoError(t, err)
+	client, err := getFTPClient(user, true, nil)
+	if assert.NoError(t, err) {
+		testFilePath := filepath.Join(homeBasePath, testFileName)
+		testFileSize := int64(65535)
+		err = createTestFile(testFilePath, testFileSize)
+		assert.NoError(t, err)
+
+		err = checkBasicFTP(client)
+		assert.NoError(t, err)
+		err = ftpUploadFile(testFilePath, testFileName, testFileSize, client, 0)
+		assert.NoError(t, err)
+		// overwrite an existing file
+		err = ftpUploadFile(testFilePath, testFileName, testFileSize, client, 0)
+		assert.NoError(t, err)
+		localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
+		err = ftpDownloadFile(testFileName, localDownloadPath, testFileSize, client, 0)
+		assert.NoError(t, err)
+		info, err := os.Stat(localDownloadPath)
+		if assert.NoError(t, err) {
+			assert.Equal(t, testFileSize, info.Size())
+		}
+		err = os.Remove(testFilePath)
+		assert.NoError(t, err)
+		err = os.Remove(localDownloadPath)
+		assert.NoError(t, err)
+		err = client.Quit()
+		assert.NoError(t, err)
+	}
+	_, err = httpdtest.RemoveUser(user, http.StatusOK)
+	assert.NoError(t, err)
+	err = os.RemoveAll(user.GetHomeDir())
+	assert.NoError(t, err)
+	assert.Eventually(t, func() bool { return len(common.Connections.GetStats("")) == 0 }, 1*time.Second, 50*time.Millisecond)
+	assert.Eventually(t, func() bool { return common.Connections.GetClientConnections() == 0 }, 1000*time.Millisecond,
+		50*time.Millisecond)
+}
+
 func TestZeroBytesTransfersCryptFs(t *testing.T) {
 	u := getTestUserWithCryptFs()
 	user, _, err := httpdtest.AddUser(u, http.StatusCreated)
@@ -2083,7 +2083,16 @@ func TestResume(t *testing.T) {
 	assert.NoError(t, err)
 	sftpUser, _, err := httpdtest.AddUser(getTestSFTPUser(), http.StatusCreated)
 	assert.NoError(t, err)
-	for _, user := range []dataprovider.User{localUser, sftpUser} {
+	u = getTestUser()
+	u.FsConfig.OSConfig = sdk.OSFsConfig{
+		ReadBufferSize:  1,
+		WriteBufferSize: 1,
+	}
+	u.Username += "_buf"
+	u.HomeDir += "_buf"
+	bufferedUser, _, err := httpdtest.AddUser(u, http.StatusCreated)
+	assert.NoError(t, err)
+	for _, user := range []dataprovider.User{localUser, sftpUser, bufferedUser} {
 		client, err := getFTPClient(user, true, nil)
 		if assert.NoError(t, err) {
 			testFilePath := filepath.Join(homeBasePath, testFileName)
@@ -2166,6 +2175,10 @@ func TestResume(t *testing.T) {
 	assert.NoError(t, err)
 	err = os.RemoveAll(localUser.GetHomeDir())
 	assert.NoError(t, err)
+	_, err = httpdtest.RemoveUser(bufferedUser, http.StatusOK)
+	assert.NoError(t, err)
+	err = os.RemoveAll(bufferedUser.GetHomeDir())
+	assert.NoError(t, err)
 }

 //nolint:dupl
@@ -398,7 +398,7 @@ func (fs MockOsFs) Rename(source, target string) (int, int64, error) {

 func newMockOsFs(err, statErr error, atomicUpload bool, connectionID, rootDir string) vfs.Fs {
 	return &MockOsFs{
-		Fs: vfs.NewOsFs(connectionID, rootDir, ""),
+		Fs: vfs.NewOsFs(connectionID, rootDir, "", nil),
 		err: err,
 		statErr: statErr,
 		isAtomicUploadSupported: atomicUpload,
@@ -719,7 +719,7 @@ func TestUploadFileStatError(t *testing.T) {
 	user.Permissions["/"] = []string{dataprovider.PermAny}
 	mockCC := mockFTPClientContext{}
 	connID := fmt.Sprintf("%v", mockCC.ID())
-	fs := vfs.NewOsFs(connID, user.HomeDir, "")
+	fs := vfs.NewOsFs(connID, user.HomeDir, "", nil)
 	connection := &Connection{
 		BaseConnection: common.NewBaseConnection(connID, common.ProtocolFTP, "", "", user),
 		clientContext: mockCC,
@@ -809,7 +809,7 @@ func TestUploadOverwriteErrors(t *testing.T) {
 	_, err = connection.handleFTPUploadToExistingFile(fs, os.O_TRUNC, filepath.Join(os.TempDir(), "sub", "file"),
 		filepath.Join(os.TempDir(), "sub", "file1"), 0, "/sub/file1")
 	assert.Error(t, err)
-	fs = vfs.NewOsFs(connID, user.GetHomeDir(), "")
+	fs = vfs.NewOsFs(connID, user.GetHomeDir(), "", nil)
 	_, err = connection.handleFTPUploadToExistingFile(fs, 0, "missing1", "missing2", 0, "missing")
 	assert.Error(t, err)
 }
@@ -617,6 +617,10 @@ func TestBasicUserHandling(t *testing.T) {
 	assert.True(t, user.HasPassword)

 	user.Email = "invalid@email"
+	user.FsConfig.OSConfig = sdk.OSFsConfig{
+		ReadBufferSize:  1,
+		WriteBufferSize: 2,
+	}
 	_, body, err := httpdtest.UpdateUser(user, http.StatusBadRequest, "")
 	assert.NoError(t, err)
 	assert.Contains(t, string(body), "Validation error: email")
@@ -887,6 +891,12 @@ func TestGroupRelations(t *testing.T) {
 	_, _, err := httpdtest.AddFolder(vfs.BaseVirtualFolder{
 		Name: folderName2,
 		MappedPath: mappedPath2,
+		FsConfig: vfs.Filesystem{
+			OSConfig: sdk.OSFsConfig{
+				ReadBufferSize:  3,
+				WriteBufferSize: 5,
+			},
+		},
 	}, http.StatusCreated)
 	assert.NoError(t, err)
 	g1 := getTestGroup()
@@ -1145,21 +1155,47 @@ func TestGroupSettingsOverride(t *testing.T) {
 	folderName1 := filepath.Base(mappedPath1)
 	mappedPath2 := filepath.Join(os.TempDir(), util.GenerateUniqueID())
 	folderName2 := filepath.Base(mappedPath2)
+	mappedPath3 := filepath.Join(os.TempDir(), util.GenerateUniqueID())
+	folderName3 := filepath.Base(mappedPath3)
 	g1 := getTestGroup()
 	g1.Name += "_1"
 	g1.VirtualFolders = append(g1.VirtualFolders, vfs.VirtualFolder{
 		BaseVirtualFolder: vfs.BaseVirtualFolder{
 			Name: folderName1,
 			MappedPath: mappedPath1,
+			FsConfig: vfs.Filesystem{
+				OSConfig: sdk.OSFsConfig{
+					ReadBufferSize:  3,
+					WriteBufferSize: 5,
+				},
+			},
 		},
 		VirtualPath: "/vdir1",
 	})
+	g1.UserSettings.Permissions = map[string][]string{
+		"/dir1": {dataprovider.PermUpload},
+		"/dir2": {dataprovider.PermDownload, dataprovider.PermListItems},
+	}
+	g1.UserSettings.FsConfig.OSConfig = sdk.OSFsConfig{
+		ReadBufferSize:  6,
+		WriteBufferSize: 2,
+	}
 	g2 := getTestGroup()
 	g2.Name += "_2"
+	g2.UserSettings.Permissions = map[string][]string{
+		"/dir1": {dataprovider.PermAny},
+		"/dir3": {dataprovider.PermDownload, dataprovider.PermListItems, dataprovider.PermChtimes},
+	}
 	g2.VirtualFolders = append(g2.VirtualFolders, vfs.VirtualFolder{
 		BaseVirtualFolder: vfs.BaseVirtualFolder{
 			Name: folderName1,
 			MappedPath: mappedPath1,
+			FsConfig: vfs.Filesystem{
+				OSConfig: sdk.OSFsConfig{
+					ReadBufferSize:  3,
+					WriteBufferSize: 5,
+				},
+			},
 		},
 		VirtualPath: "/vdir2",
 	})
@@ -1170,6 +1206,19 @@ func TestGroupSettingsOverride(t *testing.T) {
 		},
 		VirtualPath: "/vdir3",
 	})
+	g2.VirtualFolders = append(g2.VirtualFolders, vfs.VirtualFolder{
+		BaseVirtualFolder: vfs.BaseVirtualFolder{
+			Name: folderName3,
+			MappedPath: mappedPath3,
+			FsConfig: vfs.Filesystem{
+				OSConfig: sdk.OSFsConfig{
+					ReadBufferSize:  1,
+					WriteBufferSize: 2,
+				},
+			},
+		},
+		VirtualPath: "/vdir4",
+	})
 	group1, resp, err := httpdtest.AddGroup(g1, http.StatusCreated)
 	assert.NoError(t, err, string(resp))
 	group2, resp, err := httpdtest.AddGroup(g2, http.StatusCreated)
@@ -1188,19 +1237,55 @@ func TestGroupSettingsOverride(t *testing.T) {
 	user, _, err := httpdtest.AddUser(u, http.StatusCreated)
 	assert.NoError(t, err)
 	assert.Len(t, user.VirtualFolders, 0)
+	assert.Len(t, user.Permissions, 1)

 	user, err = dataprovider.CheckUserAndPass(defaultUsername, defaultPassword, "", common.ProtocolHTTP)
 	assert.NoError(t, err)
-	assert.Len(t, user.VirtualFolders, 3)
+	var folderNames []string
+	if assert.Len(t, user.VirtualFolders, 4) {
+		for _, f := range user.VirtualFolders {
+			if !util.Contains(folderNames, f.Name) {
+				folderNames = append(folderNames, f.Name)
+			}
+			switch f.Name {
+			case folderName1:
+				assert.Equal(t, mappedPath1, f.MappedPath)
+				assert.Equal(t, 3, f.BaseVirtualFolder.FsConfig.OSConfig.ReadBufferSize)
+				assert.Equal(t, 5, f.BaseVirtualFolder.FsConfig.OSConfig.WriteBufferSize)
+				assert.True(t, util.Contains([]string{"/vdir1", "/vdir2"}, f.VirtualPath))
+			case folderName2:
+				assert.Equal(t, mappedPath2, f.MappedPath)
+				assert.Equal(t, "/vdir3", f.VirtualPath)
+				assert.Equal(t, 0, f.BaseVirtualFolder.FsConfig.OSConfig.ReadBufferSize)
+				assert.Equal(t, 0, f.BaseVirtualFolder.FsConfig.OSConfig.WriteBufferSize)
+			case folderName3:
+				assert.Equal(t, mappedPath3, f.MappedPath)
+				assert.Equal(t, "/vdir4", f.VirtualPath)
+				assert.Equal(t, 1, f.BaseVirtualFolder.FsConfig.OSConfig.ReadBufferSize)
+				assert.Equal(t, 2, f.BaseVirtualFolder.FsConfig.OSConfig.WriteBufferSize)
+			}
+		}
+	}
+	assert.Len(t, folderNames, 3)
+	assert.Contains(t, folderNames, folderName1)
+	assert.Contains(t, folderNames, folderName2)
+	assert.Contains(t, folderNames, folderName3)
+	assert.Len(t, user.Permissions, 4)
+	assert.Equal(t, g1.UserSettings.Permissions["/dir1"], user.Permissions["/dir1"])
+	assert.Equal(t, g1.UserSettings.Permissions["/dir2"], user.Permissions["/dir2"])
+	assert.Equal(t, g2.UserSettings.Permissions["/dir3"], user.Permissions["/dir3"])
+	assert.Equal(t, g1.UserSettings.FsConfig.OSConfig.ReadBufferSize, user.FsConfig.OSConfig.ReadBufferSize)
+	assert.Equal(t, g1.UserSettings.FsConfig.OSConfig.WriteBufferSize, user.FsConfig.OSConfig.WriteBufferSize)

 	user, err = dataprovider.GetUserAfterIDPAuth(defaultUsername, "", common.ProtocolOIDC, nil)
 	assert.NoError(t, err)
-	assert.Len(t, user.VirtualFolders, 3)
+	assert.Len(t, user.VirtualFolders, 4)

 	user1, user2, err := dataprovider.GetUserVariants(defaultUsername, "")
 	assert.NoError(t, err)
 	assert.Len(t, user1.VirtualFolders, 0)
-	assert.Len(t, user2.VirtualFolders, 3)
+	assert.Len(t, user2.VirtualFolders, 4)
 	assert.Equal(t, int64(0), user1.ExpirationDate)
 	assert.Equal(t, int64(0), user2.ExpirationDate)

@@ -1226,7 +1311,7 @@ func TestGroupSettingsOverride(t *testing.T) {
 	assert.NoError(t, err)
 	user, err = dataprovider.CheckUserAndPass(defaultUsername, defaultPassword, "", common.ProtocolHTTP)
 	assert.NoError(t, err)
-	assert.Len(t, user.VirtualFolders, 3)
+	assert.Len(t, user.VirtualFolders, 4)
 	assert.Equal(t, sdk.LocalFilesystemProvider, user.FsConfig.Provider)
 	assert.Equal(t, int64(0), user.DownloadBandwidth)
 	assert.Equal(t, int64(0), user.UploadBandwidth)
@@ -1272,7 +1357,7 @@ func TestGroupSettingsOverride(t *testing.T) {
 	assert.NoError(t, err)
 	user, err = dataprovider.CheckUserAndPass(defaultUsername, defaultPassword, "", common.ProtocolHTTP)
 	assert.NoError(t, err)
-	assert.Len(t, user.VirtualFolders, 3)
+	assert.Len(t, user.VirtualFolders, 4)
 	assert.Equal(t, user.CreatedAt+int64(group1.UserSettings.ExpiresIn)*86400000, user.ExpirationDate)
 	assert.Equal(t, group1.UserSettings.Filters.PasswordStrength, user.Filters.PasswordStrength)
 	assert.Equal(t, sdk.SFTPFilesystemProvider, user.FsConfig.Provider)
@@ -1307,6 +1392,8 @@ func TestGroupSettingsOverride(t *testing.T) {
 	assert.NoError(t, err)
 	_, err = httpdtest.RemoveFolder(vfs.BaseVirtualFolder{Name: folderName2}, http.StatusOK)
 	assert.NoError(t, err)
+	_, err = httpdtest.RemoveFolder(vfs.BaseVirtualFolder{Name: folderName3}, http.StatusOK)
+	assert.NoError(t, err)
 }

 func TestConfigs(t *testing.T) {
@@ -12208,7 +12295,7 @@ func TestWebClientMaxConnections(t *testing.T) {
 	checkResponseCode(t, http.StatusOK, rr)

 	// now add a fake connection
-	fs := vfs.NewOsFs("id", os.TempDir(), "")
+	fs := vfs.NewOsFs("id", os.TempDir(), "", nil)
 	connection := &httpd.Connection{
 		BaseConnection: common.NewBaseConnection(fs.ConnectionID(), common.ProtocolHTTP, "", "", user),
 	}
@@ -12399,7 +12486,7 @@ func TestMaxSessions(t *testing.T) {
 	apiToken, err := getJWTAPIUserTokenFromTestServer(defaultUsername, defaultPassword)
 	assert.NoError(t, err)
 	// now add a fake connection
-	fs := vfs.NewOsFs("id", os.TempDir(), "")
+	fs := vfs.NewOsFs("id", os.TempDir(), "", nil)
 	connection := &httpd.Connection{
 		BaseConnection: common.NewBaseConnection(fs.ConnectionID(), common.ProtocolHTTP, "", "", user),
 	}
@@ -13508,7 +13595,7 @@ func TestShareMaxSessions(t *testing.T) {
 	rr = executeRequest(req)
 	checkResponseCode(t, http.StatusOK, rr)
 	// add a fake connection
-	fs := vfs.NewOsFs("id", os.TempDir(), "")
+	fs := vfs.NewOsFs("id", os.TempDir(), "", nil)
 	connection := &httpd.Connection{
 		BaseConnection: common.NewBaseConnection(fs.ConnectionID(), common.ProtocolHTTP, "", "", user),
 	}
@@ -15887,6 +15974,105 @@ func TestWebFilesAPI(t *testing.T) {
 	checkResponseCode(t, http.StatusNotFound, rr)
 }

+func TestBufferedWebFilesAPI(t *testing.T) {
+	u := getTestUser()
+	u.FsConfig.OSConfig = sdk.OSFsConfig{
+		ReadBufferSize:  1,
+		WriteBufferSize: 1,
+	}
+	vdirPath := "/crypted"
+	mappedPath := filepath.Join(os.TempDir(), util.GenerateUniqueID())
+	folderName := filepath.Base(mappedPath)
+	u.VirtualFolders = append(u.VirtualFolders, vfs.VirtualFolder{
+		BaseVirtualFolder: vfs.BaseVirtualFolder{
+			Name: folderName,
+			MappedPath: mappedPath,
+			FsConfig: vfs.Filesystem{
+				Provider: sdk.CryptedFilesystemProvider,
+				CryptConfig: vfs.CryptFsConfig{
+					OSFsConfig: sdk.OSFsConfig{
+						WriteBufferSize: 3,
+						ReadBufferSize:  2,
+					},
+					Passphrase: kms.NewPlainSecret(defaultPassword),
+				},
+			},
+		},
+		VirtualPath: vdirPath,
+		QuotaFiles: -1,
+		QuotaSize: -1,
+	})
+	user, _, err := httpdtest.AddUser(u, http.StatusCreated)
+	assert.NoError(t, err)
+	webAPIToken, err := getJWTAPIUserTokenFromTestServer(defaultUsername, defaultPassword)
+	assert.NoError(t, err)
+
+	body := new(bytes.Buffer)
+	writer := multipart.NewWriter(body)
+	part1, err := writer.CreateFormFile("filenames", "file1.txt")
+	assert.NoError(t, err)
+	_, err = part1.Write([]byte("file1 content"))
+	assert.NoError(t, err)
+	err = writer.Close()
+	assert.NoError(t, err)
+	reader := bytes.NewReader(body.Bytes())
+
+	req, err := http.NewRequest(http.MethodPost, userFilesPath, reader)
+	assert.NoError(t, err)
+	req.Header.Add("Content-Type", writer.FormDataContentType())
+	setBearerForReq(req, webAPIToken)
+	rr := executeRequest(req)
+	checkResponseCode(t, http.StatusCreated, rr)
+
+	_, err = reader.Seek(0, io.SeekStart)
+	assert.NoError(t, err)
+	req, err = http.NewRequest(http.MethodPost, userFilesPath+"?path="+url.QueryEscape(vdirPath), reader)
+	assert.NoError(t, err)
+	req.Header.Add("Content-Type", writer.FormDataContentType())
+	setBearerForReq(req, webAPIToken)
+	rr = executeRequest(req)
+	checkResponseCode(t, http.StatusCreated, rr)
+
+	req, err = http.NewRequest(http.MethodGet, userFilesPath+"?path=file1.txt", nil)
+	assert.NoError(t, err)
+	setBearerForReq(req, webAPIToken)
+	rr = executeRequest(req)
+	checkResponseCode(t, http.StatusOK, rr)
+	assert.Equal(t, "file1 content", rr.Body.String())
+
+	req, err = http.NewRequest(http.MethodGet, userFilesPath+"?path="+url.QueryEscape(vdirPath+"/file1.txt"), nil)
+	assert.NoError(t, err)
+	setBearerForReq(req, webAPIToken)
+	rr = executeRequest(req)
+	checkResponseCode(t, http.StatusOK, rr)
+	assert.Equal(t, "file1 content", rr.Body.String())
+
+	req, err = http.NewRequest(http.MethodGet, userFilesPath+"?path=file1.txt", nil)
+	assert.NoError(t, err)
+	req.Header.Set("Range", "bytes=2-")
+	setBearerForReq(req, webAPIToken)
+	rr = executeRequest(req)
+	checkResponseCode(t, http.StatusPartialContent, rr)
+	assert.Equal(t, "le1 content", rr.Body.String())
+
+	req, err = http.NewRequest(http.MethodGet, userFilesPath+"?path="+url.QueryEscape(vdirPath+"/file1.txt"), nil)
+	assert.NoError(t, err)
+	req.Header.Set("Range", "bytes=3-6")
+	setBearerForReq(req, webAPIToken)
+	rr = executeRequest(req)
+	checkResponseCode(t, http.StatusPartialContent, rr)
+	assert.Equal(t, "e1 c", rr.Body.String())
+
+	_, err = httpdtest.RemoveUser(user, http.StatusOK)
+	assert.NoError(t, err)
+	err = os.RemoveAll(user.GetHomeDir())
+	assert.NoError(t, err)
+	_, err = httpdtest.RemoveFolder(vfs.BaseVirtualFolder{Name: folderName}, http.StatusOK)
+	assert.NoError(t, err)
+	err = os.RemoveAll(mappedPath)
+	assert.NoError(t, err)
+}
+
 func TestStartDirectory(t *testing.T) {
 	u := getTestUser()
 	u.Filters.StartDirectory = "/start/dir"
@@ -19009,6 +19195,8 @@ func TestWebUserAddMock(t *testing.T) {
 	form.Set("username", user.Username)
 	form.Set("email", user.Email)
 	form.Set("home_dir", user.HomeDir)
+	form.Set("osfs_read_buffer_size", "2")
+	form.Set("osfs_write_buffer_size", "3")
 	form.Set("password", user.Password)
 	form.Set("primary_group", group1.Name)
 	form.Set("secondary_groups", group2.Name)
@@ -19344,6 +19532,8 @@ func TestWebUserAddMock(t *testing.T) {
 	err = render.DecodeJSON(rr.Body, &newUser)
 	assert.NoError(t, err)
 	assert.Equal(t, user.UID, newUser.UID)
+	assert.Equal(t, 2, newUser.FsConfig.OSConfig.ReadBufferSize)
+	assert.Equal(t, 3, newUser.FsConfig.OSConfig.WriteBufferSize)
 	assert.Equal(t, user.UploadBandwidth, newUser.UploadBandwidth)
 	assert.Equal(t, user.DownloadBandwidth, newUser.DownloadBandwidth)
 	assert.Equal(t, user.UploadDataTransfer, newUser.UploadDataTransfer)
@@ -21088,6 +21278,8 @@ func TestWebUserCryptMock(t *testing.T) {
 form.Set("denied_ip", "")
 form.Set("fs_provider", "4")
 form.Set("crypt_passphrase", "")
+form.Set("cryptfs_read_buffer_size", "1")
+form.Set("cryptfs_write_buffer_size", "2")
 form.Set("pattern_path0", "/dir1")
 form.Set("patterns0", "*.jpg,*.png")
 form.Set("pattern_type0", "allowed")
@@ -21125,6 +21317,8 @@ func TestWebUserCryptMock(t *testing.T) {
 assert.NotEmpty(t, updateUser.FsConfig.CryptConfig.Passphrase.GetPayload())
 assert.Empty(t, updateUser.FsConfig.CryptConfig.Passphrase.GetKey())
 assert.Empty(t, updateUser.FsConfig.CryptConfig.Passphrase.GetAdditionalData())
+assert.Equal(t, 1, updateUser.FsConfig.CryptConfig.ReadBufferSize)
+assert.Equal(t, 2, updateUser.FsConfig.CryptConfig.WriteBufferSize)
 // now check that a redacted password is not saved
 form.Set("crypt_passphrase", redactedSecret+" ")
 b, contentType, _ = getMultipartFormData(form, "", "")
@@ -22637,6 +22831,8 @@ func TestAddWebFoldersMock(t *testing.T) {
 form.Set("mapped_path", mappedPath)
 form.Set("name", folderName)
 form.Set("description", folderDesc)
+form.Set("osfs_read_buffer_size", "3")
+form.Set("osfs_write_buffer_size", "4")
 b, contentType, err := getMultipartFormData(form, "", "")
 assert.NoError(t, err)
 req, err := http.NewRequest(http.MethodPost, webFolderPath, &b)
@@ -22690,6 +22886,8 @@ func TestAddWebFoldersMock(t *testing.T) {
 assert.Equal(t, mappedPath, folder.MappedPath)
 assert.Equal(t, folderName, folder.Name)
 assert.Equal(t, folderDesc, folder.Description)
+assert.Equal(t, 3, folder.FsConfig.OSConfig.ReadBufferSize)
+assert.Equal(t, 4, folder.FsConfig.OSConfig.WriteBufferSize)
 // cleanup
 req, _ = http.NewRequest(http.MethodDelete, path.Join(folderPath, folderName), nil)
 setBearerForReq(req, apiToken)
@@ -1747,10 +1747,25 @@ func getAzureConfig(r *http.Request) (vfs.AzBlobFsConfig, error) {
 return config, nil
 }
 
+func getOsConfigFromPostFields(r *http.Request, readBufferField, writeBufferField string) sdk.OSFsConfig {
+config := sdk.OSFsConfig{}
+readBuffer, err := strconv.Atoi(r.Form.Get(readBufferField))
+if err == nil {
+config.ReadBufferSize = readBuffer
+}
+writeBuffer, err := strconv.Atoi(r.Form.Get(writeBufferField))
+if err == nil {
+config.WriteBufferSize = writeBuffer
+}
+return config
+}
+
 func getFsConfigFromPostFields(r *http.Request) (vfs.Filesystem, error) {
 var fs vfs.Filesystem
 fs.Provider = sdk.GetProviderByName(r.Form.Get("fs_provider"))
 switch fs.Provider {
+case sdk.LocalFilesystemProvider:
+fs.OSConfig = getOsConfigFromPostFields(r, "osfs_read_buffer_size", "osfs_write_buffer_size")
 case sdk.S3FilesystemProvider:
 config, err := getS3Config(r)
 if err != nil {
@@ -1771,6 +1786,7 @@ func getFsConfigFromPostFields(r *http.Request) (vfs.Filesystem, error) {
 fs.GCSConfig = config
 case sdk.CryptedFilesystemProvider:
 fs.CryptConfig.Passphrase = getSecretFromFormField(r, "crypt_passphrase")
+fs.CryptConfig.OSFsConfig = getOsConfigFromPostFields(r, "cryptfs_read_buffer_size", "cryptfs_write_buffer_size")
 case sdk.SFTPFilesystemProvider:
 config, err := getSFTPConfig(r)
 if err != nil {
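As a hedged illustration of how the new form fields feed the filesystem config: getOsConfigFromPostFields above reads the two buffer fields with strconv.Atoi and silently leaves the value at zero when the field is missing or non-numeric. A minimal standalone sketch of that parsing pattern, assuming only the sdk.OSFsConfig type from the SFTPGo SDK (the helper name parseOsFsBuffers is hypothetical):

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strconv"

	"github.com/sftpgo/sdk"
)

// parseOsFsBuffers mirrors the idea of getOsConfigFromPostFields: read the
// buffer sizes (expressed in MB) from the submitted form and keep them at
// zero, i.e. buffering disabled, when the field is absent or not a number.
func parseOsFsBuffers(r *http.Request, readField, writeField string) sdk.OSFsConfig {
	cfg := sdk.OSFsConfig{}
	if v, err := strconv.Atoi(r.Form.Get(readField)); err == nil {
		cfg.ReadBufferSize = v
	}
	if v, err := strconv.Atoi(r.Form.Get(writeField)); err == nil {
		cfg.WriteBufferSize = v
	}
	return cfg
}

func main() {
	form := url.Values{}
	form.Set("osfs_read_buffer_size", "2")  // 2 MB read buffer
	form.Set("osfs_write_buffer_size", "3") // 3 MB write buffer
	req := &http.Request{Form: form}
	fmt.Printf("%+v\n", parseOsFsBuffers(req, "osfs_read_buffer_size", "osfs_write_buffer_size"))
}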
@@ -2129,6 +2129,12 @@ func compareFsConfig(expected *vfs.Filesystem, actual *vfs.Filesystem) error {
 if expected.Provider != actual.Provider {
 return errors.New("fs provider mismatch")
 }
+if expected.OSConfig.ReadBufferSize != actual.OSConfig.ReadBufferSize {
+return fmt.Errorf("read buffer size mismatch")
+}
+if expected.OSConfig.WriteBufferSize != actual.OSConfig.WriteBufferSize {
+return fmt.Errorf("write buffer size mismatch")
+}
 if err := compareS3Config(expected, actual); err != nil {
 return err
 }
@@ -2141,6 +2147,12 @@ func compareFsConfig(expected *vfs.Filesystem, actual *vfs.Filesystem) error {
 if err := checkEncryptedSecret(expected.CryptConfig.Passphrase, actual.CryptConfig.Passphrase); err != nil {
 return err
 }
+if expected.CryptConfig.ReadBufferSize != actual.CryptConfig.ReadBufferSize {
+return fmt.Errorf("crypt read buffer size mismatch")
+}
+if expected.CryptConfig.WriteBufferSize != actual.CryptConfig.WriteBufferSize {
+return fmt.Errorf("crypt write buffer size mismatch")
+}
 if err := compareSFTPFsConfig(expected, actual); err != nil {
 return err
 }
@@ -149,7 +149,7 @@ func (fs MockOsFs) Rename(source, target string) (int, int64, error) {
 
 func newMockOsFs(err, statErr error, atomicUpload bool, connectionID, rootDir string) vfs.Fs {
 return &MockOsFs{
-Fs: vfs.NewOsFs(connectionID, rootDir, ""),
+Fs: vfs.NewOsFs(connectionID, rootDir, "", nil),
 err: err,
 statErr: statErr,
 isAtomicUploadSupported: atomicUpload,
@@ -183,7 +183,7 @@ func TestUploadResumeInvalidOffset(t *testing.T) {
 Username: "testuser",
 },
 }
-fs := vfs.NewOsFs("", os.TempDir(), "")
+fs := vfs.NewOsFs("", os.TempDir(), "", nil)
 conn := common.NewBaseConnection("", common.ProtocolSFTP, "", "", user)
 baseTransfer := common.NewBaseTransfer(file, conn, nil, file.Name(), file.Name(), testfile,
 common.TransferUpload, 10, 0, 0, 0, false, fs, dataprovider.TransferQuota{})
@@ -214,7 +214,7 @@ func TestReadWriteErrors(t *testing.T) {
 Username: "testuser",
 },
 }
-fs := vfs.NewOsFs("", os.TempDir(), "")
+fs := vfs.NewOsFs("", os.TempDir(), "", nil)
 conn := common.NewBaseConnection("", common.ProtocolSFTP, "", "", user)
 baseTransfer := common.NewBaseTransfer(file, conn, nil, file.Name(), file.Name(), testfile, common.TransferDownload,
 0, 0, 0, 0, false, fs, dataprovider.TransferQuota{})
@@ -288,7 +288,7 @@ func TestTransferCancelFn(t *testing.T) {
 Username: "testuser",
 },
 }
-fs := vfs.NewOsFs("", os.TempDir(), "")
+fs := vfs.NewOsFs("", os.TempDir(), "", nil)
 conn := common.NewBaseConnection("", common.ProtocolSFTP, "", "", user)
 baseTransfer := common.NewBaseTransfer(file, conn, cancelFn, file.Name(), file.Name(), testfile, common.TransferDownload,
 0, 0, 0, 0, false, fs, dataprovider.TransferQuota{})
@@ -311,7 +311,7 @@ func TestTransferCancelFn(t *testing.T) {
 
 func TestUploadFiles(t *testing.T) {
 common.Config.UploadMode = common.UploadModeAtomic
-fs := vfs.NewOsFs("123", os.TempDir(), "")
+fs := vfs.NewOsFs("123", os.TempDir(), "", nil)
 u := dataprovider.User{}
 c := Connection{
 BaseConnection: common.NewBaseConnection("", common.ProtocolSFTP, "", "", u),
@@ -1213,7 +1213,7 @@ func TestSCPParseUploadMessage(t *testing.T) {
 StdErrBuffer: bytes.NewBuffer(stdErrBuf),
 ReadError: nil,
 }
-fs := vfs.NewOsFs("", os.TempDir(), "")
+fs := vfs.NewOsFs("", os.TempDir(), "", nil)
 connection := &Connection{
 BaseConnection: common.NewBaseConnection("", common.ProtocolSFTP, "", "", dataprovider.User{
 BaseUser: sdk.BaseUser{
@@ -1470,7 +1470,7 @@ func TestSCPRecursiveDownloadErrors(t *testing.T) {
 err := client.Close()
 assert.NoError(t, err)
 }()
-fs := vfs.NewOsFs("123", os.TempDir(), "")
+fs := vfs.NewOsFs("123", os.TempDir(), "", nil)
 connection := &Connection{
 BaseConnection: common.NewBaseConnection("", common.ProtocolSCP, "", "", dataprovider.User{
 BaseUser: sdk.BaseUser{
@@ -1593,7 +1593,7 @@ func TestSCPDownloadFileData(t *testing.T) {
 ReadError: nil,
 WriteError: writeErr,
 }
-fs := vfs.NewOsFs("", os.TempDir(), "")
+fs := vfs.NewOsFs("", os.TempDir(), "", nil)
 connection := &Connection{
 BaseConnection: common.NewBaseConnection("", common.ProtocolSCP, "", "", dataprovider.User{BaseUser: sdk.BaseUser{HomeDir: os.TempDir()}}),
 channel: &mockSSHChannelReadErr,
@@ -1645,7 +1645,7 @@ func TestSCPUploadFiledata(t *testing.T) {
 Username: "testuser",
 },
 }
-fs := vfs.NewOsFs("", os.TempDir(), "")
+fs := vfs.NewOsFs("", os.TempDir(), "", nil)
 connection := &Connection{
 BaseConnection: common.NewBaseConnection("", common.ProtocolSCP, "", "", user),
 channel: &mockSSHChannel,
@@ -1736,7 +1736,7 @@ func TestUploadError(t *testing.T) {
 Username: "testuser",
 },
 }
-fs := vfs.NewOsFs("", os.TempDir(), "")
+fs := vfs.NewOsFs("", os.TempDir(), "", nil)
 connection := &Connection{
 BaseConnection: common.NewBaseConnection("", common.ProtocolSCP, "", "", user),
 }
@@ -1428,7 +1428,17 @@ func TestUploadResume(t *testing.T) {
 u = getTestSFTPUser(usePubKey)
 sftpUser, _, err := httpdtest.AddUser(u, http.StatusCreated)
 assert.NoError(t, err)
-for _, user := range []dataprovider.User{localUser, sftpUser} {
+u = getTestUser(usePubKey)
+u.FsConfig.OSConfig = sdk.OSFsConfig{
+WriteBufferSize: 1,
+ReadBufferSize: 1,
+}
+u.Username += "_buffered"
+u.HomeDir += "_with_buf"
+bufferedUser, _, err := httpdtest.AddUser(u, http.StatusCreated)
+assert.NoError(t, err)
+
+for _, user := range []dataprovider.User{localUser, sftpUser, bufferedUser} {
 conn, client, err := getSftpClient(user, usePubKey)
 if assert.NoError(t, err) {
 defer conn.Close()
@@ -1475,8 +1485,12 @@ func TestUploadResume(t *testing.T) {
 assert.NoError(t, err)
 _, err = httpdtest.RemoveUser(localUser, http.StatusOK)
 assert.NoError(t, err)
+_, err = httpdtest.RemoveUser(bufferedUser, http.StatusOK)
+assert.NoError(t, err)
 err = os.RemoveAll(localUser.GetHomeDir())
 assert.NoError(t, err)
+err = os.RemoveAll(bufferedUser.GetHomeDir())
+assert.NoError(t, err)
 }
 
 func TestDirCommands(t *testing.T) {
@@ -5570,6 +5584,129 @@ func TestNestedVirtualFolders(t *testing.T) {
 assert.NoError(t, err)
 }
 
+func TestBufferedUser(t *testing.T) {
+usePubKey := true
+u := getTestUser(usePubKey)
+u.QuotaFiles = 1000
+u.FsConfig.OSConfig = sdk.OSFsConfig{
+WriteBufferSize: 2,
+ReadBufferSize: 1,
+}
+vdirPath := "/crypted"
+mappedPath := filepath.Join(os.TempDir(), util.GenerateUniqueID())
+folderName := filepath.Base(mappedPath)
+u.VirtualFolders = append(u.VirtualFolders, vfs.VirtualFolder{
+BaseVirtualFolder: vfs.BaseVirtualFolder{
+Name: folderName,
+MappedPath: mappedPath,
+FsConfig: vfs.Filesystem{
+Provider: sdk.CryptedFilesystemProvider,
+CryptConfig: vfs.CryptFsConfig{
+OSFsConfig: sdk.OSFsConfig{
+WriteBufferSize: 3,
+ReadBufferSize: 2,
+},
+Passphrase: kms.NewPlainSecret(defaultPassword),
+},
+},
+},
+VirtualPath: vdirPath,
+QuotaFiles: -1,
+QuotaSize: -1,
+})
+user, _, err := httpdtest.AddUser(u, http.StatusCreated)
+assert.NoError(t, err)
+conn, client, err := getSftpClient(user, usePubKey)
+if assert.NoError(t, err) {
+defer conn.Close()
+defer client.Close()
+
+expectedQuotaSize := int64(0)
+expectedQuotaFiles := 0
+fileSize := int64(32768)
+err = writeSFTPFile(testFileName, fileSize, client)
+assert.NoError(t, err)
+expectedQuotaSize += fileSize
+expectedQuotaFiles++
+err = writeSFTPFile(path.Join(vdirPath, testFileName), fileSize, client)
+assert.NoError(t, err)
+expectedQuotaSize += fileSize
+expectedQuotaFiles++
+user, _, err = httpdtest.GetUserByUsername(user.Username, http.StatusOK)
+assert.NoError(t, err)
+assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
+assert.Greater(t, user.UsedQuotaSize, expectedQuotaSize)
+localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
+err = sftpDownloadFile(testFileName, localDownloadPath, fileSize, client)
+assert.NoError(t, err)
+err = sftpDownloadFile(path.Join(vdirPath, testFileName), localDownloadPath, fileSize, client)
+assert.NoError(t, err)
+err = os.Remove(localDownloadPath)
+assert.NoError(t, err)
+err = client.Remove(testFileName)
+assert.NoError(t, err)
+err = client.Remove(path.Join(vdirPath, testFileName))
+assert.NoError(t, err)
+
+data := []byte("test data")
+f, err := client.OpenFile(testFileName, os.O_WRONLY|os.O_CREATE)
+if assert.NoError(t, err) {
+n, err := f.Write(data)
+assert.NoError(t, err)
+assert.Equal(t, len(data), n)
+err = f.Truncate(2)
+assert.NoError(t, err)
+expectedQuotaSize := int64(2)
+expectedQuotaFiles := 0
+user, _, err = httpdtest.GetUserByUsername(user.Username, http.StatusOK)
+assert.NoError(t, err)
+assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
+assert.Equal(t, expectedQuotaSize, user.UsedQuotaSize)
+_, err = f.Seek(expectedQuotaSize, io.SeekStart)
+assert.NoError(t, err)
+n, err = f.Write(data)
+assert.NoError(t, err)
+assert.Equal(t, len(data), n)
+err = f.Truncate(5)
+assert.NoError(t, err)
+expectedQuotaSize = int64(5)
+user, _, err = httpdtest.GetUserByUsername(user.Username, http.StatusOK)
+assert.NoError(t, err)
+assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
+assert.Equal(t, expectedQuotaSize, user.UsedQuotaSize)
+_, err = f.Seek(expectedQuotaSize, io.SeekStart)
+assert.NoError(t, err)
+n, err = f.Write(data)
+assert.NoError(t, err)
+assert.Equal(t, len(data), n)
+err = f.Close()
+assert.NoError(t, err)
+expectedQuotaSize = int64(5) + int64(len(data))
+expectedQuotaFiles = 1
+user, _, err = httpdtest.GetUserByUsername(user.Username, http.StatusOK)
+assert.NoError(t, err)
+assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
+assert.Equal(t, expectedQuotaSize, user.UsedQuotaSize)
+}
+// now truncate by path
+err = client.Truncate(testFileName, 5)
+assert.NoError(t, err)
+user, _, err = httpdtest.GetUserByUsername(user.Username, http.StatusOK)
+assert.NoError(t, err)
+assert.Equal(t, 1, user.UsedQuotaFiles)
+assert.Equal(t, int64(5), user.UsedQuotaSize)
+}
+
+_, err = httpdtest.RemoveUser(user, http.StatusOK)
+assert.NoError(t, err)
+err = os.RemoveAll(user.GetHomeDir())
+assert.NoError(t, err)
+_, err = httpdtest.RemoveFolder(vfs.BaseVirtualFolder{Name: folderName}, http.StatusOK)
+assert.NoError(t, err)
+err = os.RemoveAll(mappedPath)
+assert.NoError(t, err)
+}
+
 func TestTruncateQuotaLimits(t *testing.T) {
 usePubKey := true
 u := getTestUser(usePubKey)
@@ -8047,7 +8184,7 @@ func TestRootDirCommands(t *testing.T) {
 func TestRelativePaths(t *testing.T) {
 user := getTestUser(true)
 var path, rel string
-filesystems := []vfs.Fs{vfs.NewOsFs("", user.GetHomeDir(), "")}
+filesystems := []vfs.Fs{vfs.NewOsFs("", user.GetHomeDir(), "", nil)}
 keyPrefix := strings.TrimPrefix(user.GetHomeDir(), "/") + "/"
 s3config := vfs.S3FsConfig{
 BaseS3FsConfig: sdk.BaseS3FsConfig{
@@ -8112,7 +8249,7 @@ func TestResolvePaths(t *testing.T) {
 user := getTestUser(true)
 var path, resolved string
 var err error
-filesystems := []vfs.Fs{vfs.NewOsFs("", user.GetHomeDir(), "")}
+filesystems := []vfs.Fs{vfs.NewOsFs("", user.GetHomeDir(), "", nil)}
 keyPrefix := strings.TrimPrefix(user.GetHomeDir(), "/") + "/"
 s3config := vfs.S3FsConfig{
 BaseS3FsConfig: sdk.BaseS3FsConfig{
@@ -8175,8 +8312,8 @@ func TestVirtualRelativePaths(t *testing.T) {
 })
 err := os.MkdirAll(mappedPath, os.ModePerm)
 assert.NoError(t, err)
-fsRoot := vfs.NewOsFs("", user.GetHomeDir(), "")
-fsVdir := vfs.NewOsFs("", mappedPath, vdirPath)
+fsRoot := vfs.NewOsFs("", user.GetHomeDir(), "", nil)
+fsVdir := vfs.NewOsFs("", mappedPath, vdirPath, nil)
 rel := fsVdir.GetRelativePath(mappedPath)
 assert.Equal(t, vdirPath, rel)
 rel = fsRoot.GetRelativePath(filepath.Join(mappedPath, ".."))
@@ -517,7 +517,7 @@ func (*AzureBlobFs) isBadRequestError(err error) bool {
 // CheckRootPath creates the specified local root directory if it does not exists
 func (fs *AzureBlobFs) CheckRootPath(username string, uid int, gid int) bool {
 // we need a local directory for temporary files
-osFs := NewOsFs(fs.ConnectionID(), fs.localTempDir, "")
+osFs := NewOsFs(fs.ConnectionID(), fs.localTempDir, "", nil)
 return osFs.CheckRootPath(username, uid, gid)
 }
 
@@ -15,6 +15,7 @@
 package vfs
 
 import (
+"bufio"
 "bytes"
 "crypto/rand"
 "crypto/sha256"
@@ -59,6 +60,8 @@ func NewCryptFs(connectionID, rootDir, mountPath string, config CryptFsConfig) (
 connectionID: connectionID,
 rootDir: rootDir,
 mountPath: getMountPath(mountPath),
+readBufferSize: config.OSFsConfig.ReadBufferSize * 1024 * 1024,
+writeBufferSize: config.OSFsConfig.WriteBufferSize * 1024 * 1024,
 },
 masterKey: []byte(config.Passphrase.GetPayload()),
 }
@@ -103,11 +106,11 @@ func (fs *CryptFs) Open(name string, offset int64) (File, *pipeat.PipeReaderAt,
 var err error
 
 if offset == 0 {
-n, err = sio.Decrypt(w, f, fs.getSIOConfig(key))
+n, err = fs.decryptWrapper(w, f, fs.getSIOConfig(key))
 } else {
 var readerAt io.ReaderAt
 var readed, written int
-buf := make([]byte, 65536)
+buf := make([]byte, 65568)
 wrapper := &cryptedFileWrapper{
 File: f,
 }
@@ -150,14 +153,8 @@ func (fs *CryptFs) Open(name string, offset int64) (File, *pipeat.PipeReaderAt,
 }
 
 // Create creates or opens the named file for writing
-func (fs *CryptFs) Create(name string, flag, _ int) (File, *PipeWriter, func(), error) {
-var err error
-var f *os.File
-if flag == 0 {
-f, err = os.Create(name)
-} else {
-f, err = os.OpenFile(name, flag, 0666)
-}
+func (fs *CryptFs) Create(name string, _, _ int) (File, *PipeWriter, func(), error) {
+f, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
 if err != nil {
 return nil, nil, nil, err
 }
@@ -192,7 +189,18 @@ func (fs *CryptFs) Create(name string, flag, _ int) (File, *PipeWriter, func(),
 p := NewPipeWriter(w)
 
 go func() {
-n, err := sio.Encrypt(f, r, fs.getSIOConfig(key))
+var n int64
+var err error
+if fs.writeBufferSize <= 0 {
+n, err = sio.Encrypt(f, r, fs.getSIOConfig(key))
+} else {
+bw := bufio.NewWriterSize(f, fs.writeBufferSize)
+n, err = fs.encryptWrapper(bw, r, fs.getSIOConfig(key))
+errFlush := bw.Flush()
+if err == nil && errFlush != nil {
+err = errFlush
+}
+}
 errClose := f.Close()
 if err == nil && errClose != nil {
 err = errClose
@@ -311,6 +319,26 @@ func (fs *CryptFs) getFileAndEncryptionKey(name string) (*os.File, [32]byte, err
 return f, key, err
 }
 
+func (*CryptFs) encryptWrapper(dst io.Writer, src io.Reader, config sio.Config) (int64, error) {
+encReader, err := sio.EncryptReader(src, config)
+if err != nil {
+return 0, err
+}
+return doCopy(dst, encReader, make([]byte, 65568))
+}
+
+func (fs *CryptFs) decryptWrapper(dst io.Writer, src io.Reader, config sio.Config) (int64, error) {
+if fs.readBufferSize <= 0 {
+return sio.Decrypt(dst, src, config)
+}
+br := bufio.NewReaderSize(src, fs.readBufferSize)
+decReader, err := sio.DecryptReader(br, config)
+if err != nil {
+return 0, err
+}
+return doCopy(dst, decReader, make([]byte, 65568))
+}
+
 func isZeroBytesDownload(f *os.File, offset int64) (bool, error) {
 info, err := f.Stat()
 if err != nil {
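For context on the 65568-byte buffers used above: a sio/DARE package carries up to 64 KiB of plaintext plus a small per-package header and authentication tag, so the copy buffer is sized to hold one full encrypted package (this framing detail is an assumption about the minio/sio format, not something stated in the diff). A hedged sketch of the buffered-encryption pattern that encryptWrapper implements; the real code uses its own doCopy helper, the sketch uses io.CopyBuffer for brevity:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"

	"github.com/minio/sio"
)

// bufferedEncrypt mirrors the encryptWrapper idea: wrap the plaintext source in
// sio.EncryptReader and copy the ciphertext into a bufio.Writer so writes reach
// the destination in large chunks. Assumes a 32-byte key in cfg.
func bufferedEncrypt(dst io.Writer, src io.Reader, cfg sio.Config, bufSize int) (int64, error) {
	bw := bufio.NewWriterSize(dst, bufSize)
	encReader, err := sio.EncryptReader(src, cfg)
	if err != nil {
		return 0, err
	}
	// 64 KiB payload + per-package overhead, matching the 65568 used in the diff
	n, err := io.CopyBuffer(bw, encReader, make([]byte, 65568))
	if flushErr := bw.Flush(); err == nil {
		err = flushErr
	}
	return n, err
}

func main() {
	key := make([]byte, 32) // demo key, all zeros
	var out bytes.Buffer
	n, err := bufferedEncrypt(&out, bytes.NewReader([]byte("hello")), sio.Config{Key: key}, 1<<20)
	fmt.Println(n, err, out.Len())
}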
@@ -26,6 +26,7 @@ import (
 type Filesystem struct {
 RedactedSecret string `json:"-"`
 Provider sdk.FilesystemProvider `json:"provider"`
+OSConfig sdk.OSFsConfig `json:"osconfig,omitempty"`
 S3Config S3FsConfig `json:"s3config,omitempty"`
 GCSConfig GCSFsConfig `json:"gcsconfig,omitempty"`
 AzBlobConfig AzBlobFsConfig `json:"azblobconfig,omitempty"`
@@ -169,6 +170,7 @@ func (f *Filesystem) Validate(additionalData string) error {
 if err := f.S3Config.ValidateAndEncryptCredentials(additionalData); err != nil {
 return err
 }
+f.OSConfig = sdk.OSFsConfig{}
 f.GCSConfig = GCSFsConfig{}
 f.AzBlobConfig = AzBlobFsConfig{}
 f.CryptConfig = CryptFsConfig{}
@@ -179,6 +181,7 @@ func (f *Filesystem) Validate(additionalData string) error {
 if err := f.GCSConfig.ValidateAndEncryptCredentials(additionalData); err != nil {
 return err
 }
+f.OSConfig = sdk.OSFsConfig{}
 f.S3Config = S3FsConfig{}
 f.AzBlobConfig = AzBlobFsConfig{}
 f.CryptConfig = CryptFsConfig{}
@@ -189,6 +192,7 @@ func (f *Filesystem) Validate(additionalData string) error {
 if err := f.AzBlobConfig.ValidateAndEncryptCredentials(additionalData); err != nil {
 return err
 }
+f.OSConfig = sdk.OSFsConfig{}
 f.S3Config = S3FsConfig{}
 f.GCSConfig = GCSFsConfig{}
 f.CryptConfig = CryptFsConfig{}
@@ -199,16 +203,18 @@ func (f *Filesystem) Validate(additionalData string) error {
 if err := f.CryptConfig.ValidateAndEncryptCredentials(additionalData); err != nil {
 return err
 }
+f.OSConfig = sdk.OSFsConfig{}
 f.S3Config = S3FsConfig{}
 f.GCSConfig = GCSFsConfig{}
 f.AzBlobConfig = AzBlobFsConfig{}
 f.SFTPConfig = SFTPFsConfig{}
 f.HTTPConfig = HTTPFsConfig{}
-return nil
+return validateOSFsConfig(&f.CryptConfig.OSFsConfig)
 case sdk.SFTPFilesystemProvider:
 if err := f.SFTPConfig.ValidateAndEncryptCredentials(additionalData); err != nil {
 return err
 }
+f.OSConfig = sdk.OSFsConfig{}
 f.S3Config = S3FsConfig{}
 f.GCSConfig = GCSFsConfig{}
 f.AzBlobConfig = AzBlobFsConfig{}
@@ -219,6 +225,7 @@ func (f *Filesystem) Validate(additionalData string) error {
 if err := f.HTTPConfig.ValidateAndEncryptCredentials(additionalData); err != nil {
 return err
 }
+f.OSConfig = sdk.OSFsConfig{}
 f.S3Config = S3FsConfig{}
 f.GCSConfig = GCSFsConfig{}
 f.AzBlobConfig = AzBlobFsConfig{}
@@ -233,7 +240,7 @@ func (f *Filesystem) Validate(additionalData string) error {
 f.CryptConfig = CryptFsConfig{}
 f.SFTPConfig = SFTPFsConfig{}
 f.HTTPConfig = HTTPFsConfig{}
-return nil
+return validateOSFsConfig(&f.OSConfig)
 }
 }
 
@@ -293,6 +300,10 @@ func (f *Filesystem) GetACopy() Filesystem {
 f.SetEmptySecretsIfNil()
 fs := Filesystem{
 Provider: f.Provider,
+OSConfig: sdk.OSFsConfig{
+ReadBufferSize: f.OSConfig.ReadBufferSize,
+WriteBufferSize: f.OSConfig.WriteBufferSize,
+},
 S3Config: S3FsConfig{
 BaseS3FsConfig: sdk.BaseS3FsConfig{
 Bucket: f.S3Config.Bucket,
@@ -342,6 +353,10 @@ func (f *Filesystem) GetACopy() Filesystem {
 SASURL: f.AzBlobConfig.SASURL.Clone(),
 },
 CryptConfig: CryptFsConfig{
+OSFsConfig: sdk.OSFsConfig{
+ReadBufferSize: f.CryptConfig.ReadBufferSize,
+WriteBufferSize: f.CryptConfig.WriteBufferSize,
+},
 Passphrase: f.CryptConfig.Passphrase.Clone(),
 },
 SFTPConfig: SFTPFsConfig{
@@ -207,7 +207,7 @@ func (v *VirtualFolder) GetFilesystem(connectionID string, forbiddenSelfUsers []
 case sdk.HTTPFilesystemProvider:
 return NewHTTPFs(connectionID, v.MappedPath, v.VirtualPath, v.FsConfig.HTTPConfig)
 default:
-return NewOsFs(connectionID, v.MappedPath, v.VirtualPath), nil
+return NewOsFs(connectionID, v.MappedPath, v.VirtualPath, &v.FsConfig.OSConfig), nil
 }
 }
 
@@ -472,7 +472,7 @@ func (*GCSFs) IsNotSupported(err error) bool {
 // CheckRootPath creates the specified local root directory if it does not exists
 func (fs *GCSFs) CheckRootPath(username string, uid int, gid int) bool {
 // we need a local directory for temporary files
-osFs := NewOsFs(fs.ConnectionID(), fs.localTempDir, "")
+osFs := NewOsFs(fs.ConnectionID(), fs.localTempDir, "", nil)
 return osFs.CheckRootPath(username, uid, gid)
 }
 
@@ -523,7 +523,7 @@ func (*HTTPFs) IsNotSupported(err error) bool {
 // CheckRootPath creates the specified local root directory if it does not exists
 func (fs *HTTPFs) CheckRootPath(username string, uid int, gid int) bool {
 // we need a local directory for temporary files
-osFs := NewOsFs(fs.ConnectionID(), fs.localTempDir, "")
+osFs := NewOsFs(fs.ConnectionID(), fs.localTempDir, "", nil)
 return osFs.CheckRootPath(username, uid, gid)
 }
 
@@ -15,6 +15,7 @@
 package vfs
 
 import (
+"bufio"
 "errors"
 "fmt"
 "io"
@@ -30,6 +31,7 @@ import (
 fscopy "github.com/otiai10/copy"
 "github.com/pkg/sftp"
 "github.com/rs/xid"
+"github.com/sftpgo/sdk"
 
 "github.com/drakkan/sftpgo/v2/internal/logger"
 "github.com/drakkan/sftpgo/v2/internal/util"
@@ -55,15 +57,32 @@ type OsFs struct {
 rootDir string
 // if not empty this fs is mouted as virtual folder in the specified path
 mountPath string
+localTempDir string
+readBufferSize int
+writeBufferSize int
 }
 
 // NewOsFs returns an OsFs object that allows to interact with local Os filesystem
-func NewOsFs(connectionID, rootDir, mountPath string) Fs {
+func NewOsFs(connectionID, rootDir, mountPath string, config *sdk.OSFsConfig) Fs {
+var tempDir string
+if tempPath != "" {
+tempDir = tempPath
+} else {
+tempDir = filepath.Clean(os.TempDir())
+}
+var readBufferSize, writeBufferSize int
+if config != nil {
+readBufferSize = config.ReadBufferSize * 1024 * 1024
+writeBufferSize = config.WriteBufferSize * 1024 * 1024
+}
 return &OsFs{
 name: osFsName,
 connectionID: connectionID,
 rootDir: rootDir,
 mountPath: getMountPath(mountPath),
+localTempDir: tempDir,
+readBufferSize: readBufferSize,
+writeBufferSize: writeBufferSize,
 }
 }
 
@@ -88,7 +107,7 @@ func (fs *OsFs) Lstat(name string) (os.FileInfo, error) {
 }
 
 // Open opens the named file for reading
-func (*OsFs) Open(name string, offset int64) (File, *pipeat.PipeReaderAt, func(), error) {
+func (fs *OsFs) Open(name string, offset int64) (File, *pipeat.PipeReaderAt, func(), error) {
 f, err := os.Open(name)
 if err != nil {
 return nil, nil, nil, err
@@ -100,11 +119,28 @@ func (*OsFs) Open(name string, offset int64) (File, *pipeat.PipeReaderAt, func()
 return nil, nil, nil, err
 }
 }
+if fs.readBufferSize <= 0 {
 return f, nil, nil, err
+}
+r, w, err := pipeat.PipeInDir(fs.localTempDir)
+if err != nil {
+f.Close()
+return nil, nil, nil, err
+}
+go func() {
+br := bufio.NewReaderSize(f, fs.readBufferSize)
+n, err := doCopy(w, br, nil)
+w.CloseWithError(err) //nolint:errcheck
+f.Close()
+fsLog(fs, logger.LevelDebug, "download completed, path: %q size: %v, err: %v", name, n, err)
+}()
+
+return nil, r, nil, nil
 }
 
 // Create creates or opens the named file for writing
-func (*OsFs) Create(name string, flag, _ int) (File, *PipeWriter, func(), error) {
+func (fs *OsFs) Create(name string, flag, _ int) (File, *PipeWriter, func(), error) {
+if !fs.useWriteBuffering(flag) {
 var err error
 var f *os.File
 if flag == 0 {
@@ -113,6 +149,35 @@ func (*OsFs) Create(name string, flag, _ int) (File, *PipeWriter, func(), error)
 f, err = os.OpenFile(name, flag, 0666)
 }
 return f, nil, nil, err
+}
+f, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+if err != nil {
+return nil, nil, nil, err
+}
+r, w, err := pipeat.PipeInDir(fs.localTempDir)
+if err != nil {
+f.Close()
+return nil, nil, nil, err
+}
+p := NewPipeWriter(w)
+
+go func() {
+bw := bufio.NewWriterSize(f, fs.writeBufferSize)
+n, err := doCopy(bw, r, nil)
+errFlush := bw.Flush()
+if err == nil && errFlush != nil {
+err = errFlush
+}
+errClose := f.Close()
+if err == nil && errClose != nil {
+err = errClose
+}
+r.CloseWithError(err) //nolint:errcheck
+p.Done(err)
+fsLog(fs, logger.LevelDebug, "upload completed, path: %q, readed bytes: %v, err: %v", name, n, err)
+}()
+
+return nil, p, nil, nil
 }
 
 // Rename renames (moves) source to target
@@ -124,10 +189,16 @@ func (fs *OsFs) Rename(source, target string) (int, int64, error) {
 if err != nil && isCrossDeviceError(err) {
 fsLog(fs, logger.LevelError, "cross device error detected while renaming %q -> %q. Trying a copy and remove, this could take a long time",
 source, target)
+var readBufferSize uint
+if fs.readBufferSize > 0 {
+readBufferSize = uint(fs.readBufferSize)
+}
+
 err = fscopy.Copy(source, target, fscopy.Options{
 OnSymlink: func(src string) fscopy.SymlinkAction {
 return fscopy.Skip
 },
+CopyBufferSize: readBufferSize,
 })
 if err != nil {
 fsLog(fs, logger.LevelError, "cross device copy error: %v", err)
@@ -509,3 +580,21 @@ func (*OsFs) Close() error {
 func (*OsFs) GetAvailableDiskSize(dirName string) (*sftp.StatVFS, error) {
 return getStatFS(dirName)
 }
+
+func (fs *OsFs) useWriteBuffering(flag int) bool {
+if fs.writeBufferSize <= 0 {
+return false
+}
+if flag == 0 {
+return true
+}
+if flag&os.O_TRUNC == 0 {
+fsLog(fs, logger.LevelDebug, "truncate flag missing, buffering write not possible")
+return false
+}
+if flag&os.O_RDWR != 0 {
+fsLog(fs, logger.LevelDebug, "read and write flag found, buffering write not possible")
+return false
+}
+return true
+}
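A minimal usage sketch of the new constructor signature (the connection ID and paths below are made-up values; note that the vfs package lives under internal/ in the repository, so this only compiles from within the sftpgo module). Buffer sizes are configured in megabytes, converted to bytes inside NewOsFs, and a nil config keeps the previous unbuffered behaviour:

package main

import (
	"fmt"

	"github.com/sftpgo/sdk"

	"github.com/drakkan/sftpgo/v2/internal/vfs"
)

func main() {
	// unbuffered, equivalent to the old three-argument constructor
	plain := vfs.NewOsFs("conn1", "/srv/sftpgo/data", "", nil)

	// buffered: 2 MB read buffer, 4 MB write buffer (values are MB, 0-10 is accepted by validation)
	buffered := vfs.NewOsFs("conn2", "/srv/sftpgo/data", "", &sdk.OSFsConfig{
		ReadBufferSize:  2,
		WriteBufferSize: 4,
	})
	fmt.Println(plain.Name(), buffered.Name())
}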
@@ -512,7 +512,7 @@ func (*S3Fs) IsNotSupported(err error) bool {
 // CheckRootPath creates the specified local root directory if it does not exists
 func (fs *S3Fs) CheckRootPath(username string, uid int, gid int) bool {
 // we need a local directory for temporary files
-osFs := NewOsFs(fs.ConnectionID(), fs.localTempDir, "")
+osFs := NewOsFs(fs.ConnectionID(), fs.localTempDir, "", nil)
 return osFs.CheckRootPath(username, uid, gid)
 }
 
@@ -404,7 +404,7 @@ func (fs *SFTPFs) Create(name string, flag, _ int) (File, *PipeWriter, func(), e
 bw := bufio.NewWriterSize(f, int(fs.config.BufferSize)*1024*1024)
 // we don't use io.Copy since bufio.Writer implements io.WriterTo and
 // so it calls the sftp.File WriteTo method without buffering
-n, err := fs.copy(bw, r)
+n, err := doCopy(bw, r, nil)
 errFlush := bw.Flush()
 if err == nil && errFlush != nil {
 err = errFlush
@@ -573,7 +573,7 @@ func (*SFTPFs) IsNotSupported(err error) bool {
 // CheckRootPath creates the specified local root directory if it does not exists
 func (fs *SFTPFs) CheckRootPath(username string, uid int, gid int) bool {
 // local directory for temporary files in buffer mode
-osFs := NewOsFs(fs.ConnectionID(), fs.localTempDir, "")
+osFs := NewOsFs(fs.ConnectionID(), fs.localTempDir, "", nil)
 osFs.CheckRootPath(username, uid, gid)
 if fs.config.Prefix == "/" {
 return true
@@ -841,38 +841,6 @@ func (fs *SFTPFs) Close() error {
 return nil
 }
 
-func (fs *SFTPFs) copy(dst io.Writer, src io.Reader) (written int64, err error) {
-buf := make([]byte, 32768)
-for {
-nr, er := src.Read(buf)
-if nr > 0 {
-nw, ew := dst.Write(buf[0:nr])
-if nw < 0 || nr < nw {
-nw = 0
-if ew == nil {
-ew = errors.New("invalid write")
-}
-}
-written += int64(nw)
-if ew != nil {
-err = ew
-break
-}
-if nr != nw {
-err = io.ErrShortWrite
-break
-}
-}
-if er != nil {
-if er != io.EOF {
-err = er
-}
-break
-}
-}
-return written, err
-}
-
 func (fs *SFTPFs) createConnection() error {
 err := fs.conn.OpenConnection()
 if err != nil {
@@ -632,6 +632,7 @@ func (c *AzBlobFsConfig) validate() error {
 
 // CryptFsConfig defines the configuration to store local files as encrypted
 type CryptFsConfig struct {
+sdk.OSFsConfig
 Passphrase *kms.Secret `json:"passphrase,omitempty"`
 }
 
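Because sdk.OSFsConfig is embedded, its buffer fields are promoted and can be set or read directly on a CryptFsConfig, which is what the tests and compareFsConfig above rely on. A tiny sketch with reduced stand-in types (the real Passphrase field is a *kms.Secret):

package main

import "fmt"

// reduced stand-ins for sdk.OSFsConfig and vfs.CryptFsConfig, to show field promotion
type OSFsConfig struct {
	ReadBufferSize  int
	WriteBufferSize int
}

type CryptFsConfig struct {
	OSFsConfig        // embedded, so ReadBufferSize/WriteBufferSize are promoted
	Passphrase string // stand-in for *kms.Secret
}

func main() {
	cfg := CryptFsConfig{}
	cfg.ReadBufferSize = 1 // promoted field, no cfg.OSFsConfig.ReadBufferSize needed
	cfg.WriteBufferSize = 2
	fmt.Printf("%+v\n", cfg)
}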
@@ -757,21 +758,24 @@ func IsHTTPFs(fs Fs) bool {
 return strings.HasPrefix(fs.Name(), httpFsName)
 }
 
-// IsBufferedSFTPFs returns true if this is a buffered SFTP filesystem
-func IsBufferedSFTPFs(fs Fs) bool {
+// IsBufferedLocalOrSFTPFs returns true if this is a buffered SFTP or local filesystem
+func IsBufferedLocalOrSFTPFs(fs Fs) bool {
+if osFs, ok := fs.(*OsFs); ok {
+return osFs.writeBufferSize > 0
+}
 if !IsSFTPFs(fs) {
 return false
 }
 return !fs.IsUploadResumeSupported()
 }
 
-// IsLocalOrUnbufferedSFTPFs returns true if fs is local or SFTP with no buffer
-func IsLocalOrUnbufferedSFTPFs(fs Fs) bool {
-if IsLocalOsFs(fs) {
-return true
-}
-if IsSFTPFs(fs) {
-return fs.IsUploadResumeSupported()
-}
+// FsOpenReturnsFile returns true if fs.Open returns a *os.File handle
+func FsOpenReturnsFile(fs Fs) bool {
+if osFs, ok := fs.(*OsFs); ok {
+return osFs.readBufferSize == 0
+}
+if sftpFs, ok := fs.(*SFTPFs); ok {
+return sftpFs.config.BufferSize == 0
+}
 return false
 }
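A hedged note on why callers care about FsOpenReturnsFile: with read buffering enabled, OsFs.Open hands back a pipe reader instead of an *os.File, so code that needs a seekable handle (as the WebDAV checkFile change later in this diff does) has to ask first. An illustrative sketch, compilable only inside the sftpgo module since vfs is an internal package:

package main

import (
	"fmt"

	"github.com/drakkan/sftpgo/v2/internal/vfs"
)

// serveNote is illustrative only: report whether a ranged/seek-based download
// can reuse the *os.File returned by Open, mirroring the WebDAV checkFile logic.
func serveNote(fs vfs.Fs) string {
	if vfs.FsOpenReturnsFile(fs) {
		return "Open returns *os.File: the handler may Seek on it directly"
	}
	return "buffered fs: Open returns a pipe reader, pass the wanted offset to Open instead"
}

func main() {
	fmt.Println(serveNote(vfs.NewOsFs("connID", "/tmp", "", nil)))
}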
@@ -929,6 +933,50 @@ func fsMetadataCheck(fs fsMetadataChecker, storageID, keyPrefix string) error {
 }
 }
 
+func validateOSFsConfig(config *sdk.OSFsConfig) error {
+if config.ReadBufferSize < 0 || config.ReadBufferSize > 10 {
+return fmt.Errorf("invalid read buffer size must be between 0 and 10 MB")
+}
+if config.WriteBufferSize < 0 || config.WriteBufferSize > 10 {
+return fmt.Errorf("invalid write buffer size must be between 0 and 10 MB")
+}
+return nil
+}
+
+func doCopy(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) {
+if buf == nil {
+buf = make([]byte, 32768)
+}
+for {
+nr, er := src.Read(buf)
+if nr > 0 {
+nw, ew := dst.Write(buf[0:nr])
+if nw < 0 || nr < nw {
+nw = 0
+if ew == nil {
+ew = errors.New("invalid write")
+}
+}
+written += int64(nw)
+if ew != nil {
+err = ew
+break
+}
+if nr != nw {
+err = io.ErrShortWrite
+break
+}
+}
+if er != nil {
+if er != io.EOF {
+err = er
+}
+break
+}
+}
+return written, err
+}
+
 func getMountPath(mountPath string) string {
 if mountPath == "/" {
 return ""
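doCopy replaces the per-backend copy loop (the SFTPFs.copy removed earlier in this diff) so OsFs, CryptFs and SFTPFs share one implementation; callers may pass their own buffer or nil for the default 32 KB one, and the explicit loop never hands control to the WriterTo/ReaderFrom fast paths that could bypass a wrapping bufio.Writer. A hedged usage-style sketch of the calling pattern, with a stand-in copy function since doCopy itself is unexported:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strings"
)

// copyWithBuf is a stand-in for the unexported doCopy: an explicit read/write
// loop, so the wrapping bufio.Writer keeps doing the batching.
func copyWithBuf(dst io.Writer, src io.Reader, buf []byte) (int64, error) {
	if buf == nil {
		buf = make([]byte, 32768) // same default size as doCopy
	}
	var written int64
	for {
		nr, er := src.Read(buf)
		if nr > 0 {
			nw, ew := dst.Write(buf[:nr])
			written += int64(nw)
			if ew != nil {
				return written, ew
			}
			if nw != nr {
				return written, io.ErrShortWrite
			}
		}
		if er == io.EOF {
			return written, nil
		}
		if er != nil {
			return written, er
		}
	}
}

func main() {
	var out bytes.Buffer
	bw := bufio.NewWriterSize(&out, 1<<20) // 1 MB write buffer, as a buffered fs would use
	n, err := copyWithBuf(bw, strings.NewReader("example payload"), nil)
	if err == nil {
		err = bw.Flush() // callers of doCopy flush the bufio.Writer afterwards
	}
	fmt.Println(n, err, out.String())
}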
@@ -282,7 +282,7 @@ func (f *webDavFile) updateTransferQuotaOnSeek() {
 }
 
 func (f *webDavFile) checkFile() error {
-if f.File == nil && vfs.IsLocalOrUnbufferedSFTPFs(f.Fs) {
+if f.File == nil && vfs.FsOpenReturnsFile(f.Fs) {
 file, _, _, err := f.Fs.Open(f.GetFsPath(), 0)
 if err != nil {
 f.Connection.Log(logger.LevelWarn, "could not open file %q for seeking: %v",
@ -327,7 +327,7 @@ func (fs *MockOsFs) GetMimeType(_ string) (string, error) {
|
||||||
|
|
||||||
func newMockOsFs(atomicUpload bool, connectionID, rootDir string, reader *pipeat.PipeReaderAt, err error) vfs.Fs {
|
func newMockOsFs(atomicUpload bool, connectionID, rootDir string, reader *pipeat.PipeReaderAt, err error) vfs.Fs {
|
||||||
return &MockOsFs{
|
return &MockOsFs{
|
||||||
Fs: vfs.NewOsFs(connectionID, rootDir, ""),
|
Fs: vfs.NewOsFs(connectionID, rootDir, "", nil),
|
||||||
isAtomicUploadSupported: atomicUpload,
|
isAtomicUploadSupported: atomicUpload,
|
||||||
reader: reader,
|
reader: reader,
|
||||||
err: err,
|
err: err,
|
||||||
|
@ -484,7 +484,7 @@ func TestResolvePathErrors(t *testing.T) {
|
||||||
}
|
}
|
||||||
user.Permissions = make(map[string][]string)
|
user.Permissions = make(map[string][]string)
|
||||||
user.Permissions["/"] = []string{dataprovider.PermAny}
|
user.Permissions["/"] = []string{dataprovider.PermAny}
|
||||||
fs := vfs.NewOsFs("connID", user.HomeDir, "")
|
fs := vfs.NewOsFs("connID", user.HomeDir, "", nil)
|
||||||
connection := &Connection{
|
connection := &Connection{
|
||||||
BaseConnection: common.NewBaseConnection(fs.ConnectionID(), common.ProtocolWebDAV, "", "", user),
|
BaseConnection: common.NewBaseConnection(fs.ConnectionID(), common.ProtocolWebDAV, "", "", user),
|
||||||
}
|
}
|
||||||
@@ -517,7 +517,7 @@ func TestResolvePathErrors(t *testing.T) {
     if runtime.GOOS != "windows" {
         user.HomeDir = filepath.Clean(os.TempDir())
         connection.User = user
-        fs := vfs.NewOsFs("connID", connection.User.HomeDir, "")
+        fs := vfs.NewOsFs("connID", connection.User.HomeDir, "", nil)
         subDir := "sub"
         testTxtFile := "file.txt"
         err = os.MkdirAll(filepath.Join(os.TempDir(), subDir, subDir), os.ModePerm)
@@ -555,7 +555,7 @@ func TestFileAccessErrors(t *testing.T) {
     }
     user.Permissions = make(map[string][]string)
     user.Permissions["/"] = []string{dataprovider.PermAny}
-    fs := vfs.NewOsFs("connID", user.HomeDir, "")
+    fs := vfs.NewOsFs("connID", user.HomeDir, "", nil)
     connection := &Connection{
         BaseConnection: common.NewBaseConnection(fs.ConnectionID(), common.ProtocolWebDAV, "", "", user),
     }
@@ -648,7 +648,7 @@ func TestCheckRequestMethodWithPrefix(t *testing.T) {
             },
         },
     }
-    fs := vfs.NewOsFs("connID", user.HomeDir, "")
+    fs := vfs.NewOsFs("connID", user.HomeDir, "", nil)
     connection := &Connection{
         BaseConnection: common.NewBaseConnection(fs.ConnectionID(), common.ProtocolWebDAV, "", "", user),
     }
@@ -672,7 +672,7 @@ func TestContentType(t *testing.T) {
     }
     user.Permissions = make(map[string][]string)
     user.Permissions["/"] = []string{dataprovider.PermAny}
-    fs := vfs.NewOsFs("connID", user.HomeDir, "")
+    fs := vfs.NewOsFs("connID", user.HomeDir, "", nil)
     connection := &Connection{
         BaseConnection: common.NewBaseConnection(fs.ConnectionID(), common.ProtocolWebDAV, "", "", user),
     }
@@ -699,7 +699,7 @@ func TestContentType(t *testing.T) {
     baseTransfer = common.NewBaseTransfer(nil, connection.BaseConnection, nil, testFilePath, testFilePath, testFile+".unknown1",
         common.TransferDownload, 0, 0, 0, 0, false, fs, dataprovider.TransferQuota{})
     davFile = newWebDavFile(baseTransfer, nil, nil)
-    davFile.Fs = vfs.NewOsFs("id", user.HomeDir, "")
+    davFile.Fs = vfs.NewOsFs("id", user.HomeDir, "", nil)
     fi, err = davFile.Stat()
     if assert.NoError(t, err) {
         ctype, err := fi.(*webDavFileInfo).ContentType(ctx)
@@ -712,7 +712,7 @@ func TestContentType(t *testing.T) {
     baseTransfer = common.NewBaseTransfer(nil, connection.BaseConnection, nil, testFilePath, testFilePath, testFile,
         common.TransferDownload, 0, 0, 0, 0, false, fs, dataprovider.TransferQuota{})
     davFile = newWebDavFile(baseTransfer, nil, nil)
-    davFile.Fs = vfs.NewOsFs("id", user.HomeDir, "")
+    davFile.Fs = vfs.NewOsFs("id", user.HomeDir, "", nil)
    fi, err = davFile.Stat()
     if assert.NoError(t, err) {
         ctype, err := fi.(*webDavFileInfo).ContentType(ctx)
@@ -727,7 +727,7 @@ func TestContentType(t *testing.T) {
     baseTransfer = common.NewBaseTransfer(nil, connection.BaseConnection, nil, testFilePath, testFilePath, testFile+".custom",
         common.TransferDownload, 0, 0, 0, 0, false, fs, dataprovider.TransferQuota{})
     davFile = newWebDavFile(baseTransfer, nil, nil)
-    davFile.Fs = vfs.NewOsFs("id", user.HomeDir, "")
+    davFile.Fs = vfs.NewOsFs("id", user.HomeDir, "", nil)
     fi, err = davFile.Stat()
     if assert.NoError(t, err) {
         ctype, err := fi.(*webDavFileInfo).ContentType(ctx)
@@ -781,7 +781,7 @@ func TestTransferReadWriteErrors(t *testing.T) {
     }
     user.Permissions = make(map[string][]string)
     user.Permissions["/"] = []string{dataprovider.PermAny}
-    fs := vfs.NewOsFs("connID", user.HomeDir, "")
+    fs := vfs.NewOsFs("connID", user.HomeDir, "", nil)
     connection := &Connection{
         BaseConnection: common.NewBaseConnection(fs.ConnectionID(), common.ProtocolWebDAV, "", "", user),
     }
@@ -937,7 +937,7 @@ func TestTransferSeek(t *testing.T) {
     assert.True(t, fs.IsNotExist(err))
     davFile.Connection.RemoveTransfer(davFile.BaseTransfer)

-    fs = vfs.NewOsFs(fs.ConnectionID(), user.GetHomeDir(), "")
+    fs = vfs.NewOsFs(fs.ConnectionID(), user.GetHomeDir(), "", nil)
     baseTransfer = common.NewBaseTransfer(nil, connection.BaseConnection, nil, testFilePath+"1", testFilePath+"1", testFile,
         common.TransferDownload, 0, 0, 0, 0, false, fs, dataprovider.TransferQuota{AllowedTotalSize: 100})
     davFile = newWebDavFile(baseTransfer, nil, nil)

@@ -52,6 +52,7 @@ import (
     "github.com/drakkan/sftpgo/v2/internal/kms"
     "github.com/drakkan/sftpgo/v2/internal/logger"
     "github.com/drakkan/sftpgo/v2/internal/sftpd"
+    "github.com/drakkan/sftpgo/v2/internal/util"
     "github.com/drakkan/sftpgo/v2/internal/vfs"
     "github.com/drakkan/sftpgo/v2/internal/webdavd"
 )
@@ -720,6 +721,70 @@ func TestBasicHandlingCryptFs(t *testing.T) {
         1*time.Second, 100*time.Millisecond)
 }

+func TestBufferedUser(t *testing.T) {
+    u := getTestUser()
+    u.FsConfig.OSConfig = sdk.OSFsConfig{
+        WriteBufferSize: 2,
+        ReadBufferSize:  1,
+    }
+    vdirPath := "/crypted"
+    mappedPath := filepath.Join(os.TempDir(), util.GenerateUniqueID())
+    folderName := filepath.Base(mappedPath)
+    u.VirtualFolders = append(u.VirtualFolders, vfs.VirtualFolder{
+        BaseVirtualFolder: vfs.BaseVirtualFolder{
+            Name:       folderName,
+            MappedPath: mappedPath,
+            FsConfig: vfs.Filesystem{
+                Provider: sdk.CryptedFilesystemProvider,
+                CryptConfig: vfs.CryptFsConfig{
+                    OSFsConfig: sdk.OSFsConfig{
+                        WriteBufferSize: 3,
+                        ReadBufferSize:  2,
+                    },
+                    Passphrase: kms.NewPlainSecret(defaultPassword),
+                },
+            },
+        },
+        VirtualPath: vdirPath,
+        QuotaFiles:  -1,
+        QuotaSize:   -1,
+    })
+    user, _, err := httpdtest.AddUser(u, http.StatusCreated)
+    assert.NoError(t, err)
+
+    client := getWebDavClient(user, false, nil)
+    assert.NoError(t, checkBasicFunc(client))
+
+    testFilePath := filepath.Join(homeBasePath, testFileName)
+    testFileSize := int64(65535)
+    err = createTestFile(testFilePath, testFileSize)
+    assert.NoError(t, err)
+    err = uploadFileWithRawClient(testFilePath, testFileName,
+        user.Username, defaultPassword, false, testFileSize, client)
+    assert.NoError(t, err)
+    err = uploadFileWithRawClient(testFilePath, path.Join(vdirPath, testFileName),
+        user.Username, defaultPassword, false, testFileSize, client)
+    assert.NoError(t, err)
+    localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
+    err = downloadFile(testFileName, localDownloadPath, testFileSize, client)
+    assert.NoError(t, err)
+    err = downloadFile(path.Join(vdirPath, testFileName), localDownloadPath, testFileSize, client)
+    assert.NoError(t, err)
+
+    err = os.Remove(testFilePath)
+    assert.NoError(t, err)
+    err = os.Remove(localDownloadPath)
+    assert.NoError(t, err)
+    _, err = httpdtest.RemoveUser(user, http.StatusOK)
+    assert.NoError(t, err)
+    err = os.RemoveAll(user.GetHomeDir())
+    assert.NoError(t, err)
+    _, err = httpdtest.RemoveFolder(vfs.BaseVirtualFolder{Name: folderName}, http.StatusOK)
+    assert.NoError(t, err)
+    err = os.RemoveAll(mappedPath)
+    assert.NoError(t, err)
+}
+
 func TestLoginEmptyPassword(t *testing.T) {
     u := getTestUser()
     u.Password = ""
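
TestBufferedUser above exercises uploads and downloads of a 65535-byte file through a buffered local filesystem (1-2 MB buffers) and a buffered crypt virtual folder (2-3 MB buffers), checking the reported sizes. A hypothetical extra check, not part of this diff, would be to compare checksums of the source file and the downloaded copy to confirm that buffering never alters content. A minimal standalone sketch; the paths are stand-ins for the test's temporary files:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

// fileSHA256 returns the hex-encoded SHA-256 digest of the file at path.
func fileSHA256(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	src, err := fileSHA256("/tmp/testfile.dat") // hypothetical upload source
	if err != nil {
		panic(err)
	}
	dst, err := fileSHA256("/tmp/testfile.dat.dl") // hypothetical downloaded copy
	if err != nil {
		panic(err)
	}
	fmt.Println("content matches:", src == dst)
}
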
@@ -1497,7 +1562,7 @@ func TestMaxConnections(t *testing.T) {
     client := getWebDavClient(user, true, nil)
     assert.NoError(t, checkBasicFunc(client))
     // now add a fake connection
-    fs := vfs.NewOsFs("id", os.TempDir(), "")
+    fs := vfs.NewOsFs("id", os.TempDir(), "", nil)
     connection := &webdavd.Connection{
         BaseConnection: common.NewBaseConnection(fs.ConnectionID(), common.ProtocolWebDAV, "", "", user),
     }
@@ -1576,7 +1641,7 @@ func TestMaxSessions(t *testing.T) {
     client := getWebDavClient(user, false, nil)
     assert.NoError(t, checkBasicFunc(client))
     // now add a fake connection
-    fs := vfs.NewOsFs("id", os.TempDir(), "")
+    fs := vfs.NewOsFs("id", os.TempDir(), "", nil)
     connection := &webdavd.Connection{
         BaseConnection: common.NewBaseConnection(fs.ConnectionID(), common.ProtocolWebDAV, "", "", user),
     }

@@ -5729,11 +5729,34 @@ components:
       use_emulator:
         type: boolean
       description: Azure Blob Storage configuration details
+    OSFsConfig:
+      type: object
+      properties:
+        read_buffer_size:
+          type: integer
+          minimum: 0
+          maximum: 10
+          description: 'The read buffer size, as MB, to use for downloads. 0 means no buffering, which is fine in most use cases.'
+        write_buffer_size:
+          type: integer
+          minimum: 0
+          maximum: 10
+          description: 'The write buffer size, as MB, to use for uploads. 0 means no buffering, which is fine in most use cases.'
     CryptFsConfig:
       type: object
       properties:
         passphrase:
           $ref: '#/components/schemas/Secret'
+        read_buffer_size:
+          type: integer
+          minimum: 0
+          maximum: 10
+          description: 'The read buffer size, as MB, to use for downloads. 0 means no buffering, which is fine in most use cases.'
+        write_buffer_size:
+          type: integer
+          minimum: 0
+          maximum: 10
+          description: 'The write buffer size, as MB, to use for uploads. 0 means no buffering, which is fine in most use cases.'
       description: Crypt filesystem configuration details
     SFTPFsConfig:
       type: object
@@ -5804,6 +5827,8 @@ components:
       properties:
         provider:
           $ref: '#/components/schemas/FsProviders'
+        osconfig:
+          $ref: '#/components/schemas/OSFsConfig'
         s3config:
           $ref: '#/components/schemas/S3Config'
         gcsconfig:
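
The new OSFsConfig schema and the matching CryptFsConfig fields above expose the buffer sizes over the REST API. As a small sketch of how the two properties would look on the wire, the struct below is a local stand-in that simply reuses the schema's field names (read_buffer_size, write_buffer_size); it is not the actual sftpgo/sdk type:

package main

import (
	"encoding/json"
	"fmt"
)

// osFsConfig mirrors the OSFsConfig schema fields for illustration only.
type osFsConfig struct {
	ReadBufferSize  int `json:"read_buffer_size,omitempty"`  // MB, 0-10
	WriteBufferSize int `json:"write_buffer_size,omitempty"` // MB, 0-10
}

func main() {
	cfg := osFsConfig{ReadBufferSize: 1, WriteBufferSize: 2}
	b, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"read_buffer_size":1,"write_buffer_size":2}
}

These presumably correspond to the ReadBufferSize/WriteBufferSize fields of sdk.OSFsConfig used in TestBufferedUser above.
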
@@ -53,6 +53,25 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
         </div>
     </div>
 {{end}}
+<div class="form-group row fsconfig fsconfig-osfs">
+    <label for="idOsReadBufferSize" class="col-sm-2 col-form-label">Read buffer (MB)</label>
+    <div class="col-sm-3">
+        <input type="number" class="form-control" id="idOsReadBufferSize" name="osfs_read_buffer_size" placeholder=""
+            value="{{.OSConfig.ReadBufferSize}}" min="0" max="10" aria-describedby="OSReadBufferSizeHelpBlock">
+        <small id="OSReadBufferSizeHelpBlock" class="form-text text-muted">
+            Buffer size for downloads. 0 means no buffering, which is fine in most use cases.
+        </small>
+    </div>
+    <div class="col-sm-2"></div>
+    <label for="idOsWriteBufferSize" class="col-sm-2 col-form-label">Write buffer (MB)</label>
+    <div class="col-sm-3">
+        <input type="number" class="form-control" id="idOsWriteBufferSize" name="osfs_write_buffer_size" placeholder=""
+            value="{{.OSConfig.WriteBufferSize}}" min="0" max="10" aria-describedby="OSWriteBufferSizeHelpBlock">
+        <small id="OSWriteBufferSizeHelpBlock" class="form-text text-muted">
+            Buffer size for uploads. 0 means no buffering, which is fine in most use cases.
+        </small>
+    </div>
+</div>
 <div class="form-group row fsconfig fsconfig-s3fs">
     <label for="idS3Bucket" class="col-sm-2 col-form-label">Bucket</label>
     <div class="col-sm-10">
@@ -426,6 +445,26 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
         </div>
     </div>

+<div class="form-group row fsconfig fsconfig-cryptfs">
+    <label for="idCryptFsReadBufferSize" class="col-sm-2 col-form-label">Read buffer (MB)</label>
+    <div class="col-sm-3">
+        <input type="number" class="form-control" id="idCryptFsReadBufferSize" name="cryptfs_read_buffer_size" placeholder=""
+            value="{{.CryptConfig.ReadBufferSize}}" min="0" max="10" aria-describedby="CryptFsReadBufferSizeHelpBlock">
+        <small id="CryptFsReadBufferSizeHelpBlock" class="form-text text-muted">
+            Buffer size for downloads. 0 means no buffering, which is fine in most use cases.
+        </small>
+    </div>
+    <div class="col-sm-2"></div>
+    <label for="idCryptFsWriteBufferSize" class="col-sm-2 col-form-label">Write buffer (MB)</label>
+    <div class="col-sm-3">
+        <input type="number" class="form-control" id="idCryptFsWriteBufferSize" name="cryptfs_write_buffer_size" placeholder=""
+            value="{{.CryptConfig.WriteBufferSize}}" min="0" max="10" aria-describedby="CryptFsWriteBufferSizeHelpBlock">
+        <small id="CryptFsWriteBufferSizeHelpBlock" class="form-text text-muted">
+            Buffer size for uploads. 0 means no buffering, which is fine in most use cases.
+        </small>
+    </div>
+</div>
+
 <div class="form-group row fsconfig fsconfig-sftpfs">
     <label for="idSFTPEndpoint" class="col-sm-2 col-form-label">Endpoint</label>
     <div class="col-sm-10">