mirror of
https://github.com/drakkan/sftpgo.git
synced 2024-11-21 23:20:24 +00:00
add Data At Rest Encryption support
This commit is contained in:
parent
95c6d41c35
commit
4a88ea5c03
38 changed files with 1754 additions and 139 deletions
|
@ -21,6 +21,7 @@ It can serve local filesystem, S3 (compatible) Object Storage, Google Cloud Stor
|
|||
- Partial authentication. You can configure multi-step authentication requiring, for example, the user password after successful public key authentication.
|
||||
- Per user authentication methods. You can configure the allowed authentication methods for each user.
|
||||
- Custom authentication via external programs/HTTP API is supported.
|
||||
- [Data At Rest Encryption](./docs/dare.md) is supported.
|
||||
- Dynamic user modification before login via external programs/HTTP API is supported.
|
||||
- Quota support: accounts can have individual quota expressed as max total size and/or max number of files.
|
||||
- Bandwidth throttling is supported, with distinct settings for upload and download.
|
||||
|
|
|
@ -66,6 +66,7 @@ var (
|
|||
portableAzULPartSize int
|
||||
portableAzULConcurrency int
|
||||
portableAzUseEmulator bool
|
||||
portableCryptPassphrase string
|
||||
portableCmd = &cobra.Command{
|
||||
Use: "portable",
|
||||
Short: "Serve a single directory",
|
||||
|
@ -173,6 +174,9 @@ Please take a look at the usage below to customize the serving parameters`,
|
|||
UploadPartSize: int64(portableAzULPartSize),
|
||||
UploadConcurrency: portableAzULConcurrency,
|
||||
},
|
||||
CryptConfig: vfs.CryptFsConfig{
|
||||
Passphrase: kms.NewPlainSecret(portableCryptPassphrase),
|
||||
},
|
||||
},
|
||||
Filters: dataprovider.UserFilters{
|
||||
FilePatterns: parsePatternsFilesFilters(),
|
||||
|
@ -240,7 +244,8 @@ inside the advertised TXT record`)
|
|||
portableCmd.Flags().IntVarP(&portableFsProvider, "fs-provider", "f", int(dataprovider.LocalFilesystemProvider), `0 => local filesystem
|
||||
1 => AWS S3 compatible
|
||||
2 => Google Cloud Storage
|
||||
3 => Azure Blob Storage`)
|
||||
3 => Azure Blob Storage
|
||||
4 => Encrypted local filesystem`)
|
||||
portableCmd.Flags().StringVar(&portableS3Bucket, "s3-bucket", "", "")
|
||||
portableCmd.Flags().StringVar(&portableS3Region, "s3-region", "", "")
|
||||
portableCmd.Flags().StringVar(&portableS3AccessKey, "s3-access-key", "", "")
|
||||
|
@ -286,6 +291,7 @@ prefix and its contents`)
|
|||
portableCmd.Flags().IntVar(&portableAzULConcurrency, "az-upload-concurrency", 2, `How many parts are uploaded in
|
||||
parallel`)
|
||||
portableCmd.Flags().BoolVar(&portableAzUseEmulator, "az-use-emulator", false, "")
|
||||
portableCmd.Flags().StringVar(&portableCryptPassphrase, "crypto-passphrase", "", `Passphrase for encryption/decryption`)
|
||||
rootCmd.AddCommand(portableCmd)
|
||||
}
|
||||
|
||||
|
|
|
@ -15,9 +15,11 @@ import (
|
|||
"github.com/rs/zerolog"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/drakkan/sftpgo/dataprovider"
|
||||
"github.com/drakkan/sftpgo/httpclient"
|
||||
"github.com/drakkan/sftpgo/kms"
|
||||
"github.com/drakkan/sftpgo/logger"
|
||||
"github.com/drakkan/sftpgo/vfs"
|
||||
)
|
||||
|
@ -534,3 +536,18 @@ func TestPostConnectHook(t *testing.T) {
|
|||
|
||||
Config.PostConnectHook = ""
|
||||
}
|
||||
|
||||
func TestCryptoConvertFileInfo(t *testing.T) {
|
||||
name := "name"
|
||||
fs, err := vfs.NewCryptFs("connID1", os.TempDir(), vfs.CryptFsConfig{Passphrase: kms.NewPlainSecret("secret")})
|
||||
require.NoError(t, err)
|
||||
cryptFs := fs.(*vfs.CryptFs)
|
||||
info := vfs.NewFileInfo(name, true, 48, time.Now(), false)
|
||||
assert.Equal(t, info, cryptFs.ConvertFileInfo(info))
|
||||
info = vfs.NewFileInfo(name, false, 48, time.Now(), false)
|
||||
assert.NotEqual(t, info.Size(), cryptFs.ConvertFileInfo(info).Size())
|
||||
info = vfs.NewFileInfo(name, false, 33, time.Now(), false)
|
||||
assert.Equal(t, int64(0), cryptFs.ConvertFileInfo(info).Size())
|
||||
info = vfs.NewFileInfo(name, false, 1, time.Now(), false)
|
||||
assert.Equal(t, int64(0), cryptFs.ConvertFileInfo(info).Size())
|
||||
}
|
||||
|
|
|
@ -442,10 +442,17 @@ func (c *BaseConnection) getPathForSetStatPerms(fsPath, virtualPath string) stri
|
|||
|
||||
// DoStat execute a Stat if mode = 0, Lstat if mode = 1
|
||||
func (c *BaseConnection) DoStat(fsPath string, mode int) (os.FileInfo, error) {
|
||||
var info os.FileInfo
|
||||
var err error
|
||||
if mode == 1 {
|
||||
return c.Fs.Lstat(c.getRealFsPath(fsPath))
|
||||
info, err = c.Fs.Lstat(c.getRealFsPath(fsPath))
|
||||
} else {
|
||||
info, err = c.Fs.Stat(c.getRealFsPath(fsPath))
|
||||
}
|
||||
return c.Fs.Stat(c.getRealFsPath(fsPath))
|
||||
if err == nil && vfs.IsCryptOsFs(c.Fs) {
|
||||
info = c.Fs.(*vfs.CryptFs).ConvertFileInfo(info)
|
||||
}
|
||||
return info, err
|
||||
}
|
||||
|
||||
func (c *BaseConnection) ignoreSetStat() bool {
|
||||
|
|
|
@ -9,11 +9,13 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/minio/sio"
|
||||
"github.com/pkg/sftp"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/drakkan/sftpgo/dataprovider"
|
||||
"github.com/drakkan/sftpgo/kms"
|
||||
"github.com/drakkan/sftpgo/vfs"
|
||||
)
|
||||
|
||||
|
@ -459,6 +461,22 @@ func TestDoStat(t *testing.T) {
|
|||
}
|
||||
assert.False(t, os.SameFile(infoStat, infoLstat))
|
||||
|
||||
fs, err = vfs.NewCryptFs(fs.ConnectionID(), os.TempDir(), vfs.CryptFsConfig{
|
||||
Passphrase: kms.NewPlainSecret("payload"),
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
conn = NewBaseConnection(fs.ConnectionID(), ProtocolFTP, u, fs)
|
||||
dataSize := int64(32768)
|
||||
data := make([]byte, dataSize)
|
||||
err = ioutil.WriteFile(testFile, data, os.ModePerm)
|
||||
assert.NoError(t, err)
|
||||
infoStat, err = conn.DoStat(testFile, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Less(t, infoStat.Size(), dataSize)
|
||||
encSize, err := sio.EncryptedSize(uint64(infoStat.Size()))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(encSize)+33, dataSize)
|
||||
|
||||
err = os.Remove(testFile)
|
||||
assert.NoError(t, err)
|
||||
err = os.Remove(testFile + ".sym")
|
||||
|
|
|
@ -174,6 +174,21 @@ func (t *BaseTransfer) TransferError(err error) {
|
|||
atomic.LoadInt64(&t.BytesReceived), elapsed)
|
||||
}
|
||||
|
||||
func (t *BaseTransfer) getUploadFileSize() (int64, error) {
|
||||
var fileSize int64
|
||||
info, err := t.Fs.Stat(t.fsPath)
|
||||
if err == nil {
|
||||
fileSize = info.Size()
|
||||
}
|
||||
if vfs.IsCryptOsFs(t.Fs) && t.ErrTransfer != nil {
|
||||
errDelete := os.Remove(t.fsPath)
|
||||
if errDelete != nil {
|
||||
t.Connection.Log(logger.LevelWarn, "error removing partial crypto file %#v: %v", t.fsPath, errDelete)
|
||||
}
|
||||
}
|
||||
return fileSize, err
|
||||
}
|
||||
|
||||
// Close it is called when the transfer is completed.
|
||||
// It logs the transfer info, updates the user quota (for uploads)
|
||||
// and executes any defined action.
|
||||
|
@ -223,11 +238,10 @@ func (t *BaseTransfer) Close() error {
|
|||
go actionHandler.Handle(action) //nolint:errcheck
|
||||
} else {
|
||||
fileSize := atomic.LoadInt64(&t.BytesReceived) + t.MinWriteOffset
|
||||
info, err := t.Fs.Stat(t.fsPath)
|
||||
if err == nil {
|
||||
fileSize = info.Size()
|
||||
if statSize, err := t.getUploadFileSize(); err == nil {
|
||||
fileSize = statSize
|
||||
}
|
||||
t.Connection.Log(logger.LevelDebug, "uploaded file size %v stat error: %v", fileSize, err)
|
||||
t.Connection.Log(logger.LevelDebug, "uploaded file size %v", fileSize)
|
||||
t.updateQuota(numFiles, fileSize)
|
||||
logger.TransferLog(uploadLogSender, t.fsPath, elapsed, atomic.LoadInt64(&t.BytesReceived), t.Connection.User.Username,
|
||||
t.Connection.ID, t.Connection.protocol)
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/drakkan/sftpgo/dataprovider"
|
||||
"github.com/drakkan/sftpgo/kms"
|
||||
"github.com/drakkan/sftpgo/vfs"
|
||||
)
|
||||
|
||||
|
@ -252,3 +253,24 @@ func TestTransferErrors(t *testing.T) {
|
|||
|
||||
assert.Len(t, conn.GetTransfers(), 0)
|
||||
}
|
||||
|
||||
func TestRemovePartialCryptoFile(t *testing.T) {
|
||||
testFile := filepath.Join(os.TempDir(), "transfer_test_file")
|
||||
fs, err := vfs.NewCryptFs("id", os.TempDir(), vfs.CryptFsConfig{Passphrase: kms.NewPlainSecret("secret")})
|
||||
require.NoError(t, err)
|
||||
u := dataprovider.User{
|
||||
Username: "test",
|
||||
HomeDir: os.TempDir(),
|
||||
}
|
||||
conn := NewBaseConnection(fs.ConnectionID(), ProtocolSFTP, u, fs)
|
||||
transfer := NewBaseTransfer(nil, conn, nil, testFile, "/transfer_test_file", TransferUpload, 0, 0, 0, true, fs)
|
||||
transfer.ErrTransfer = errors.New("test error")
|
||||
_, err = transfer.getUploadFileSize()
|
||||
assert.Error(t, err)
|
||||
err = ioutil.WriteFile(testFile, []byte("test data"), os.ModePerm)
|
||||
assert.NoError(t, err)
|
||||
size, err := transfer.getUploadFileSize()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(9), size)
|
||||
assert.NoFileExists(t, testFile)
|
||||
}
|
||||
|
|
|
@ -1158,6 +1158,7 @@ func validateFilesystemConfig(user *User) error {
|
|||
}
|
||||
user.FsConfig.GCSConfig = vfs.GCSFsConfig{}
|
||||
user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{}
|
||||
user.FsConfig.CryptConfig = vfs.CryptFsConfig{}
|
||||
return nil
|
||||
} else if user.FsConfig.Provider == GCSFilesystemProvider {
|
||||
err := vfs.ValidateGCSFsConfig(&user.FsConfig.GCSConfig, user.getGCSCredentialsFilePath())
|
||||
|
@ -1166,6 +1167,7 @@ func validateFilesystemConfig(user *User) error {
|
|||
}
|
||||
user.FsConfig.S3Config = vfs.S3FsConfig{}
|
||||
user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{}
|
||||
user.FsConfig.CryptConfig = vfs.CryptFsConfig{}
|
||||
return nil
|
||||
} else if user.FsConfig.Provider == AzureBlobFilesystemProvider {
|
||||
err := vfs.ValidateAzBlobFsConfig(&user.FsConfig.AzBlobConfig)
|
||||
|
@ -1181,12 +1183,30 @@ func validateFilesystemConfig(user *User) error {
|
|||
}
|
||||
user.FsConfig.S3Config = vfs.S3FsConfig{}
|
||||
user.FsConfig.GCSConfig = vfs.GCSFsConfig{}
|
||||
user.FsConfig.CryptConfig = vfs.CryptFsConfig{}
|
||||
return nil
|
||||
} else if user.FsConfig.Provider == CryptedFilesystemProvider {
|
||||
err := vfs.ValidateCryptFsConfig(&user.FsConfig.CryptConfig)
|
||||
if err != nil {
|
||||
return &ValidationError{err: fmt.Sprintf("could not validate Crypt fs config: %v", err)}
|
||||
}
|
||||
if user.FsConfig.CryptConfig.Passphrase.IsPlain() {
|
||||
user.FsConfig.CryptConfig.Passphrase.SetAdditionalData(user.Username)
|
||||
err = user.FsConfig.CryptConfig.Passphrase.Encrypt()
|
||||
if err != nil {
|
||||
return &ValidationError{err: fmt.Sprintf("could not encrypt Crypt fs passphrase: %v", err)}
|
||||
}
|
||||
}
|
||||
user.FsConfig.S3Config = vfs.S3FsConfig{}
|
||||
user.FsConfig.GCSConfig = vfs.GCSFsConfig{}
|
||||
user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{}
|
||||
return nil
|
||||
}
|
||||
user.FsConfig.Provider = LocalFilesystemProvider
|
||||
user.FsConfig.S3Config = vfs.S3FsConfig{}
|
||||
user.FsConfig.GCSConfig = vfs.GCSFsConfig{}
|
||||
user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{}
|
||||
user.FsConfig.CryptConfig = vfs.CryptFsConfig{}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -155,6 +155,7 @@ const (
|
|||
S3FilesystemProvider // AWS S3 compatible
|
||||
GCSFilesystemProvider // Google Cloud Storage
|
||||
AzureBlobFilesystemProvider // Azure Blob Storage
|
||||
CryptedFilesystemProvider // Local encrypted
|
||||
)
|
||||
|
||||
// Filesystem defines cloud storage filesystem details
|
||||
|
@ -163,6 +164,7 @@ type Filesystem struct {
|
|||
S3Config vfs.S3FsConfig `json:"s3config,omitempty"`
|
||||
GCSConfig vfs.GCSFsConfig `json:"gcsconfig,omitempty"`
|
||||
AzBlobConfig vfs.AzBlobFsConfig `json:"azblobconfig,omitempty"`
|
||||
CryptConfig vfs.CryptFsConfig `json:"cryptconfig,omitempty"`
|
||||
}
|
||||
|
||||
// User defines a SFTPGo user
|
||||
|
@ -221,16 +223,20 @@ type User struct {
|
|||
|
||||
// GetFilesystem returns the filesystem for this user
|
||||
func (u *User) GetFilesystem(connectionID string) (vfs.Fs, error) {
|
||||
if u.FsConfig.Provider == S3FilesystemProvider {
|
||||
switch u.FsConfig.Provider {
|
||||
case S3FilesystemProvider:
|
||||
return vfs.NewS3Fs(connectionID, u.GetHomeDir(), u.FsConfig.S3Config)
|
||||
} else if u.FsConfig.Provider == GCSFilesystemProvider {
|
||||
case GCSFilesystemProvider:
|
||||
config := u.FsConfig.GCSConfig
|
||||
config.CredentialFile = u.getGCSCredentialsFilePath()
|
||||
return vfs.NewGCSFs(connectionID, u.GetHomeDir(), config)
|
||||
} else if u.FsConfig.Provider == AzureBlobFilesystemProvider {
|
||||
case AzureBlobFilesystemProvider:
|
||||
return vfs.NewAzBlobFs(connectionID, u.GetHomeDir(), u.FsConfig.AzBlobConfig)
|
||||
case CryptedFilesystemProvider:
|
||||
return vfs.NewCryptFs(connectionID, u.GetHomeDir(), u.FsConfig.CryptConfig)
|
||||
default:
|
||||
return vfs.NewOsFs(connectionID, u.GetHomeDir(), u.VirtualFolders), nil
|
||||
}
|
||||
return vfs.NewOsFs(connectionID, u.GetHomeDir(), u.VirtualFolders), nil
|
||||
}
|
||||
|
||||
// HideConfidentialData hides user confidential data
|
||||
|
@ -243,6 +249,8 @@ func (u *User) HideConfidentialData() {
|
|||
u.FsConfig.GCSConfig.Credentials.Hide()
|
||||
case AzureBlobFilesystemProvider:
|
||||
u.FsConfig.AzBlobConfig.AccountKey.Hide()
|
||||
case CryptedFilesystemProvider:
|
||||
u.FsConfig.CryptConfig.Passphrase.Hide()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -780,6 +788,9 @@ func (u *User) SetEmptySecretsIfNil() {
|
|||
if u.FsConfig.AzBlobConfig.AccountKey == nil {
|
||||
u.FsConfig.AzBlobConfig.AccountKey = kms.NewEmptySecret()
|
||||
}
|
||||
if u.FsConfig.CryptConfig.Passphrase == nil {
|
||||
u.FsConfig.CryptConfig.Passphrase = kms.NewEmptySecret()
|
||||
}
|
||||
}
|
||||
|
||||
func (u *User) getACopy() User {
|
||||
|
@ -841,6 +852,9 @@ func (u *User) getACopy() User {
|
|||
UseEmulator: u.FsConfig.AzBlobConfig.UseEmulator,
|
||||
AccessTier: u.FsConfig.AzBlobConfig.AccessTier,
|
||||
},
|
||||
CryptConfig: vfs.CryptFsConfig{
|
||||
Passphrase: u.FsConfig.CryptConfig.Passphrase.Clone(),
|
||||
},
|
||||
}
|
||||
|
||||
return User{
|
||||
|
|
|
@ -50,7 +50,7 @@ For each account, the following properties can be configured:
|
|||
- `allowed_patterns`, list of, case insensitive, allowed file patterns. Examples: `*.jpg`, `a*b?.png`. Any non matching file will be denied
|
||||
- `denied_patterns`, list of, case insensitive, denied file patterns. Denied file patterns are evaluated before the allowed ones
|
||||
- `path`, exposed virtual path, if no other specific filter is defined, the filter applies to subdirectories too. For example if filters are defined for the paths `/` and `/sub` then the filters for `/` are applied for any file outside the `/sub` directory
|
||||
- `fs_provider`, filesystem to serve via SFTP. Local filesystem (0), S3 Compatible Object Storage (1), Google Cloud Storage (2) and Azure Blob Storage (3) are supported
|
||||
- `fs_provider`, filesystem to serve via SFTP. Local filesystem (0), S3 Compatible Object Storage (1), Google Cloud Storage (2), Azure Blob Storage (3) and encrypted local filesystem (4) are supported
|
||||
- `s3_bucket`, required for S3 filesystem
|
||||
- `s3_region`, required for S3 filesystem. Must match the region for your bucket. You can find here the list of available [AWS regions](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). For example if your bucket is at `Frankfurt` you have to set the region to `eu-central-1`
|
||||
- `s3_access_key`
|
||||
|
@ -74,6 +74,7 @@ For each account, the following properties can be configured:
|
|||
- `az_upload_concurrency`, how many parts are uploaded in parallel. Zero means the default (2)
|
||||
- `az_key_prefix`, allows to restrict access to the folder identified by this prefix and its contents
|
||||
- `az_use_emulator`, boolean
|
||||
- `crypt_passphrase`, passphrase to use for local encryption
|
||||
- `additional_info`, string. Free text field
|
||||
|
||||
These properties are stored inside the data provider.
|
||||
|
|
19
docs/dare.md
Normal file
19
docs/dare.md
Normal file
|
@ -0,0 +1,19 @@
|
|||
# Data At Rest Encryption (DARE)
|
||||
|
||||
SFTPGo supports data at-rest encryption via the `cryptfs` virtual file system, in this mode SFTPGo transparently encrypts and decrypts data (to/from the disk) on-the-fly during uploads and/or downloads, making sure that the files at-rest on the server-side are always encrypted.
|
||||
|
||||
So, because of the way it works, as described above, when you set up an encrypted filesystem for a user you need to make sure it points to an empty path/directory (that has no files in it). Otherwise, it would try to decrypt existing files that are not encrypted in the first place and fail.
|
||||
|
||||
The SFTPGo's `cryptfs` is a tiny wrapper around [sio](https://github.com/minio/sio) therefore data is encrypted and authenticated using `AES-256-GCM` or `ChaCha20-Poly1305`. AES-GCM will be used if the CPU provides hardware support for it.
|
||||
|
||||
The only required configuration parameter is a `passphrase`: each file will be encrypted using a unique, randomly generated secret key derived from the given passphrase using the HMAC-based Extract-and-Expand Key Derivation Function (HKDF) as defined in [RFC 5869](http://tools.ietf.org/html/rfc5869). It is important to note that the per-object encryption key is never stored anywhere: it is derived from your `passphrase` and a randomly generated initialization vector just before encryption/decryption. The initialization vector is stored with the file.
|
||||
|
||||
The passphrase is stored encrypted itself according to your [KMS configuration](./kms.md) and is required to decrypt any file encrypted using an encryption key derived from it.
|
||||
|
||||
The encrypted filesystem has some limitations compared to the local, unencrypted, one:
|
||||
|
||||
- Upload resume is not supported.
|
||||
- Opening a file for both reading and writing at the same time is not supported, and so clients that require advanced filesystem-like features such as `sshfs` are not supported either.
|
||||
- Truncate is not supported.
|
||||
- System commands such as `git` or `rsync` are not supported: they will store data unencrypted.
|
||||
- Virtual folders are not implemented for now, if you are interested in this feature, please consider submitting a well written pull request (fully covered by test cases) or sponsoring this development. We could add a filesystem configuration to each virtual folder so we can mount encrypted or cloud backends as subfolders for local filesystems and vice versa.
|
|
@ -41,6 +41,7 @@ Flags:
|
|||
--az-upload-part-size int The buffer size for multipart uploads
|
||||
(MB) (default 4)
|
||||
--az-use-emulator
|
||||
--crypto-passphrase string Passphrase for encryption/decryption
|
||||
--denied-patterns stringArray Denied file patterns case insensitive.
|
||||
The format is:
|
||||
/dir::pattern1,pattern2.
|
||||
|
@ -53,6 +54,7 @@ Flags:
|
|||
1 => AWS S3 compatible
|
||||
2 => Google Cloud Storage
|
||||
3 => Azure Blob Storage
|
||||
4 => Encrypted local filesystem
|
||||
--ftpd-cert string Path to the certificate file for FTPS
|
||||
--ftpd-key string Path to the key file for FTPS
|
||||
--ftpd-port int 0 means a random unprivileged port,
|
||||
|
|
|
@ -9,6 +9,7 @@ For system commands we have no direct control on file creation/deletion and so t
|
|||
- we cannot avoid to leak real filesystem paths
|
||||
- quota check is suboptimal
|
||||
- maximum size restriction on a single file is not respected
|
||||
- data at-rest encryption is not supported
|
||||
|
||||
If quota is enabled and SFTPGo receives a system command, the used size and number of files are checked at the command start and not while new files are created/deleted. While the command is running the number of files is not checked, the remaining size is calculated as the difference between the max allowed quota and the used one, and it is checked against the bytes transferred via SSH. The command is aborted if it uploads more bytes than the remaining allowed size calculated at the command start. Anyway, we only see the bytes that the remote command sends to the local one via SSH. These bytes contain both protocol commands and files, and so the size of the files is different from the size transferred via SSH: for example, a command can send compressed files, or a protocol command (few bytes) could delete a big file. To mitigate these issues, quotas are recalculated at the command end with a full scan of the directory specified for the system command. This could be heavy for big directories. If you need system commands and quotas you could consider disabling quota restrictions and periodically update quota usage yourself using the REST API.
|
||||
|
||||
|
|
|
@ -55,12 +55,20 @@ Output:
|
|||
"download_bandwidth": 60,
|
||||
"expiration_date": 1546297200000,
|
||||
"filesystem": {
|
||||
"gcsconfig": {},
|
||||
"azblobconfig": {
|
||||
"account_key": {}
|
||||
},
|
||||
"cryptconfig": {
|
||||
"passphrase": {}
|
||||
},
|
||||
"gcsconfig": {
|
||||
"credentials": {}
|
||||
},
|
||||
"provider": 1,
|
||||
"s3config": {
|
||||
"access_key": "accesskey",
|
||||
"access_secret": {
|
||||
"payload": "dcd07e64a5ef5ede37b978198ca396ea9aee92453208ee2fee6f25407e47bf2119ba8edf2e81f91999bd5386c1a7",
|
||||
"payload": "ALVIG4egZxRjKH8/8NsJViA7EH5MqsweqmwLhGj4M4AGYgMM2ygF7kbCw+R5aQ==",
|
||||
"status": "Secretbox"
|
||||
},
|
||||
"bucket": "test",
|
||||
|
@ -181,6 +189,9 @@ Output:
|
|||
"azblobconfig": {
|
||||
"account_key": {}
|
||||
},
|
||||
"cryptconfig": {
|
||||
"passphrase": {}
|
||||
},
|
||||
"gcsconfig": {
|
||||
"credentials": {}
|
||||
},
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
#!/usr/bin/env python
|
||||
import argparse
|
||||
import base64
|
||||
from datetime import datetime
|
||||
import json
|
||||
import platform
|
||||
|
@ -84,7 +83,7 @@ class SFTPGoApiRequests:
|
|||
denied_patterns=[], allowed_patterns=[], s3_upload_part_size=0, s3_upload_concurrency=0,
|
||||
max_upload_file_size=0, denied_protocols=[], az_container='', az_account_name='', az_account_key='',
|
||||
az_sas_url='', az_endpoint='', az_upload_part_size=0, az_upload_concurrency=0, az_key_prefix='',
|
||||
az_use_emulator=False, az_access_tier='', additional_info=''):
|
||||
az_use_emulator=False, az_access_tier='', additional_info='', crypto_passphrase=''):
|
||||
user = {'id':user_id, 'username':username, 'uid':uid, 'gid':gid,
|
||||
'max_sessions':max_sessions, 'quota_size':quota_size, 'quota_files':quota_files,
|
||||
'upload_bandwidth':upload_bandwidth, 'download_bandwidth':download_bandwidth,
|
||||
|
@ -111,7 +110,7 @@ class SFTPGoApiRequests:
|
|||
gcs_automatic_credentials, s3_upload_part_size, s3_upload_concurrency,
|
||||
az_container, az_account_name, az_account_key, az_sas_url,
|
||||
az_endpoint, az_upload_part_size, az_upload_concurrency, az_key_prefix,
|
||||
az_use_emulator, az_access_tier)})
|
||||
az_use_emulator, az_access_tier, crypto_passphrase)})
|
||||
return user
|
||||
|
||||
def buildVirtualFolders(self, vfolders):
|
||||
|
@ -235,7 +234,7 @@ class SFTPGoApiRequests:
|
|||
s3_storage_class, s3_key_prefix, gcs_bucket, gcs_key_prefix, gcs_storage_class,
|
||||
gcs_credentials_file, gcs_automatic_credentials, s3_upload_part_size, s3_upload_concurrency,
|
||||
az_container, az_account_name, az_account_key, az_sas_url, az_endpoint, az_upload_part_size,
|
||||
az_upload_concurrency, az_key_prefix, az_use_emulator, az_access_tier):
|
||||
az_upload_concurrency, az_key_prefix, az_use_emulator, az_access_tier, crypto_passphrase):
|
||||
fs_config = {'provider':0}
|
||||
if fs_provider == 'S3':
|
||||
secret = {}
|
||||
|
@ -266,6 +265,9 @@ class SFTPGoApiRequests:
|
|||
'upload_concurrency':az_upload_concurrency, 'key_prefix':az_key_prefix, 'use_emulator':
|
||||
az_use_emulator, 'access_tier':az_access_tier}
|
||||
fs_config.update({'provider':3, 'azblobconfig':azureconfig})
|
||||
elif fs_provider == "Crypto":
|
||||
cryptoconfig = {"passphrase":{"status":"Plain", "payload":crypto_passphrase}}
|
||||
fs_config.update({'provider':4, 'cryptconfig':cryptoconfig})
|
||||
return fs_config
|
||||
|
||||
def getUsers(self, limit=100, offset=0, order='ASC', username=''):
|
||||
|
@ -285,7 +287,8 @@ class SFTPGoApiRequests:
|
|||
denied_login_methods=[], virtual_folders=[], denied_patterns=[], allowed_patterns=[],
|
||||
s3_upload_part_size=0, s3_upload_concurrency=0, max_upload_file_size=0, denied_protocols=[], az_container="",
|
||||
az_account_name='', az_account_key='', az_sas_url='', az_endpoint='', az_upload_part_size=0,
|
||||
az_upload_concurrency=0, az_key_prefix='', az_use_emulator=False, az_access_tier='', additional_info=''):
|
||||
az_upload_concurrency=0, az_key_prefix='', az_use_emulator=False, az_access_tier='', additional_info='',
|
||||
crypto_passphrase=''):
|
||||
u = self.buildUserObject(0, username, password, public_keys, home_dir, uid, gid, max_sessions,
|
||||
quota_size, quota_files, self.buildPermissions(perms, subdirs_permissions), upload_bandwidth, download_bandwidth,
|
||||
status, expiration_date, allowed_ip, denied_ip, fs_provider, s3_bucket, s3_region, s3_access_key,
|
||||
|
@ -293,7 +296,7 @@ class SFTPGoApiRequests:
|
|||
gcs_credentials_file, gcs_automatic_credentials, denied_login_methods, virtual_folders, denied_patterns,
|
||||
allowed_patterns, s3_upload_part_size, s3_upload_concurrency, max_upload_file_size, denied_protocols,
|
||||
az_container, az_account_name, az_account_key, az_sas_url, az_endpoint, az_upload_part_size,
|
||||
az_upload_concurrency, az_key_prefix, az_use_emulator, az_access_tier, additional_info)
|
||||
az_upload_concurrency, az_key_prefix, az_use_emulator, az_access_tier, additional_info, crypto_passphrase)
|
||||
r = requests.post(self.userPath, json=u, auth=self.auth, verify=self.verify)
|
||||
self.printResponse(r)
|
||||
|
||||
|
@ -306,7 +309,7 @@ class SFTPGoApiRequests:
|
|||
allowed_patterns=[], s3_upload_part_size=0, s3_upload_concurrency=0, max_upload_file_size=0,
|
||||
denied_protocols=[], disconnect=0, az_container='', az_account_name='', az_account_key='', az_sas_url='',
|
||||
az_endpoint='', az_upload_part_size=0, az_upload_concurrency=0, az_key_prefix='', az_use_emulator=False,
|
||||
az_access_tier='', additional_info=''):
|
||||
az_access_tier='', additional_info='', crypto_passphrase=''):
|
||||
u = self.buildUserObject(user_id, username, password, public_keys, home_dir, uid, gid, max_sessions,
|
||||
quota_size, quota_files, self.buildPermissions(perms, subdirs_permissions), upload_bandwidth, download_bandwidth,
|
||||
status, expiration_date, allowed_ip, denied_ip, fs_provider, s3_bucket, s3_region, s3_access_key,
|
||||
|
@ -314,7 +317,7 @@ class SFTPGoApiRequests:
|
|||
gcs_credentials_file, gcs_automatic_credentials, denied_login_methods, virtual_folders, denied_patterns,
|
||||
allowed_patterns, s3_upload_part_size, s3_upload_concurrency, max_upload_file_size, denied_protocols,
|
||||
az_container, az_account_name, az_account_key, az_sas_url, az_endpoint, az_upload_part_size,
|
||||
az_upload_concurrency, az_key_prefix, az_use_emulator, az_access_tier, additional_info)
|
||||
az_upload_concurrency, az_key_prefix, az_use_emulator, az_access_tier, additional_info, crypto_passphrase)
|
||||
r = requests.put(urlparse.urljoin(self.userPath, 'user/' + str(user_id)), params={'disconnect':disconnect},
|
||||
json=u, auth=self.auth, verify=self.verify)
|
||||
self.printResponse(r)
|
||||
|
@ -622,7 +625,7 @@ def addCommonUserArguments(parser):
|
|||
parser.add_argument('--allowed-patterns', type=str, nargs='*', default=[], help='Allowed file patterns case insensitive. '
|
||||
+'The format is /dir::pattern1,pattern2. For example: "/somedir::*.jpg,a*b?.png" "/otherdir/subdir::*.zip,*.rar". ' +
|
||||
'Default: %(default)s')
|
||||
parser.add_argument('--fs', type=str, default='local', choices=['local', 'S3', 'GCS', "AzureBlob"],
|
||||
parser.add_argument('--fs', type=str, default='local', choices=['local', 'S3', 'GCS', "AzureBlob", "Crypto"],
|
||||
help='Filesystem provider. Default: %(default)s')
|
||||
parser.add_argument('--s3-bucket', type=str, default='', help='Default: %(default)s')
|
||||
parser.add_argument('--s3-key-prefix', type=str, default='', help='Virtual root directory. If non empty only this ' +
|
||||
|
@ -660,6 +663,8 @@ def addCommonUserArguments(parser):
|
|||
'directory and its contents will be available. Cannot start with "/". For example "folder/subfolder/".' +
|
||||
' Default: %(default)s')
|
||||
parser.add_argument('--az-use-emulator', type=bool, default=False, help='Default: %(default)s')
|
||||
parser.add_argument('--crypto-passphrase', type=str, default='', help='Passphrase for encryption/decryption, to use ' +
|
||||
'with Crypto filesystem')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
@ -816,7 +821,7 @@ if __name__ == '__main__':
|
|||
args.s3_upload_part_size, args.s3_upload_concurrency, args.max_upload_file_size, args.denied_protocols,
|
||||
args.az_container, args.az_account_name, args.az_account_key, args.az_sas_url, args.az_endpoint,
|
||||
args.az_upload_part_size, args.az_upload_concurrency, args.az_key_prefix, args.az_use_emulator,
|
||||
args.az_access_tier, args.additional_info)
|
||||
args.az_access_tier, args.additional_info, args.crypto_passphrase)
|
||||
elif args.command == 'update-user':
|
||||
api.updateUser(args.id, args.username, args.password, args.public_keys, args.home_dir, args.uid, args.gid,
|
||||
args.max_sessions, args.quota_size, args.quota_files, args.permissions, args.upload_bandwidth,
|
||||
|
@ -829,7 +834,7 @@ if __name__ == '__main__':
|
|||
args.s3_upload_concurrency, args.max_upload_file_size, args.denied_protocols, args.disconnect,
|
||||
args.az_container, args.az_account_name, args.az_account_key, args.az_sas_url, args.az_endpoint,
|
||||
args.az_upload_part_size, args.az_upload_concurrency, args.az_key_prefix, args.az_use_emulator,
|
||||
args.az_access_tier, args.additional_info)
|
||||
args.az_access_tier, args.additional_info, args.crypto_passphrase)
|
||||
elif args.command == 'delete-user':
|
||||
api.deleteUser(args.id)
|
||||
elif args.command == 'get-users':
|
||||
|
|
226
ftpd/cryptfs_test.go
Normal file
226
ftpd/cryptfs_test.go
Normal file
|
@ -0,0 +1,226 @@
|
|||
package ftpd_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/minio/sio"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/drakkan/sftpgo/common"
|
||||
"github.com/drakkan/sftpgo/dataprovider"
|
||||
"github.com/drakkan/sftpgo/httpd"
|
||||
"github.com/drakkan/sftpgo/kms"
|
||||
)
|
||||
|
||||
func TestBasicFTPHandlingCryptFs(t *testing.T) {
|
||||
u := getTestUserWithCryptFs()
|
||||
u.QuotaSize = 6553600
|
||||
user, _, err := httpd.AddUser(u, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
client, err := getFTPClient(user, true)
|
||||
if assert.NoError(t, err) {
|
||||
assert.Len(t, common.Connections.GetStats(), 1)
|
||||
testFilePath := filepath.Join(homeBasePath, testFileName)
|
||||
testFileSize := int64(65535)
|
||||
encryptedFileSize, err := getEncryptedFileSize(testFileSize)
|
||||
assert.NoError(t, err)
|
||||
expectedQuotaSize := encryptedFileSize
|
||||
expectedQuotaFiles := 1
|
||||
err = createTestFile(testFilePath, testFileSize)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = checkBasicFTP(client)
|
||||
assert.NoError(t, err)
|
||||
err = ftpUploadFile(testFilePath, path.Join("/missing_dir", testFileName), testFileSize, client, 0)
|
||||
assert.Error(t, err)
|
||||
err = ftpUploadFile(testFilePath, testFileName, testFileSize, client, 0)
|
||||
assert.NoError(t, err)
|
||||
// overwrite an existing file
|
||||
err = ftpUploadFile(testFilePath, testFileName, testFileSize, client, 0)
|
||||
assert.NoError(t, err)
|
||||
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
|
||||
err = ftpDownloadFile(testFileName, localDownloadPath, testFileSize, client, 0)
|
||||
assert.NoError(t, err)
|
||||
info, err := os.Stat(localDownloadPath)
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, testFileSize, info.Size())
|
||||
}
|
||||
list, err := client.List(".")
|
||||
if assert.NoError(t, err) {
|
||||
assert.Len(t, list, 1)
|
||||
assert.Equal(t, testFileSize, int64(list[0].Size))
|
||||
}
|
||||
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
|
||||
assert.Equal(t, expectedQuotaSize, user.UsedQuotaSize)
|
||||
err = client.Rename(testFileName, testFileName+"1")
|
||||
assert.NoError(t, err)
|
||||
err = client.Delete(testFileName)
|
||||
assert.Error(t, err)
|
||||
err = client.Delete(testFileName + "1")
|
||||
assert.NoError(t, err)
|
||||
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedQuotaFiles-1, user.UsedQuotaFiles)
|
||||
assert.Equal(t, expectedQuotaSize-encryptedFileSize, user.UsedQuotaSize)
|
||||
curDir, err := client.CurrentDir()
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, "/", curDir)
|
||||
}
|
||||
testDir := "testDir"
|
||||
err = client.MakeDir(testDir)
|
||||
assert.NoError(t, err)
|
||||
err = client.ChangeDir(testDir)
|
||||
assert.NoError(t, err)
|
||||
curDir, err = client.CurrentDir()
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, path.Join("/", testDir), curDir)
|
||||
}
|
||||
err = ftpUploadFile(testFilePath, testFileName, testFileSize, client, 0)
|
||||
assert.NoError(t, err)
|
||||
size, err := client.FileSize(path.Join("/", testDir, testFileName))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, testFileSize, size)
|
||||
err = client.ChangeDirToParent()
|
||||
assert.NoError(t, err)
|
||||
curDir, err = client.CurrentDir()
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, "/", curDir)
|
||||
}
|
||||
err = client.Delete(path.Join("/", testDir, testFileName))
|
||||
assert.NoError(t, err)
|
||||
err = client.Delete(testDir)
|
||||
assert.Error(t, err)
|
||||
err = client.RemoveDir(testDir)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = os.Remove(testFilePath)
|
||||
assert.NoError(t, err)
|
||||
err = os.Remove(localDownloadPath)
|
||||
assert.NoError(t, err)
|
||||
err = client.Quit()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(user.GetHomeDir())
|
||||
assert.NoError(t, err)
|
||||
assert.Eventually(t, func() bool { return len(common.Connections.GetStats()) == 0 }, 1*time.Second, 50*time.Millisecond)
|
||||
}
|
||||
|
||||
func TestZeroBytesTransfersCryptFs(t *testing.T) {
|
||||
u := getTestUserWithCryptFs()
|
||||
user, _, err := httpd.AddUser(u, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
client, err := getFTPClient(user, true)
|
||||
if assert.NoError(t, err) {
|
||||
testFileName := "testfilename"
|
||||
err = checkBasicFTP(client)
|
||||
assert.NoError(t, err)
|
||||
localDownloadPath := filepath.Join(homeBasePath, "emptydownload")
|
||||
err = ioutil.WriteFile(localDownloadPath, []byte(""), os.ModePerm)
|
||||
assert.NoError(t, err)
|
||||
err = ftpUploadFile(localDownloadPath, testFileName, 0, client, 0)
|
||||
assert.NoError(t, err)
|
||||
size, err := client.FileSize(testFileName)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(0), size)
|
||||
err = os.Remove(localDownloadPath)
|
||||
assert.NoError(t, err)
|
||||
assert.NoFileExists(t, localDownloadPath)
|
||||
err = ftpDownloadFile(testFileName, localDownloadPath, 0, client, 0)
|
||||
assert.NoError(t, err)
|
||||
info, err := os.Stat(localDownloadPath)
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, int64(0), info.Size())
|
||||
}
|
||||
err = client.Quit()
|
||||
assert.NoError(t, err)
|
||||
err = os.Remove(localDownloadPath)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(user.GetHomeDir())
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestResumeCryptFs(t *testing.T) {
|
||||
u := getTestUserWithCryptFs()
|
||||
user, _, err := httpd.AddUser(u, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
client, err := getFTPClient(user, true)
|
||||
if assert.NoError(t, err) {
|
||||
testFilePath := filepath.Join(homeBasePath, testFileName)
|
||||
data := []byte("test data")
|
||||
err = ioutil.WriteFile(testFilePath, data, os.ModePerm)
|
||||
assert.NoError(t, err)
|
||||
err = ftpUploadFile(testFilePath, testFileName, int64(len(data)), client, 0)
|
||||
assert.NoError(t, err)
|
||||
// upload resume is not supported
|
||||
err = ftpUploadFile(testFilePath, testFileName, int64(len(data)+5), client, 5)
|
||||
assert.Error(t, err)
|
||||
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
|
||||
err = ftpDownloadFile(testFileName, localDownloadPath, int64(4), client, 5)
|
||||
assert.NoError(t, err)
|
||||
readed, err := ioutil.ReadFile(localDownloadPath)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, data[5:], readed)
|
||||
err = ftpDownloadFile(testFileName, localDownloadPath, int64(8), client, 1)
|
||||
assert.NoError(t, err)
|
||||
readed, err = ioutil.ReadFile(localDownloadPath)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, data[1:], readed)
|
||||
err = ftpDownloadFile(testFileName, localDownloadPath, int64(0), client, 9)
|
||||
assert.NoError(t, err)
|
||||
err = client.Delete(testFileName)
|
||||
assert.NoError(t, err)
|
||||
err = ftpUploadFile(testFilePath, testFileName, int64(len(data)), client, 0)
|
||||
assert.NoError(t, err)
|
||||
// now append to a file
|
||||
srcFile, err := os.Open(testFilePath)
|
||||
if assert.NoError(t, err) {
|
||||
err = client.Append(testFileName, srcFile)
|
||||
assert.Error(t, err)
|
||||
err = srcFile.Close()
|
||||
assert.NoError(t, err)
|
||||
size, err := client.FileSize(testFileName)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(len(data)), size)
|
||||
err = ftpDownloadFile(testFileName, localDownloadPath, int64(len(data)), client, 0)
|
||||
assert.NoError(t, err)
|
||||
readed, err = ioutil.ReadFile(localDownloadPath)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, data, readed)
|
||||
}
|
||||
err = client.Quit()
|
||||
assert.NoError(t, err)
|
||||
err = os.Remove(testFilePath)
|
||||
assert.NoError(t, err)
|
||||
err = os.Remove(localDownloadPath)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(user.GetHomeDir())
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func getTestUserWithCryptFs() dataprovider.User {
|
||||
user := getTestUser()
|
||||
user.FsConfig.Provider = dataprovider.CryptedFilesystemProvider
|
||||
user.FsConfig.CryptConfig.Passphrase = kms.NewPlainSecret("testPassphrase")
|
||||
return user
|
||||
}
|
||||
|
||||
func getEncryptedFileSize(size int64) (int64, error) {
|
||||
encSize, err := sio.EncryptedSize(uint64(size))
|
||||
return int64(encSize) + 33, err
|
||||
}
|
|
@ -365,7 +365,11 @@ func (c *Connection) handleFTPUploadToExistingFile(flags int, resolvedPath, file
|
|||
return nil, common.ErrQuotaExceeded
|
||||
}
|
||||
minWriteOffset := int64(0)
|
||||
isResume := flags&os.O_APPEND != 0 && flags&os.O_TRUNC == 0
|
||||
// ftpserverlib set os.O_WRONLY | os.O_APPEND for APPE
|
||||
// and os.O_WRONLY | os.O_CREATE for REST. If is not APPE
|
||||
// and REST = 0 then os.O_WRONLY | os.O_CREATE | os.O_TRUNC
|
||||
// so if we don't have O_TRUC is a resume
|
||||
isResume := flags&os.O_TRUNC == 0
|
||||
// if there is a size limit remaining size cannot be 0 here, since quotaResult.HasSpace
|
||||
// will return false in this case and we deny the upload before
|
||||
maxWriteSize, err := c.GetMaxWriteSize(quotaResult, isResume, fileSize)
|
||||
|
|
|
@ -377,7 +377,7 @@ func TestUploadOverwriteErrors(t *testing.T) {
|
|||
err = os.Remove(f.Name())
|
||||
assert.NoError(t, err)
|
||||
|
||||
_, err = connection.handleFTPUploadToExistingFile(0, filepath.Join(os.TempDir(), "sub", "file"),
|
||||
_, err = connection.handleFTPUploadToExistingFile(os.O_TRUNC, filepath.Join(os.TempDir(), "sub", "file"),
|
||||
filepath.Join(os.TempDir(), "sub", "file1"), 0, "/sub/file1")
|
||||
assert.Error(t, err)
|
||||
connection.Fs = vfs.NewOsFs(connID, user.GetHomeDir(), nil)
|
||||
|
|
1
go.mod
1
go.mod
|
@ -24,6 +24,7 @@ require (
|
|||
github.com/mattn/go-sqlite3 v1.14.5
|
||||
github.com/miekg/dns v1.1.35 // indirect
|
||||
github.com/minio/sha256-simd v0.1.1
|
||||
github.com/minio/sio v0.2.1
|
||||
github.com/mitchellh/mapstructure v1.3.3 // indirect
|
||||
github.com/otiai10/copy v1.2.0
|
||||
github.com/pelletier/go-toml v1.8.1 // indirect
|
||||
|
|
2
go.sum
2
go.sum
|
@ -430,6 +430,8 @@ github.com/miekg/dns v1.1.35 h1:oTfOaDH+mZkdcgdIjH6yBajRGtIwcwcaR+rt23ZSrJs=
|
|||
github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
|
||||
github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
|
||||
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/minio/sio v0.2.1 h1:NjzKiIMSMcHediVQR0AFVx2tp7Wxh9tKPfDI3kH7aHQ=
|
||||
github.com/minio/sio v0.2.1/go.mod h1:8b0yPp2avGThviy/+OCJBI6OMpvxoUuiLvE6F1lebhw=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
|
|
|
@ -100,6 +100,11 @@ func addUser(w http.ResponseWriter, r *http.Request) {
|
|||
sendAPIResponse(w, r, errors.New("invalid account_key"), "", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
case dataprovider.CryptedFilesystemProvider:
|
||||
if user.FsConfig.CryptConfig.Passphrase.IsRedacted() {
|
||||
sendAPIResponse(w, r, errors.New("invalid passphrase"), "", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
}
|
||||
err = dataprovider.AddUser(user)
|
||||
if err == nil {
|
||||
|
@ -141,6 +146,7 @@ func updateUser(w http.ResponseWriter, r *http.Request) {
|
|||
var currentS3AccessSecret *kms.Secret
|
||||
var currentAzAccountKey *kms.Secret
|
||||
var currentGCSCredentials *kms.Secret
|
||||
var currentCryptoPassphrase *kms.Secret
|
||||
if user.FsConfig.Provider == dataprovider.S3FilesystemProvider {
|
||||
currentS3AccessSecret = user.FsConfig.S3Config.AccessSecret
|
||||
}
|
||||
|
@ -150,10 +156,15 @@ func updateUser(w http.ResponseWriter, r *http.Request) {
|
|||
if user.FsConfig.Provider == dataprovider.GCSFilesystemProvider {
|
||||
currentGCSCredentials = user.FsConfig.GCSConfig.Credentials
|
||||
}
|
||||
if user.FsConfig.Provider == dataprovider.CryptedFilesystemProvider {
|
||||
currentCryptoPassphrase = user.FsConfig.CryptConfig.Passphrase
|
||||
}
|
||||
|
||||
user.Permissions = make(map[string][]string)
|
||||
user.FsConfig.S3Config = vfs.S3FsConfig{}
|
||||
user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{}
|
||||
user.FsConfig.GCSConfig = vfs.GCSFsConfig{}
|
||||
user.FsConfig.CryptConfig = vfs.CryptFsConfig{}
|
||||
err = render.DecodeJSON(r.Body, &user)
|
||||
if err != nil {
|
||||
sendAPIResponse(w, r, err, "", http.StatusBadRequest)
|
||||
|
@ -164,8 +175,7 @@ func updateUser(w http.ResponseWriter, r *http.Request) {
|
|||
if len(user.Permissions) == 0 {
|
||||
user.Permissions = currentPermissions
|
||||
}
|
||||
updateEncryptedSecrets(&user, currentS3AccessSecret, currentAzAccountKey, currentGCSCredentials)
|
||||
|
||||
updateEncryptedSecrets(&user, currentS3AccessSecret, currentAzAccountKey, currentGCSCredentials, currentCryptoPassphrase)
|
||||
if user.ID != userID {
|
||||
sendAPIResponse(w, r, err, "user ID in request body does not match user ID in path parameter", http.StatusBadRequest)
|
||||
return
|
||||
|
@ -210,7 +220,8 @@ func disconnectUser(username string) {
|
|||
}
|
||||
}
|
||||
|
||||
func updateEncryptedSecrets(user *dataprovider.User, currentS3AccessSecret, currentAzAccountKey, currentGCSCredentials *kms.Secret) {
|
||||
func updateEncryptedSecrets(user *dataprovider.User, currentS3AccessSecret, currentAzAccountKey,
|
||||
currentGCSCredentials *kms.Secret, currentCryptoPassphrase *kms.Secret) {
|
||||
// we use the new access secret if plain or empty, otherwise the old value
|
||||
if user.FsConfig.Provider == dataprovider.S3FilesystemProvider {
|
||||
if !user.FsConfig.S3Config.AccessSecret.IsPlain() && !user.FsConfig.S3Config.AccessSecret.IsEmpty() {
|
||||
|
@ -227,4 +238,9 @@ func updateEncryptedSecrets(user *dataprovider.User, currentS3AccessSecret, curr
|
|||
user.FsConfig.GCSConfig.Credentials = currentGCSCredentials
|
||||
}
|
||||
}
|
||||
if user.FsConfig.Provider == dataprovider.CryptedFilesystemProvider {
|
||||
if !user.FsConfig.CryptConfig.Passphrase.IsPlain() && !user.FsConfig.CryptConfig.Passphrase.IsEmpty() {
|
||||
user.FsConfig.CryptConfig.Passphrase = currentCryptoPassphrase
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -624,6 +624,9 @@ func compareUserFsConfig(expected *dataprovider.User, actual *dataprovider.User)
|
|||
if err := compareAzBlobConfig(expected, actual); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := checkEncryptedSecret(expected.FsConfig.CryptConfig.Passphrase, actual.FsConfig.CryptConfig.Passphrase); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -515,6 +515,14 @@ func TestAddUserInvalidFsConfig(t *testing.T) {
|
|||
u.FsConfig.AzBlobConfig.UploadPartSize = 101
|
||||
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
|
||||
u = getTestUser()
|
||||
u.FsConfig.Provider = dataprovider.CryptedFilesystemProvider
|
||||
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
u.FsConfig.CryptConfig.Passphrase = kms.NewSecret(kms.SecretStatusRedacted, "akey", "", "")
|
||||
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestAddUserInvalidVirtualFolders(t *testing.T) {
|
||||
|
@ -1204,6 +1212,7 @@ func TestUserAzureBlobConfig(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
assert.Equal(t, kms.SecretStatusSecretBox, user.FsConfig.AzBlobConfig.AccountKey.GetStatus())
|
||||
assert.NotEmpty(t, initialPayload)
|
||||
assert.Equal(t, initialPayload, user.FsConfig.AzBlobConfig.AccountKey.GetPayload())
|
||||
assert.Empty(t, user.FsConfig.AzBlobConfig.AccountKey.GetAdditionalData())
|
||||
assert.Empty(t, user.FsConfig.AzBlobConfig.AccountKey.GetKey())
|
||||
// test user without access key and access secret (sas)
|
||||
|
@ -1229,6 +1238,58 @@ func TestUserAzureBlobConfig(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestUserCryptFs(t *testing.T) {
|
||||
user, _, err := httpd.AddUser(getTestUser(), http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
user.FsConfig.Provider = dataprovider.CryptedFilesystemProvider
|
||||
user.FsConfig.CryptConfig.Passphrase = kms.NewPlainSecret("crypt passphrase")
|
||||
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
|
||||
assert.NoError(t, err)
|
||||
initialPayload := user.FsConfig.CryptConfig.Passphrase.GetPayload()
|
||||
assert.Equal(t, kms.SecretStatusSecretBox, user.FsConfig.CryptConfig.Passphrase.GetStatus())
|
||||
assert.NotEmpty(t, initialPayload)
|
||||
assert.Empty(t, user.FsConfig.CryptConfig.Passphrase.GetAdditionalData())
|
||||
assert.Empty(t, user.FsConfig.CryptConfig.Passphrase.GetKey())
|
||||
user.FsConfig.CryptConfig.Passphrase.SetStatus(kms.SecretStatusSecretBox)
|
||||
user.FsConfig.CryptConfig.Passphrase.SetAdditionalData("data")
|
||||
user.FsConfig.CryptConfig.Passphrase.SetKey("fake pass key")
|
||||
user, bb, err := httpd.UpdateUser(user, http.StatusOK, "")
|
||||
assert.NoError(t, err, string(bb))
|
||||
assert.Equal(t, kms.SecretStatusSecretBox, user.FsConfig.CryptConfig.Passphrase.GetStatus())
|
||||
assert.Equal(t, initialPayload, user.FsConfig.CryptConfig.Passphrase.GetPayload())
|
||||
assert.Empty(t, user.FsConfig.CryptConfig.Passphrase.GetAdditionalData())
|
||||
assert.Empty(t, user.FsConfig.CryptConfig.Passphrase.GetKey())
|
||||
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
user.Password = defaultPassword
|
||||
user.ID = 0
|
||||
secret := kms.NewSecret(kms.SecretStatusSecretBox, "invalid encrypted payload", "", "")
|
||||
user.FsConfig.CryptConfig.Passphrase = secret
|
||||
_, _, err = httpd.AddUser(user, http.StatusOK)
|
||||
assert.Error(t, err)
|
||||
user.FsConfig.CryptConfig.Passphrase = kms.NewPlainSecret("passphrase test")
|
||||
user, _, err = httpd.AddUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
initialPayload = user.FsConfig.CryptConfig.Passphrase.GetPayload()
|
||||
assert.Equal(t, kms.SecretStatusSecretBox, user.FsConfig.CryptConfig.Passphrase.GetStatus())
|
||||
assert.NotEmpty(t, initialPayload)
|
||||
assert.Empty(t, user.FsConfig.CryptConfig.Passphrase.GetAdditionalData())
|
||||
assert.Empty(t, user.FsConfig.CryptConfig.Passphrase.GetKey())
|
||||
user.FsConfig.Provider = dataprovider.CryptedFilesystemProvider
|
||||
user.FsConfig.CryptConfig.Passphrase.SetKey("pass")
|
||||
user, bb, err = httpd.UpdateUser(user, http.StatusOK, "")
|
||||
assert.NoError(t, err, string(bb))
|
||||
assert.Equal(t, kms.SecretStatusSecretBox, user.FsConfig.CryptConfig.Passphrase.GetStatus())
|
||||
assert.NotEmpty(t, initialPayload)
|
||||
assert.Equal(t, initialPayload, user.FsConfig.CryptConfig.Passphrase.GetPayload())
|
||||
assert.Empty(t, user.FsConfig.CryptConfig.Passphrase.GetAdditionalData())
|
||||
assert.Empty(t, user.FsConfig.CryptConfig.Passphrase.GetKey())
|
||||
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestUserHiddenFields(t *testing.T) {
|
||||
err := dataprovider.Close()
|
||||
assert.NoError(t, err)
|
||||
|
@ -1240,7 +1301,7 @@ func TestUserHiddenFields(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
// sensitive data must be hidden but not deleted from the dataprovider
|
||||
usernames := []string{"user1", "user2", "user3"}
|
||||
usernames := []string{"user1", "user2", "user3", "user4"}
|
||||
u1 := getTestUser()
|
||||
u1.Username = usernames[0]
|
||||
u1.FsConfig.Provider = dataprovider.S3FilesystemProvider
|
||||
|
@ -1268,9 +1329,16 @@ func TestUserHiddenFields(t *testing.T) {
|
|||
user3, _, err := httpd.AddUser(u3, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
|
||||
u4 := getTestUser()
|
||||
u4.Username = usernames[3]
|
||||
u4.FsConfig.Provider = dataprovider.CryptedFilesystemProvider
|
||||
u4.FsConfig.CryptConfig.Passphrase = kms.NewPlainSecret("test passphrase")
|
||||
user4, _, err := httpd.AddUser(u4, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
|
||||
users, _, err := httpd.GetUsers(0, 0, "", http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
assert.GreaterOrEqual(t, len(users), 3)
|
||||
assert.GreaterOrEqual(t, len(users), 4)
|
||||
for _, username := range usernames {
|
||||
users, _, err = httpd.GetUsers(0, 0, username, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
|
@ -1303,6 +1371,14 @@ func TestUserHiddenFields(t *testing.T) {
|
|||
assert.NotEmpty(t, user3.FsConfig.AzBlobConfig.AccountKey.GetStatus())
|
||||
assert.NotEmpty(t, user3.FsConfig.AzBlobConfig.AccountKey.GetPayload())
|
||||
|
||||
user4, _, err = httpd.GetUserByID(user4.ID, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, user4.Password)
|
||||
assert.Empty(t, user4.FsConfig.CryptConfig.Passphrase.GetKey())
|
||||
assert.Empty(t, user4.FsConfig.CryptConfig.Passphrase.GetAdditionalData())
|
||||
assert.NotEmpty(t, user4.FsConfig.CryptConfig.Passphrase.GetStatus())
|
||||
assert.NotEmpty(t, user4.FsConfig.CryptConfig.Passphrase.GetPayload())
|
||||
|
||||
// finally check that we have all the data inside the data provider
|
||||
user1, err = dataprovider.GetUserByID(user1.ID)
|
||||
assert.NoError(t, err)
|
||||
|
@ -1346,12 +1422,28 @@ func TestUserHiddenFields(t *testing.T) {
|
|||
assert.Empty(t, user3.FsConfig.AzBlobConfig.AccountKey.GetKey())
|
||||
assert.Empty(t, user3.FsConfig.AzBlobConfig.AccountKey.GetAdditionalData())
|
||||
|
||||
user4, err = dataprovider.GetUserByID(user4.ID)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, user4.Password)
|
||||
assert.NotEmpty(t, user4.FsConfig.CryptConfig.Passphrase.GetKey())
|
||||
assert.NotEmpty(t, user4.FsConfig.CryptConfig.Passphrase.GetAdditionalData())
|
||||
assert.NotEmpty(t, user4.FsConfig.CryptConfig.Passphrase.GetStatus())
|
||||
assert.NotEmpty(t, user4.FsConfig.CryptConfig.Passphrase.GetPayload())
|
||||
err = user4.FsConfig.CryptConfig.Passphrase.Decrypt()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, kms.SecretStatusPlain, user4.FsConfig.CryptConfig.Passphrase.GetStatus())
|
||||
assert.Equal(t, u4.FsConfig.CryptConfig.Passphrase.GetPayload(), user4.FsConfig.CryptConfig.Passphrase.GetPayload())
|
||||
assert.Empty(t, user4.FsConfig.CryptConfig.Passphrase.GetKey())
|
||||
assert.Empty(t, user4.FsConfig.CryptConfig.Passphrase.GetAdditionalData())
|
||||
|
||||
_, err = httpd.RemoveUser(user1, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
_, err = httpd.RemoveUser(user2, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
_, err = httpd.RemoveUser(user3, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
_, err = httpd.RemoveUser(user4, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = dataprovider.Close()
|
||||
assert.NoError(t, err)
|
||||
|
@ -3410,6 +3502,87 @@ func TestWebUserAzureBlobMock(t *testing.T) {
|
|||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
func TestWebUserCryptMock(t *testing.T) {
|
||||
user := getTestUser()
|
||||
userAsJSON := getUserAsJSON(t, user)
|
||||
req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
err := render.DecodeJSON(rr.Body, &user)
|
||||
assert.NoError(t, err)
|
||||
user.FsConfig.Provider = dataprovider.CryptedFilesystemProvider
|
||||
user.FsConfig.CryptConfig.Passphrase = kms.NewPlainSecret("crypted passphrase")
|
||||
form := make(url.Values)
|
||||
form.Set("username", user.Username)
|
||||
form.Set("home_dir", user.HomeDir)
|
||||
form.Set("uid", "0")
|
||||
form.Set("gid", strconv.FormatInt(int64(user.GID), 10))
|
||||
form.Set("max_sessions", strconv.FormatInt(int64(user.MaxSessions), 10))
|
||||
form.Set("quota_size", strconv.FormatInt(user.QuotaSize, 10))
|
||||
form.Set("quota_files", strconv.FormatInt(int64(user.QuotaFiles), 10))
|
||||
form.Set("upload_bandwidth", "0")
|
||||
form.Set("download_bandwidth", "0")
|
||||
form.Set("permissions", "*")
|
||||
form.Set("sub_dirs_permissions", "")
|
||||
form.Set("status", strconv.Itoa(user.Status))
|
||||
form.Set("expiration_date", "2020-01-01 00:00:00")
|
||||
form.Set("allowed_ip", "")
|
||||
form.Set("denied_ip", "")
|
||||
form.Set("fs_provider", "4")
|
||||
form.Set("crypt_passphrase", "")
|
||||
form.Set("allowed_extensions", "/dir1::.jpg,.png")
|
||||
form.Set("denied_extensions", "/dir2::.zip")
|
||||
form.Set("max_upload_file_size", "0")
|
||||
// passphrase cannot be empty
|
||||
b, contentType, _ := getMultipartFormData(form, "", "")
|
||||
req, _ = http.NewRequest(http.MethodPost, webUserPath+"/"+strconv.FormatInt(user.ID, 10), &b)
|
||||
req.Header.Set("Content-Type", contentType)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
form.Set("crypt_passphrase", user.FsConfig.CryptConfig.Passphrase.GetPayload())
|
||||
b, contentType, _ = getMultipartFormData(form, "", "")
|
||||
req, _ = http.NewRequest(http.MethodPost, webUserPath+"/"+strconv.FormatInt(user.ID, 10), &b)
|
||||
req.Header.Set("Content-Type", contentType)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusSeeOther, rr.Code)
|
||||
req, _ = http.NewRequest(http.MethodGet, userPath+"?limit=1&offset=0&order=ASC&username="+user.Username, nil)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
var users []dataprovider.User
|
||||
err = render.DecodeJSON(rr.Body, &users)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(users))
|
||||
updateUser := users[0]
|
||||
assert.Equal(t, int64(1577836800000), updateUser.ExpirationDate)
|
||||
assert.Equal(t, 2, len(updateUser.Filters.FileExtensions))
|
||||
assert.Equal(t, kms.SecretStatusSecretBox, updateUser.FsConfig.CryptConfig.Passphrase.GetStatus())
|
||||
assert.NotEmpty(t, updateUser.FsConfig.CryptConfig.Passphrase.GetPayload())
|
||||
assert.Empty(t, updateUser.FsConfig.CryptConfig.Passphrase.GetKey())
|
||||
assert.Empty(t, updateUser.FsConfig.CryptConfig.Passphrase.GetAdditionalData())
|
||||
// now check that a redacted password is not saved
|
||||
form.Set("crypt_passphrase", "[**redacted**] ")
|
||||
b, contentType, _ = getMultipartFormData(form, "", "")
|
||||
req, _ = http.NewRequest(http.MethodPost, webUserPath+"/"+strconv.FormatInt(user.ID, 10), &b)
|
||||
req.Header.Set("Content-Type", contentType)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusSeeOther, rr.Code)
|
||||
req, _ = http.NewRequest(http.MethodGet, userPath+"?limit=1&offset=0&order=ASC&username="+user.Username, nil)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
users = nil
|
||||
err = render.DecodeJSON(rr.Body, &users)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(users))
|
||||
lastUpdatedUser := users[0]
|
||||
assert.Equal(t, kms.SecretStatusSecretBox, lastUpdatedUser.FsConfig.CryptConfig.Passphrase.GetStatus())
|
||||
assert.Equal(t, updateUser.FsConfig.CryptConfig.Passphrase.GetPayload(), lastUpdatedUser.FsConfig.CryptConfig.Passphrase.GetPayload())
|
||||
assert.Empty(t, lastUpdatedUser.FsConfig.CryptConfig.Passphrase.GetKey())
|
||||
assert.Empty(t, lastUpdatedUser.FsConfig.CryptConfig.Passphrase.GetAdditionalData())
|
||||
req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
func TestAddWebFoldersMock(t *testing.T) {
|
||||
mappedPath := filepath.Clean(os.TempDir())
|
||||
form := make(url.Values)
|
||||
|
|
|
@ -389,6 +389,11 @@ func TestCompareUserFsConfig(t *testing.T) {
|
|||
expected.FsConfig.S3Config.UploadConcurrency = 3
|
||||
err = compareUserFsConfig(expected, actual)
|
||||
assert.Error(t, err)
|
||||
expected.FsConfig.S3Config.UploadConcurrency = 0
|
||||
expected.FsConfig.CryptConfig.Passphrase = kms.NewPlainSecret("payload")
|
||||
err = compareUserFsConfig(expected, actual)
|
||||
assert.Error(t, err)
|
||||
expected.FsConfig.CryptConfig.Passphrase = kms.NewEmptySecret()
|
||||
}
|
||||
|
||||
func TestCompareUserGCSConfig(t *testing.T) {
|
||||
|
|
|
@ -2,7 +2,7 @@ openapi: 3.0.3
|
|||
info:
|
||||
title: SFTPGo
|
||||
description: 'SFTPGo REST API'
|
||||
version: 2.1.3
|
||||
version: 2.2.0
|
||||
|
||||
servers:
|
||||
- url: /api/v1
|
||||
|
@ -1078,6 +1078,12 @@ components:
|
|||
type: boolean
|
||||
nullable: true
|
||||
description: Azure Blob Storage configuration details
|
||||
CryptFsConfig:
|
||||
type: object
|
||||
properties:
|
||||
passphrase:
|
||||
$ref: '#/components/schemas/Secret'
|
||||
description: Crypt filesystem configuration details
|
||||
FilesystemConfig:
|
||||
type: object
|
||||
properties:
|
||||
|
@ -1088,18 +1094,22 @@ components:
|
|||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
description: >
|
||||
Providers:
|
||||
* `0` - Local filesystem
|
||||
* `1` - S3 Compatible Object Storage
|
||||
* `2` - Google Cloud Storage
|
||||
* `3` - Azure Blob Storage
|
||||
* `4` - Local filesystem encrypted
|
||||
s3config:
|
||||
$ref: '#/components/schemas/S3Config'
|
||||
gcsconfig:
|
||||
$ref: '#/components/schemas/GCSConfig'
|
||||
azblobconfig:
|
||||
$ref: '#/components/schemas/AzureBlobFsConfig'
|
||||
cryptconfig:
|
||||
$ref: '#/components/schemas/CryptFsConfig'
|
||||
description: Storage filesystem details
|
||||
BaseVirtualFolder:
|
||||
type: object
|
||||
|
|
135
httpd/web.go
135
httpd/web.go
|
@ -92,8 +92,6 @@ type userPage struct {
|
|||
RootDirPerms []string
|
||||
RedactedSecret string
|
||||
IsAdd bool
|
||||
IsS3SecretEnc bool
|
||||
IsAzSecretEnc bool
|
||||
}
|
||||
|
||||
type folderPage struct {
|
||||
|
@ -216,8 +214,6 @@ func renderAddUserPage(w http.ResponseWriter, user dataprovider.User, error stri
|
|||
ValidSSHLoginMethods: dataprovider.ValidSSHLoginMethods,
|
||||
ValidProtocols: dataprovider.ValidProtocols,
|
||||
RootDirPerms: user.GetPermissionsForPath("/"),
|
||||
IsS3SecretEnc: user.FsConfig.S3Config.AccessSecret.IsEncrypted(),
|
||||
IsAzSecretEnc: user.FsConfig.AzBlobConfig.AccountKey.IsEncrypted(),
|
||||
RedactedSecret: redactedSecret,
|
||||
}
|
||||
renderTemplate(w, templateUser, data)
|
||||
|
@ -234,8 +230,6 @@ func renderUpdateUserPage(w http.ResponseWriter, user dataprovider.User, error s
|
|||
ValidSSHLoginMethods: dataprovider.ValidSSHLoginMethods,
|
||||
ValidProtocols: dataprovider.ValidProtocols,
|
||||
RootDirPerms: user.GetPermissionsForPath("/"),
|
||||
IsS3SecretEnc: user.FsConfig.S3Config.AccessSecret.IsEncrypted(),
|
||||
IsAzSecretEnc: user.FsConfig.AzBlobConfig.AccountKey.IsEncrypted(),
|
||||
RedactedSecret: redactedSecret,
|
||||
}
|
||||
renderTemplate(w, templateUser, data)
|
||||
|
@ -444,6 +438,76 @@ func getSecretFromFormField(r *http.Request, field string) *kms.Secret {
|
|||
return secret
|
||||
}
|
||||
|
||||
func getS3Config(r *http.Request) (vfs.S3FsConfig, error) {
|
||||
var err error
|
||||
config := vfs.S3FsConfig{}
|
||||
config.Bucket = r.Form.Get("s3_bucket")
|
||||
config.Region = r.Form.Get("s3_region")
|
||||
config.AccessKey = r.Form.Get("s3_access_key")
|
||||
config.AccessSecret = getSecretFromFormField(r, "s3_access_secret")
|
||||
config.Endpoint = r.Form.Get("s3_endpoint")
|
||||
config.StorageClass = r.Form.Get("s3_storage_class")
|
||||
config.KeyPrefix = r.Form.Get("s3_key_prefix")
|
||||
config.UploadPartSize, err = strconv.ParseInt(r.Form.Get("s3_upload_part_size"), 10, 64)
|
||||
if err != nil {
|
||||
return config, err
|
||||
}
|
||||
config.UploadConcurrency, err = strconv.Atoi(r.Form.Get("s3_upload_concurrency"))
|
||||
return config, err
|
||||
}
|
||||
|
||||
func getGCSConfig(r *http.Request) (vfs.GCSFsConfig, error) {
|
||||
var err error
|
||||
config := vfs.GCSFsConfig{}
|
||||
|
||||
config.Bucket = r.Form.Get("gcs_bucket")
|
||||
config.StorageClass = r.Form.Get("gcs_storage_class")
|
||||
config.KeyPrefix = r.Form.Get("gcs_key_prefix")
|
||||
autoCredentials := r.Form.Get("gcs_auto_credentials")
|
||||
if autoCredentials != "" {
|
||||
config.AutomaticCredentials = 1
|
||||
} else {
|
||||
config.AutomaticCredentials = 0
|
||||
}
|
||||
credentials, _, err := r.FormFile("gcs_credential_file")
|
||||
if err == http.ErrMissingFile {
|
||||
return config, nil
|
||||
}
|
||||
if err != nil {
|
||||
return config, err
|
||||
}
|
||||
defer credentials.Close()
|
||||
fileBytes, err := ioutil.ReadAll(credentials)
|
||||
if err != nil || len(fileBytes) == 0 {
|
||||
if len(fileBytes) == 0 {
|
||||
err = errors.New("credentials file size must be greater than 0")
|
||||
}
|
||||
return config, err
|
||||
}
|
||||
config.Credentials = kms.NewPlainSecret(string(fileBytes))
|
||||
config.AutomaticCredentials = 0
|
||||
return config, err
|
||||
}
|
||||
|
||||
func getAzureConfig(r *http.Request) (vfs.AzBlobFsConfig, error) {
|
||||
var err error
|
||||
config := vfs.AzBlobFsConfig{}
|
||||
config.Container = r.Form.Get("az_container")
|
||||
config.AccountName = r.Form.Get("az_account_name")
|
||||
config.AccountKey = getSecretFromFormField(r, "az_account_key")
|
||||
config.SASURL = r.Form.Get("az_sas_url")
|
||||
config.Endpoint = r.Form.Get("az_endpoint")
|
||||
config.KeyPrefix = r.Form.Get("az_key_prefix")
|
||||
config.AccessTier = r.Form.Get("az_access_tier")
|
||||
config.UseEmulator = len(r.Form.Get("az_use_emulator")) > 0
|
||||
config.UploadPartSize, err = strconv.ParseInt(r.Form.Get("az_upload_part_size"), 10, 64)
|
||||
if err != nil {
|
||||
return config, err
|
||||
}
|
||||
config.UploadConcurrency, err = strconv.Atoi(r.Form.Get("az_upload_concurrency"))
|
||||
return config, err
|
||||
}
|
||||
|
||||
func getFsConfigFromUserPostFields(r *http.Request) (dataprovider.Filesystem, error) {
|
||||
var fs dataprovider.Filesystem
|
||||
provider, err := strconv.Atoi(r.Form.Get("fs_provider"))
|
||||
|
@ -452,65 +516,25 @@ func getFsConfigFromUserPostFields(r *http.Request) (dataprovider.Filesystem, er
|
|||
}
|
||||
fs.Provider = dataprovider.FilesystemProvider(provider)
|
||||
if fs.Provider == dataprovider.S3FilesystemProvider {
|
||||
fs.S3Config.Bucket = r.Form.Get("s3_bucket")
|
||||
fs.S3Config.Region = r.Form.Get("s3_region")
|
||||
fs.S3Config.AccessKey = r.Form.Get("s3_access_key")
|
||||
fs.S3Config.AccessSecret = getSecretFromFormField(r, "s3_access_secret")
|
||||
fs.S3Config.Endpoint = r.Form.Get("s3_endpoint")
|
||||
fs.S3Config.StorageClass = r.Form.Get("s3_storage_class")
|
||||
fs.S3Config.KeyPrefix = r.Form.Get("s3_key_prefix")
|
||||
fs.S3Config.UploadPartSize, err = strconv.ParseInt(r.Form.Get("s3_upload_part_size"), 10, 64)
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
fs.S3Config.UploadConcurrency, err = strconv.Atoi(r.Form.Get("s3_upload_concurrency"))
|
||||
config, err := getS3Config(r)
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
fs.S3Config = config
|
||||
} else if fs.Provider == dataprovider.GCSFilesystemProvider {
|
||||
fs.GCSConfig.Bucket = r.Form.Get("gcs_bucket")
|
||||
fs.GCSConfig.StorageClass = r.Form.Get("gcs_storage_class")
|
||||
fs.GCSConfig.KeyPrefix = r.Form.Get("gcs_key_prefix")
|
||||
autoCredentials := r.Form.Get("gcs_auto_credentials")
|
||||
if len(autoCredentials) > 0 {
|
||||
fs.GCSConfig.AutomaticCredentials = 1
|
||||
} else {
|
||||
fs.GCSConfig.AutomaticCredentials = 0
|
||||
}
|
||||
credentials, _, err := r.FormFile("gcs_credential_file")
|
||||
if err == http.ErrMissingFile {
|
||||
return fs, nil
|
||||
}
|
||||
config, err := getGCSConfig(r)
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
defer credentials.Close()
|
||||
fileBytes, err := ioutil.ReadAll(credentials)
|
||||
if err != nil || len(fileBytes) == 0 {
|
||||
if len(fileBytes) == 0 {
|
||||
err = errors.New("credentials file size must be greater than 0")
|
||||
}
|
||||
return fs, err
|
||||
}
|
||||
fs.GCSConfig.Credentials = kms.NewPlainSecret(string(fileBytes))
|
||||
fs.GCSConfig.AutomaticCredentials = 0
|
||||
fs.GCSConfig = config
|
||||
} else if fs.Provider == dataprovider.AzureBlobFilesystemProvider {
|
||||
fs.AzBlobConfig.Container = r.Form.Get("az_container")
|
||||
fs.AzBlobConfig.AccountName = r.Form.Get("az_account_name")
|
||||
fs.AzBlobConfig.AccountKey = getSecretFromFormField(r, "az_account_key")
|
||||
fs.AzBlobConfig.SASURL = r.Form.Get("az_sas_url")
|
||||
fs.AzBlobConfig.Endpoint = r.Form.Get("az_endpoint")
|
||||
fs.AzBlobConfig.KeyPrefix = r.Form.Get("az_key_prefix")
|
||||
fs.AzBlobConfig.AccessTier = r.Form.Get("az_access_tier")
|
||||
fs.AzBlobConfig.UseEmulator = len(r.Form.Get("az_use_emulator")) > 0
|
||||
fs.AzBlobConfig.UploadPartSize, err = strconv.ParseInt(r.Form.Get("az_upload_part_size"), 10, 64)
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
fs.AzBlobConfig.UploadConcurrency, err = strconv.Atoi(r.Form.Get("az_upload_concurrency"))
|
||||
config, err := getAzureConfig(r)
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
fs.AzBlobConfig = config
|
||||
} else if fs.Provider == dataprovider.CryptedFilesystemProvider {
|
||||
fs.CryptConfig.Passphrase = getSecretFromFormField(r, "crypt_passphrase")
|
||||
}
|
||||
return fs, nil
|
||||
}
|
||||
|
@ -687,6 +711,9 @@ func handleWebUpdateUserPost(w http.ResponseWriter, r *http.Request) {
|
|||
if !updatedUser.FsConfig.AzBlobConfig.AccountKey.IsPlain() && !updatedUser.FsConfig.AzBlobConfig.AccountKey.IsEmpty() {
|
||||
updatedUser.FsConfig.AzBlobConfig.AccountKey = user.FsConfig.AzBlobConfig.AccountKey
|
||||
}
|
||||
if !updatedUser.FsConfig.CryptConfig.Passphrase.IsPlain() && !updatedUser.FsConfig.CryptConfig.Passphrase.IsEmpty() {
|
||||
updatedUser.FsConfig.CryptConfig.Passphrase = user.FsConfig.CryptConfig.Passphrase
|
||||
}
|
||||
err = dataprovider.UpdateUser(updatedUser)
|
||||
if err == nil {
|
||||
if len(r.Form.Get("disconnect")) > 0 {
|
||||
|
|
|
@ -260,6 +260,11 @@ func (s *Service) configurePortableUser() string {
|
|||
if payload != "" {
|
||||
s.PortableUser.FsConfig.AzBlobConfig.AccountKey = kms.NewPlainSecret(payload)
|
||||
}
|
||||
case dataprovider.CryptedFilesystemProvider:
|
||||
payload := s.PortableUser.FsConfig.CryptConfig.Passphrase.GetPayload()
|
||||
if payload != "" {
|
||||
s.PortableUser.FsConfig.CryptConfig.Passphrase = kms.NewPlainSecret(payload)
|
||||
}
|
||||
}
|
||||
return printablePassword
|
||||
}
|
||||
|
|
484
sftpd/cryptfs_test.go
Normal file
484
sftpd/cryptfs_test.go
Normal file
|
@ -0,0 +1,484 @@
|
|||
package sftpd_test
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/minio/sio"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/drakkan/sftpgo/dataprovider"
|
||||
"github.com/drakkan/sftpgo/httpd"
|
||||
"github.com/drakkan/sftpgo/kms"
|
||||
"github.com/drakkan/sftpgo/vfs"
|
||||
)
|
||||
|
||||
const (
|
||||
testPassphrase = "test passphrase"
|
||||
)
|
||||
|
||||
func TestBasicSFTPCryptoHandling(t *testing.T) {
|
||||
usePubKey := false
|
||||
u := getTestUserWithCryptFs(usePubKey)
|
||||
u.QuotaSize = 6553600
|
||||
user, _, err := httpd.AddUser(u, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
client, err := getSftpClient(user, usePubKey)
|
||||
if assert.NoError(t, err) {
|
||||
defer client.Close()
|
||||
testFilePath := filepath.Join(homeBasePath, testFileName)
|
||||
testFileSize := int64(65535)
|
||||
encryptedFileSize, err := getEncryptedFileSize(testFileSize)
|
||||
assert.NoError(t, err)
|
||||
expectedQuotaSize := user.UsedQuotaSize + encryptedFileSize
|
||||
expectedQuotaFiles := user.UsedQuotaFiles + 1
|
||||
err = createTestFile(testFilePath, testFileSize)
|
||||
assert.NoError(t, err)
|
||||
err = sftpUploadFile(testFilePath, path.Join("/missing_dir", testFileName), testFileSize, client)
|
||||
assert.Error(t, err)
|
||||
err = sftpUploadFile(testFilePath, testFileName, testFileSize, client)
|
||||
assert.NoError(t, err)
|
||||
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
|
||||
err = sftpDownloadFile(testFileName, localDownloadPath, testFileSize, client)
|
||||
assert.NoError(t, err)
|
||||
initialHash, err := computeHashForFile(sha256.New(), testFilePath)
|
||||
assert.NoError(t, err)
|
||||
downloadedFileHash, err := computeHashForFile(sha256.New(), localDownloadPath)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, initialHash, downloadedFileHash)
|
||||
info, err := os.Stat(filepath.Join(user.HomeDir, testFileName))
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, encryptedFileSize, info.Size())
|
||||
}
|
||||
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
|
||||
assert.Equal(t, expectedQuotaSize, user.UsedQuotaSize)
|
||||
result, err := client.ReadDir(".")
|
||||
assert.NoError(t, err)
|
||||
if assert.Len(t, result, 1) {
|
||||
assert.Equal(t, testFileSize, result[0].Size())
|
||||
}
|
||||
info, err = client.Stat(testFileName)
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, testFileSize, info.Size())
|
||||
}
|
||||
err = client.Remove(testFileName)
|
||||
assert.NoError(t, err)
|
||||
_, err = client.Lstat(testFileName)
|
||||
assert.Error(t, err)
|
||||
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedQuotaFiles-1, user.UsedQuotaFiles)
|
||||
assert.Equal(t, expectedQuotaSize-encryptedFileSize, user.UsedQuotaSize)
|
||||
err = os.Remove(testFilePath)
|
||||
assert.NoError(t, err)
|
||||
err = os.Remove(localDownloadPath)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(user.GetHomeDir())
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestOpenReadWriteCryptoFs(t *testing.T) {
|
||||
// read and write is not supported on crypto fs
|
||||
usePubKey := false
|
||||
u := getTestUserWithCryptFs(usePubKey)
|
||||
u.QuotaSize = 6553600
|
||||
user, _, err := httpd.AddUser(u, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
client, err := getSftpClient(user, usePubKey)
|
||||
if assert.NoError(t, err) {
|
||||
defer client.Close()
|
||||
sftpFile, err := client.OpenFile(testFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC)
|
||||
if assert.NoError(t, err) {
|
||||
testData := []byte("sample test data")
|
||||
n, err := sftpFile.Write(testData)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(testData), n)
|
||||
buffer := make([]byte, 128)
|
||||
_, err = sftpFile.ReadAt(buffer, 1)
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), "SSH_FX_OP_UNSUPPORTED")
|
||||
}
|
||||
err = sftpFile.Close()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(user.GetHomeDir())
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestEmptyFile(t *testing.T) {
|
||||
usePubKey := true
|
||||
u := getTestUserWithCryptFs(usePubKey)
|
||||
user, _, err := httpd.AddUser(u, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
client, err := getSftpClient(user, usePubKey)
|
||||
if assert.NoError(t, err) {
|
||||
defer client.Close()
|
||||
sftpFile, err := client.OpenFile(testFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC)
|
||||
if assert.NoError(t, err) {
|
||||
testData := []byte("")
|
||||
n, err := sftpFile.Write(testData)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(testData), n)
|
||||
err = sftpFile.Close()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
info, err := client.Stat(testFileName)
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, int64(0), info.Size())
|
||||
}
|
||||
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
|
||||
err = sftpDownloadFile(testFileName, localDownloadPath, 0, client)
|
||||
assert.NoError(t, err)
|
||||
encryptedFileSize, err := getEncryptedFileSize(0)
|
||||
assert.NoError(t, err)
|
||||
info, err = os.Stat(filepath.Join(user.HomeDir, testFileName))
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, encryptedFileSize, info.Size())
|
||||
}
|
||||
err = os.Remove(localDownloadPath)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(user.GetHomeDir())
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestUploadResumeCryptFs(t *testing.T) {
|
||||
// upload resume is not supported
|
||||
usePubKey := true
|
||||
u := getTestUserWithCryptFs(usePubKey)
|
||||
user, _, err := httpd.AddUser(u, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(user.GetHomeDir())
|
||||
assert.NoError(t, err)
|
||||
client, err := getSftpClient(user, usePubKey)
|
||||
if assert.NoError(t, err) {
|
||||
defer client.Close()
|
||||
testFilePath := filepath.Join(homeBasePath, testFileName)
|
||||
testFileSize := int64(65535)
|
||||
appendDataSize := int64(65535)
|
||||
err = createTestFile(testFilePath, testFileSize)
|
||||
assert.NoError(t, err)
|
||||
err = sftpUploadFile(testFilePath, testFileName, testFileSize, client)
|
||||
assert.NoError(t, err)
|
||||
err = appendToTestFile(testFilePath, appendDataSize)
|
||||
assert.NoError(t, err)
|
||||
err = sftpUploadResumeFile(testFilePath, testFileName, testFileSize, false, client)
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), "SSH_FX_OP_UNSUPPORTED")
|
||||
}
|
||||
}
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(user.GetHomeDir())
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestQuotaFileReplaceCryptFs(t *testing.T) {
|
||||
usePubKey := false
|
||||
u := getTestUserWithCryptFs(usePubKey)
|
||||
u.QuotaFiles = 1000
|
||||
user, _, err := httpd.AddUser(u, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(user.GetHomeDir())
|
||||
assert.NoError(t, err)
|
||||
testFileSize := int64(65535)
|
||||
testFilePath := filepath.Join(homeBasePath, testFileName)
|
||||
encryptedFileSize, err := getEncryptedFileSize(testFileSize)
|
||||
assert.NoError(t, err)
|
||||
client, err := getSftpClient(user, usePubKey)
|
||||
if assert.NoError(t, err) { //nolint:dupl
|
||||
defer client.Close()
|
||||
expectedQuotaSize := user.UsedQuotaSize + encryptedFileSize
|
||||
expectedQuotaFiles := user.UsedQuotaFiles + 1
|
||||
err = createTestFile(testFilePath, testFileSize)
|
||||
assert.NoError(t, err)
|
||||
err = sftpUploadFile(testFilePath, testFileName, testFileSize, client)
|
||||
assert.NoError(t, err)
|
||||
// now replace the same file, the quota must not change
|
||||
err = sftpUploadFile(testFilePath, testFileName, testFileSize, client)
|
||||
assert.NoError(t, err)
|
||||
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
|
||||
assert.Equal(t, expectedQuotaSize, user.UsedQuotaSize)
|
||||
// now create a symlink, replace it with a file and check the quota
|
||||
// replacing a symlink is like uploading a new file
|
||||
err = client.Symlink(testFileName, testFileName+".link")
|
||||
assert.NoError(t, err)
|
||||
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
|
||||
assert.Equal(t, expectedQuotaSize, user.UsedQuotaSize)
|
||||
expectedQuotaFiles = expectedQuotaFiles + 1
|
||||
expectedQuotaSize = expectedQuotaSize + encryptedFileSize
|
||||
err = sftpUploadFile(testFilePath, testFileName+".link", testFileSize, client)
|
||||
assert.NoError(t, err)
|
||||
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
|
||||
assert.Equal(t, expectedQuotaSize, user.UsedQuotaSize)
|
||||
}
|
||||
// now set a quota size restriction and upload the same file, upload should fail for space limit exceeded
|
||||
user.QuotaSize = encryptedFileSize*2 - 1
|
||||
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
|
||||
assert.NoError(t, err)
|
||||
client, err = getSftpClient(user, usePubKey)
|
||||
if assert.NoError(t, err) {
|
||||
defer client.Close()
|
||||
err = sftpUploadFile(testFilePath, testFileName, testFileSize, client)
|
||||
assert.Error(t, err, "quota size exceeded, file upload must fail")
|
||||
err = client.Remove(testFileName)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
err = os.Remove(testFilePath)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(user.GetHomeDir())
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestQuotaScanCryptFs(t *testing.T) {
|
||||
usePubKey := false
|
||||
user, _, err := httpd.AddUser(getTestUserWithCryptFs(usePubKey), http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
testFileSize := int64(65535)
|
||||
encryptedFileSize, err := getEncryptedFileSize(testFileSize)
|
||||
assert.NoError(t, err)
|
||||
expectedQuotaSize := user.UsedQuotaSize + encryptedFileSize
|
||||
expectedQuotaFiles := user.UsedQuotaFiles + 1
|
||||
client, err := getSftpClient(user, usePubKey)
|
||||
if assert.NoError(t, err) {
|
||||
defer client.Close()
|
||||
testFilePath := filepath.Join(homeBasePath, testFileName)
|
||||
err = createTestFile(testFilePath, testFileSize)
|
||||
assert.NoError(t, err)
|
||||
err = sftpUploadFile(testFilePath, testFileName, testFileSize, client)
|
||||
assert.NoError(t, err)
|
||||
err = os.Remove(testFilePath)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
// create user with the same home dir, so there is at least an untracked file
|
||||
user, _, err = httpd.AddUser(getTestUser(usePubKey), http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
_, err = httpd.StartQuotaScan(user, http.StatusAccepted)
|
||||
assert.NoError(t, err)
|
||||
assert.Eventually(t, func() bool {
|
||||
scans, _, err := httpd.GetQuotaScans(http.StatusOK)
|
||||
if err == nil {
|
||||
return len(scans) == 0
|
||||
}
|
||||
return false
|
||||
}, 1*time.Second, 50*time.Millisecond)
|
||||
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
|
||||
assert.Equal(t, expectedQuotaSize, user.UsedQuotaSize)
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(user.GetHomeDir())
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestGetMimeType(t *testing.T) {
|
||||
usePubKey := true
|
||||
user, _, err := httpd.AddUser(getTestUserWithCryptFs(usePubKey), http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
client, err := getSftpClient(user, usePubKey)
|
||||
if assert.NoError(t, err) {
|
||||
defer client.Close()
|
||||
sftpFile, err := client.OpenFile(testFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC)
|
||||
if assert.NoError(t, err) {
|
||||
testData := []byte("some UTF-8 text so we should get a text/plain mime type")
|
||||
n, err := sftpFile.Write(testData)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(testData), n)
|
||||
err = sftpFile.Close()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
user.FsConfig.CryptConfig.Passphrase = kms.NewPlainSecret(testPassphrase)
|
||||
fs, err := user.GetFilesystem("connID")
|
||||
if assert.NoError(t, err) {
|
||||
assert.True(t, vfs.IsCryptOsFs(fs))
|
||||
mime, err := fs.GetMimeType(filepath.Join(user.GetHomeDir(), testFileName))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "text/plain; charset=utf-8", mime)
|
||||
}
|
||||
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(user.GetHomeDir())
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestTruncate(t *testing.T) {
|
||||
// truncate is not supported
|
||||
usePubKey := true
|
||||
user, _, err := httpd.AddUser(getTestUserWithCryptFs(usePubKey), http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
client, err := getSftpClient(user, usePubKey)
|
||||
if assert.NoError(t, err) {
|
||||
defer client.Close()
|
||||
f, err := client.OpenFile(testFileName, os.O_WRONLY)
|
||||
if assert.NoError(t, err) {
|
||||
err = f.Truncate(0)
|
||||
assert.NoError(t, err)
|
||||
err = f.Truncate(1)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
err = f.Close()
|
||||
assert.NoError(t, err)
|
||||
err = client.Truncate(testFileName, 0)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(user.GetHomeDir())
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestSCPBasicHandlingCryptoFs(t *testing.T) {
|
||||
if len(scpPath) == 0 {
|
||||
t.Skip("scp command not found, unable to execute this test")
|
||||
}
|
||||
usePubKey := true
|
||||
u := getTestUserWithCryptFs(usePubKey)
|
||||
u.QuotaSize = 6553600
|
||||
user, _, err := httpd.AddUser(u, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
testFilePath := filepath.Join(homeBasePath, testFileName)
|
||||
testFileSize := int64(131074)
|
||||
encryptedFileSize, err := getEncryptedFileSize(testFileSize)
|
||||
assert.NoError(t, err)
|
||||
expectedQuotaSize := user.UsedQuotaSize + encryptedFileSize
|
||||
expectedQuotaFiles := user.UsedQuotaFiles + 1
|
||||
err = createTestFile(testFilePath, testFileSize)
|
||||
assert.NoError(t, err)
|
||||
remoteUpPath := fmt.Sprintf("%v@127.0.0.1:%v", user.Username, "/")
|
||||
remoteDownPath := fmt.Sprintf("%v@127.0.0.1:%v", user.Username, path.Join("/", testFileName))
|
||||
localPath := filepath.Join(homeBasePath, "scp_download.dat")
|
||||
// test to download a missing file
|
||||
err = scpDownload(localPath, remoteDownPath, false, false)
|
||||
assert.Error(t, err, "downloading a missing file via scp must fail")
|
||||
err = scpUpload(testFilePath, remoteUpPath, false, false)
|
||||
assert.NoError(t, err)
|
||||
err = scpDownload(localPath, remoteDownPath, false, false)
|
||||
assert.NoError(t, err)
|
||||
fi, err := os.Stat(localPath)
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, testFileSize, fi.Size())
|
||||
}
|
||||
fi, err = os.Stat(filepath.Join(user.GetHomeDir(), testFileName))
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, encryptedFileSize, fi.Size())
|
||||
}
|
||||
err = os.Remove(localPath)
|
||||
assert.NoError(t, err)
|
||||
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
|
||||
assert.Equal(t, expectedQuotaSize, user.UsedQuotaSize)
|
||||
// now overwrite the existing file
|
||||
err = scpUpload(testFilePath, remoteUpPath, false, false)
|
||||
assert.NoError(t, err)
|
||||
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
|
||||
assert.Equal(t, expectedQuotaSize, user.UsedQuotaSize)
|
||||
|
||||
assert.NoError(t, err)
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(user.GetHomeDir())
|
||||
assert.NoError(t, err)
|
||||
err = os.Remove(testFilePath)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestSCPRecursiveCryptFs(t *testing.T) {
|
||||
if len(scpPath) == 0 {
|
||||
t.Skip("scp command not found, unable to execute this test")
|
||||
}
|
||||
usePubKey := true
|
||||
u := getTestUserWithCryptFs(usePubKey)
|
||||
user, _, err := httpd.AddUser(u, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
testBaseDirName := "atestdir"
|
||||
testBaseDirPath := filepath.Join(homeBasePath, testBaseDirName)
|
||||
testBaseDirDownName := "test_dir_down" //nolint:goconst
|
||||
testBaseDirDownPath := filepath.Join(homeBasePath, testBaseDirDownName)
|
||||
testFilePath := filepath.Join(homeBasePath, testBaseDirName, testFileName)
|
||||
testFilePath1 := filepath.Join(homeBasePath, testBaseDirName, testBaseDirName, testFileName)
|
||||
testFileSize := int64(131074)
|
||||
err = createTestFile(testFilePath, testFileSize)
|
||||
assert.NoError(t, err)
|
||||
err = createTestFile(testFilePath1, testFileSize)
|
||||
assert.NoError(t, err)
|
||||
remoteDownPath := fmt.Sprintf("%v@127.0.0.1:%v", user.Username, path.Join("/", testBaseDirName))
|
||||
remoteUpPath := fmt.Sprintf("%v@127.0.0.1:%v", user.Username, "/")
|
||||
err = scpUpload(testBaseDirPath, remoteUpPath, true, false)
|
||||
assert.NoError(t, err)
|
||||
// overwrite existing dir
|
||||
err = scpUpload(testBaseDirPath, remoteUpPath, true, false)
|
||||
assert.NoError(t, err)
|
||||
err = scpDownload(testBaseDirDownPath, remoteDownPath, true, true)
|
||||
assert.NoError(t, err)
|
||||
// test download without passing -r
|
||||
err = scpDownload(testBaseDirDownPath, remoteDownPath, true, false)
|
||||
assert.Error(t, err, "recursive download without -r must fail")
|
||||
|
||||
fi, err := os.Stat(filepath.Join(testBaseDirDownPath, testFileName))
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, testFileSize, fi.Size())
|
||||
}
|
||||
fi, err = os.Stat(filepath.Join(testBaseDirDownPath, testBaseDirName, testFileName))
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, testFileSize, fi.Size())
|
||||
}
|
||||
// upload to a non existent dir
|
||||
remoteUpPath = fmt.Sprintf("%v@127.0.0.1:%v", user.Username, "/non_existent_dir")
|
||||
err = scpUpload(testBaseDirPath, remoteUpPath, true, false)
|
||||
assert.Error(t, err, "uploading via scp to a non existent dir must fail")
|
||||
|
||||
err = os.RemoveAll(testBaseDirPath)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(testBaseDirDownPath)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(user.GetHomeDir())
|
||||
assert.NoError(t, err)
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func getEncryptedFileSize(size int64) (int64, error) {
|
||||
encSize, err := sio.EncryptedSize(uint64(size))
|
||||
return int64(encSize) + 33, err
|
||||
}
|
||||
|
||||
func getTestUserWithCryptFs(usePubKey bool) dataprovider.User {
|
||||
u := getTestUser(usePubKey)
|
||||
u.FsConfig.Provider = dataprovider.CryptedFilesystemProvider
|
||||
u.FsConfig.CryptConfig.Passphrase = kms.NewPlainSecret(testPassphrase)
|
||||
return u
|
||||
}
|
|
@ -1463,8 +1463,9 @@ func TestSCPDownloadFileData(t *testing.T) {
|
|||
WriteError: writeErr,
|
||||
}
|
||||
connection := &Connection{
|
||||
BaseConnection: common.NewBaseConnection("", common.ProtocolSCP, dataprovider.User{}, nil),
|
||||
channel: &mockSSHChannelReadErr,
|
||||
BaseConnection: common.NewBaseConnection("", common.ProtocolSCP, dataprovider.User{},
|
||||
vfs.NewOsFs("", os.TempDir(), nil)),
|
||||
channel: &mockSSHChannelReadErr,
|
||||
}
|
||||
scpCommand := scpCommand{
|
||||
sshCommand: sshCommand{
|
||||
|
|
|
@ -404,6 +404,9 @@ func (c *scpCommand) sendDownloadFileData(filePath string, stat os.FileInfo, tra
|
|||
return err
|
||||
}
|
||||
}
|
||||
if vfs.IsCryptOsFs(c.connection.Fs) {
|
||||
stat = c.connection.Fs.(*vfs.CryptFs).ConvertFileInfo(stat)
|
||||
}
|
||||
|
||||
fileSize := stat.Size()
|
||||
readed := int64(0)
|
||||
|
|
|
@ -580,9 +580,9 @@ func TestUploadResume(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
initialHash, err := computeHashForFile(sha256.New(), testFilePath)
|
||||
assert.NoError(t, err)
|
||||
donwloadedFileHash, err := computeHashForFile(sha256.New(), localDownloadPath)
|
||||
downloadedFileHash, err := computeHashForFile(sha256.New(), localDownloadPath)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, initialHash, donwloadedFileHash)
|
||||
assert.Equal(t, initialHash, downloadedFileHash)
|
||||
err = sftpUploadResumeFile(testFilePath, testFileName, testFileSize+appendDataSize, true, client)
|
||||
assert.Error(t, err, "file upload resume with invalid offset must fail")
|
||||
err = os.Remove(testFilePath)
|
||||
|
@ -2198,7 +2198,7 @@ func TestQuotaFileReplace(t *testing.T) {
|
|||
testFileSize := int64(65535)
|
||||
testFilePath := filepath.Join(homeBasePath, testFileName)
|
||||
client, err := getSftpClient(user, usePubKey)
|
||||
if assert.NoError(t, err) {
|
||||
if assert.NoError(t, err) { //nolint:dupl
|
||||
defer client.Close()
|
||||
expectedQuotaSize := user.UsedQuotaSize + testFileSize
|
||||
expectedQuotaFiles := user.UsedQuotaFiles + 1
|
||||
|
@ -6930,6 +6930,8 @@ func TestSCPBasicHandling(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
err = os.Remove(testFilePath)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(user.GetHomeDir())
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestSCPUploadFileOverwrite(t *testing.T) {
|
||||
|
|
|
@ -305,7 +305,8 @@
|
|||
<label for="idFilesystem" class="col-sm-2 col-form-label">Storage</label>
|
||||
<div class="col-sm-10">
|
||||
<select class="form-control" id="idFilesystem" name="fs_provider" onchange="onFilesystemChanged(this.value)">
|
||||
<option value="0" {{if eq .User.FsConfig.Provider 0 }}selected{{end}}>local</option>
|
||||
<option value="0" {{if eq .User.FsConfig.Provider 0 }}selected{{end}}>Local</option>
|
||||
<option value="4" {{if eq .User.FsConfig.Provider 4 }}selected{{end}}>Local encrypted</option>
|
||||
<option value="1" {{if eq .User.FsConfig.Provider 1 }}selected{{end}}>AWS S3 (Compatible)</option>
|
||||
<option value="2" {{if eq .User.FsConfig.Provider 2 }}selected{{end}}>Google Cloud Storage</option>
|
||||
<option value="3" {{if eq .User.FsConfig.Provider 3 }}selected{{end}}>Azure Blob Storage</option>
|
||||
|
@ -313,6 +314,14 @@
|
|||
</div>
|
||||
</div>
|
||||
|
||||
<div class="form-group row crypt">
|
||||
<label for="idCryptPassphrase" class="col-sm-2 col-form-label">Passphrase</label>
|
||||
<div class="col-sm-10">
|
||||
<input type="text" class="form-control" id="idCryptPassphrase" name="crypt_passphrase" placeholder=""
|
||||
value="{{if .User.FsConfig.CryptConfig.Passphrase.IsEncrypted}}{{.RedactedSecret}}{{else}}{{.User.FsConfig.CryptConfig.Passphrase.GetPayload}}{{end}}" maxlength="1000">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="form-group row s3">
|
||||
<label for="idS3Bucket" class="col-sm-2 col-form-label">Bucket</label>
|
||||
<div class="col-sm-3">
|
||||
|
@ -337,7 +346,7 @@
|
|||
<label for="idS3AccessSecret" class="col-sm-2 col-form-label">Access Secret</label>
|
||||
<div class="col-sm-3">
|
||||
<input type="text" class="form-control" id="idS3AccessSecret" name="s3_access_secret" placeholder=""
|
||||
value="{{if .IsS3SecretEnc}}{{.RedactedSecret}}{{else}}{{.User.FsConfig.S3Config.AccessSecret.GetPayload}}{{end}}" maxlength="1000">
|
||||
value="{{if .User.FsConfig.S3Config.AccessSecret.IsEncrypted}}{{.RedactedSecret}}{{else}}{{.User.FsConfig.S3Config.AccessSecret.GetPayload}}{{end}}" maxlength="1000">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
@ -448,7 +457,7 @@
|
|||
<label for="idAzAccountKey" class="col-sm-2 col-form-label">Account Key</label>
|
||||
<div class="col-sm-10">
|
||||
<input type="text" class="form-control" id="idAzAccountKey" name="az_account_key" placeholder=""
|
||||
value="{{if .IsAzSecretEnc}}{{.RedactedSecret}}{{else}}{{.User.FsConfig.AzBlobConfig.AccountKey.GetPayload}}{{end}}" maxlength="1000">
|
||||
value="{{if .User.FsConfig.AzBlobConfig.AccountKey.IsEncrypted}}{{.RedactedSecret}}{{else}}{{.User.FsConfig.AzBlobConfig.AccountKey.GetPayload}}{{end}}" maxlength="1000">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
@ -592,25 +601,36 @@
|
|||
$('.form-group.gcs').hide();
|
||||
$('.form-group.row.azblob').hide();
|
||||
$('.form-group.azblob').hide();
|
||||
$('.form-group.crypt').hide();
|
||||
$('.form-group.row.s3').show();
|
||||
} else if (val == '2'){
|
||||
$('.form-group.row.gcs').show();
|
||||
$('.form-group.gcs').show();
|
||||
$('.form-group.row.azblob').hide();
|
||||
$('.form-group.azblob').hide();
|
||||
$('.form-group.crypt').hide();
|
||||
$('.form-group.row.s3').hide();
|
||||
} else if (val == '3'){
|
||||
$('.form-group.row.azblob').show();
|
||||
$('.form-group.azblob').show();
|
||||
$('.form-group.row.gcs').hide();
|
||||
$('.form-group.gcs').hide();
|
||||
$('.form-group.crypt').hide();
|
||||
$('.form-group.row.s3').hide();
|
||||
} else if (val == '4'){
|
||||
$('.form-group.row.gcs').hide();
|
||||
$('.form-group.gcs').hide();
|
||||
$('.form-group.row.s3').hide();
|
||||
$('.form-group.row.azblob').hide();
|
||||
$('.form-group.azblob').hide();
|
||||
$('.form-group.crypt').show();
|
||||
} else {
|
||||
$('.form-group.row.gcs').hide();
|
||||
$('.form-group.gcs').hide();
|
||||
$('.form-group.row.s3').hide();
|
||||
$('.form-group.row.azblob').hide();
|
||||
$('.form-group.azblob').hide();
|
||||
$('.form-group.crypt').hide();
|
||||
}
|
||||
}
|
||||
</script>
|
||||
|
|
355
vfs/cryptfs.go
Normal file
355
vfs/cryptfs.go
Normal file
|
@ -0,0 +1,355 @@
|
|||
package vfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/eikenb/pipeat"
|
||||
"github.com/minio/sha256-simd"
|
||||
"github.com/minio/sio"
|
||||
"golang.org/x/crypto/hkdf"
|
||||
|
||||
"github.com/drakkan/sftpgo/logger"
|
||||
)
|
||||
|
||||
const (
|
||||
// cryptFsName is the name for the local Fs implementation with encryption support
|
||||
cryptFsName = "cryptfs"
|
||||
version10 byte = 0x10
|
||||
nonceV10Size int = 32
|
||||
headerV10Size int64 = 33 // 1 (version byte) + 32 (nonce size)
|
||||
)
|
||||
|
||||
// CryptFs is a Fs implementation that allows to encrypts/decrypts local files
|
||||
type CryptFs struct {
|
||||
*OsFs
|
||||
masterKey []byte
|
||||
}
|
||||
|
||||
// NewCryptFs returns a CryptFs object
|
||||
func NewCryptFs(connectionID, rootDir string, config CryptFsConfig) (Fs, error) {
|
||||
if err := ValidateCryptFsConfig(&config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if config.Passphrase.IsEncrypted() {
|
||||
if err := config.Passphrase.Decrypt(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
fs := &CryptFs{
|
||||
OsFs: &OsFs{
|
||||
name: cryptFsName,
|
||||
connectionID: connectionID,
|
||||
rootDir: rootDir,
|
||||
virtualFolders: nil,
|
||||
},
|
||||
masterKey: []byte(config.Passphrase.GetPayload()),
|
||||
}
|
||||
return fs, nil
|
||||
}
|
||||
|
||||
// Name returns the name for the Fs implementation
|
||||
func (fs *CryptFs) Name() string {
|
||||
return fs.name
|
||||
}
|
||||
|
||||
// Open opens the named file for reading
|
||||
func (fs *CryptFs) Open(name string, offset int64) (File, *pipeat.PipeReaderAt, func(), error) {
|
||||
f, key, err := fs.getFileAndEncryptionKey(name)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
isZeroDownload, err := isZeroBytesDownload(f, offset)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
r, w, err := pipeat.PipeInDir(fs.rootDir)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
go func() {
|
||||
if isZeroDownload {
|
||||
w.CloseWithError(err) //nolint:errcheck
|
||||
f.Close()
|
||||
fsLog(fs, logger.LevelDebug, "zero bytes download completed, path: %#v", name)
|
||||
return
|
||||
}
|
||||
var n int64
|
||||
var err error
|
||||
|
||||
if offset == 0 {
|
||||
n, err = sio.Decrypt(w, f, fs.getSIOConfig(key))
|
||||
} else {
|
||||
var readerAt io.ReaderAt
|
||||
var readed, written int
|
||||
buf := make([]byte, 65536)
|
||||
wrapper := &cryptedFileWrapper{
|
||||
File: f,
|
||||
}
|
||||
readerAt, err = sio.DecryptReaderAt(wrapper, fs.getSIOConfig(key))
|
||||
if err == nil {
|
||||
finished := false
|
||||
for !finished {
|
||||
readed, err = readerAt.ReadAt(buf, offset)
|
||||
if err != nil && err != io.EOF {
|
||||
break
|
||||
}
|
||||
if err == io.EOF {
|
||||
finished = true
|
||||
err = nil
|
||||
}
|
||||
if readed > 0 {
|
||||
written, err = w.Write(buf[:readed])
|
||||
n += int64(written)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
break
|
||||
}
|
||||
if readed != written {
|
||||
err = io.ErrShortWrite
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
w.CloseWithError(err) //nolint:errcheck
|
||||
f.Close()
|
||||
fsLog(fs, logger.LevelDebug, "download completed, path: %#v size: %v, err: %v", name, n, err)
|
||||
}()
|
||||
|
||||
return nil, r, nil, nil
|
||||
}
|
||||
|
||||
// Create creates or opens the named file for writing
|
||||
func (fs *CryptFs) Create(name string, flag int) (File, *PipeWriter, func(), error) {
|
||||
var err error
|
||||
var f *os.File
|
||||
if flag == 0 {
|
||||
f, err = os.Create(name)
|
||||
} else {
|
||||
f, err = os.OpenFile(name, flag, os.ModePerm)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
header := encryptedFileHeader{
|
||||
version: version10,
|
||||
nonce: make([]byte, 32),
|
||||
}
|
||||
_, err = io.ReadFull(rand.Reader, header.nonce)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
var key [32]byte
|
||||
kdf := hkdf.New(sha256.New, fs.masterKey, header.nonce, nil)
|
||||
_, err = io.ReadFull(kdf, key[:])
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
r, w, err := pipeat.PipeInDir(fs.rootDir)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
err = header.Store(f)
|
||||
if err != nil {
|
||||
r.Close()
|
||||
w.Close()
|
||||
f.Close()
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
p := NewPipeWriter(w)
|
||||
|
||||
go func() {
|
||||
n, err := sio.Encrypt(f, r, fs.getSIOConfig(key))
|
||||
f.Close()
|
||||
r.CloseWithError(err) //nolint:errcheck
|
||||
p.Done(err)
|
||||
fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, readed bytes: %v, err: %v", name, n, err)
|
||||
}()
|
||||
|
||||
return nil, p, nil, nil
|
||||
}
|
||||
|
||||
// Truncate changes the size of the named file
|
||||
func (*CryptFs) Truncate(name string, size int64) error {
|
||||
return ErrVfsUnsupported
|
||||
}
|
||||
|
||||
// ReadDir reads the directory named by dirname and returns
|
||||
// a list of directory entries.
|
||||
func (fs *CryptFs) ReadDir(dirname string) ([]os.FileInfo, error) {
|
||||
f, err := os.Open(dirname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
list, err := f.Readdir(-1)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result := make([]os.FileInfo, 0, len(list))
|
||||
for _, info := range list {
|
||||
result = append(result, fs.ConvertFileInfo(info))
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// IsUploadResumeSupported returns false sio does not support random access writes
|
||||
func (*CryptFs) IsUploadResumeSupported() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// IsAtomicUploadSupported returns true if atomic upload is supported
|
||||
func (*CryptFs) IsAtomicUploadSupported() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// GetMimeType returns the content type
|
||||
func (fs *CryptFs) GetMimeType(name string) (string, error) {
|
||||
f, key, err := fs.getFileAndEncryptionKey(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
readSize, err := sio.DecryptedSize(512)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
buf := make([]byte, readSize)
|
||||
n, err := io.ReadFull(f, buf)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
return "", err
|
||||
}
|
||||
|
||||
decrypted := bytes.NewBuffer(nil)
|
||||
_, err = sio.Decrypt(decrypted, bytes.NewBuffer(buf[:n]), fs.getSIOConfig(key))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
ctype := http.DetectContentType(decrypted.Bytes())
|
||||
// Rewind file.
|
||||
_, err = f.Seek(0, io.SeekStart)
|
||||
return ctype, err
|
||||
}
|
||||
|
||||
func (fs *CryptFs) getSIOConfig(key [32]byte) sio.Config {
|
||||
return sio.Config{
|
||||
MinVersion: sio.Version20,
|
||||
MaxVersion: sio.Version20,
|
||||
Key: key[:],
|
||||
}
|
||||
}
|
||||
|
||||
// ConvertFileInfo returns a FileInfo with the decrypted size
|
||||
func (fs *CryptFs) ConvertFileInfo(info os.FileInfo) os.FileInfo {
|
||||
if !info.Mode().IsRegular() {
|
||||
return info
|
||||
}
|
||||
size := info.Size()
|
||||
if size >= headerV10Size {
|
||||
size -= headerV10Size
|
||||
decryptedSize, err := sio.DecryptedSize(uint64(size))
|
||||
if err == nil {
|
||||
size = int64(decryptedSize)
|
||||
}
|
||||
} else {
|
||||
size = 0
|
||||
}
|
||||
return NewFileInfo(info.Name(), info.IsDir(), size, info.ModTime(), false)
|
||||
}
|
||||
|
||||
func (fs *CryptFs) getFileAndEncryptionKey(name string) (*os.File, [32]byte, error) {
|
||||
var key [32]byte
|
||||
f, err := os.Open(name)
|
||||
if err != nil {
|
||||
return nil, key, err
|
||||
}
|
||||
header := encryptedFileHeader{}
|
||||
err = header.Load(f)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, key, err
|
||||
}
|
||||
kdf := hkdf.New(sha256.New, fs.masterKey, header.nonce, nil)
|
||||
_, err = io.ReadFull(kdf, key[:])
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, key, err
|
||||
}
|
||||
return f, key, err
|
||||
}
|
||||
|
||||
func isZeroBytesDownload(f *os.File, offset int64) (bool, error) {
|
||||
info, err := f.Stat()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if info.Size() == headerV10Size {
|
||||
return true, nil
|
||||
}
|
||||
if info.Size() > headerV10Size {
|
||||
decSize, err := sio.DecryptedSize(uint64(info.Size() - headerV10Size))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if int64(decSize) == offset {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
type encryptedFileHeader struct {
|
||||
version byte
|
||||
nonce []byte
|
||||
}
|
||||
|
||||
func (h *encryptedFileHeader) Store(f *os.File) error {
|
||||
buf := make([]byte, 0, headerV10Size)
|
||||
buf = append(buf, version10)
|
||||
buf = append(buf, h.nonce...)
|
||||
_, err := f.Write(buf)
|
||||
return err
|
||||
}
|
||||
|
||||
func (h *encryptedFileHeader) Load(f *os.File) error {
|
||||
vers := make([]byte, 1)
|
||||
_, err := io.ReadFull(f, vers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h.version = vers[0]
|
||||
if h.version == version10 {
|
||||
nonce := make([]byte, nonceV10Size)
|
||||
_, err := io.ReadFull(f, nonce)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h.nonce = nonce
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unsupported encryption version: %v", h.version)
|
||||
}
|
||||
|
||||
type cryptedFileWrapper struct {
|
||||
*os.File
|
||||
}
|
||||
|
||||
func (w *cryptedFileWrapper) ReadAt(p []byte, offset int64) (n int, err error) {
|
||||
return w.File.ReadAt(p, offset+headerV10Size)
|
||||
}
|
|
@ -215,8 +215,8 @@ func (fs *OsFs) CheckRootPath(username string, uid int, gid int) bool {
|
|||
return (err == nil)
|
||||
}
|
||||
|
||||
// ScanRootDirContents returns the number of files contained in a directory and
|
||||
// their size
|
||||
// ScanRootDirContents returns the number of files contained in the root
|
||||
// directory and their size
|
||||
func (fs *OsFs) ScanRootDirContents() (int, int64, error) {
|
||||
numFiles, size, err := fs.GetDirSize(fs.rootDir)
|
||||
for _, v := range fs.virtualFolders {
|
||||
|
|
26
vfs/vfs.go
26
vfs/vfs.go
|
@ -183,6 +183,11 @@ type AzBlobFsConfig struct {
|
|||
AccessTier string `json:"access_tier,omitempty"`
|
||||
}
|
||||
|
||||
// CryptFsConfig defines the configuration to store local files as encrypted
|
||||
type CryptFsConfig struct {
|
||||
Passphrase *kms.Secret `json:"passphrase,omitempty"`
|
||||
}
|
||||
|
||||
// PipeWriter defines a wrapper for pipeat.PipeWriterAt.
|
||||
type PipeWriter struct {
|
||||
writer *pipeat.PipeWriterAt
|
||||
|
@ -232,11 +237,16 @@ func IsDirectory(fs Fs, path string) (bool, error) {
|
|||
return fileInfo.IsDir(), err
|
||||
}
|
||||
|
||||
// IsLocalOsFs returns true if fs is the local filesystem implementation
|
||||
// IsLocalOsFs returns true if fs is a local filesystem implementation
|
||||
func IsLocalOsFs(fs Fs) bool {
|
||||
return fs.Name() == osFsName
|
||||
}
|
||||
|
||||
// IsCryptOsFs returns true if fs is an encrypted local filesystem implementation
|
||||
func IsCryptOsFs(fs Fs) bool {
|
||||
return fs.Name() == cryptFsName
|
||||
}
|
||||
|
||||
func checkS3Credentials(config *S3FsConfig) error {
|
||||
if config.AccessKey == "" && !config.AccessSecret.IsEmpty() {
|
||||
return errors.New("access_key cannot be empty with access_secret not empty")
|
||||
|
@ -363,6 +373,20 @@ func ValidateAzBlobFsConfig(config *AzBlobFsConfig) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// ValidateCryptFsConfig returns nil if the specified CryptFs config is valid, otherwise an error
|
||||
func ValidateCryptFsConfig(config *CryptFsConfig) error {
|
||||
if config.Passphrase == nil || config.Passphrase.IsEmpty() {
|
||||
return errors.New("invalid passphrase")
|
||||
}
|
||||
if !config.Passphrase.IsValidInput() {
|
||||
return errors.New("passphrase cannot be empty or invalid")
|
||||
}
|
||||
if config.Passphrase.IsEncrypted() && !config.Passphrase.IsValid() {
|
||||
return errors.New("invalid encrypted passphrase")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetPathPermissions calls fs.Chown.
|
||||
// It does nothing for local filesystem on windows
|
||||
func SetPathPermissions(fs Fs, path string, uid int, gid int) {
|
||||
|
|
|
@ -121,6 +121,9 @@ func (f *webDavFile) Stat() (os.FileInfo, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if vfs.IsCryptOsFs(f.Fs) {
|
||||
info = f.Fs.(*vfs.CryptFs).ConvertFileInfo(info)
|
||||
}
|
||||
fi := &webDavFileInfo{
|
||||
FileInfo: info,
|
||||
Fs: f.Fs,
|
||||
|
@ -211,6 +214,9 @@ func (f *webDavFile) updateStatInfo() error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if vfs.IsCryptOsFs(f.Fs) {
|
||||
info = f.Fs.(*vfs.CryptFs).ConvertFileInfo(info)
|
||||
}
|
||||
f.info = info
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -157,7 +157,7 @@ func (c *Connection) getFile(fsPath, virtualPath string) (webdav.File, error) {
|
|||
var cancelFn func()
|
||||
|
||||
// for cloud fs we open the file when we receive the first read to avoid to download the first part of
|
||||
// the file if it was opened only to do a stat or a readdir and so it ins't a download
|
||||
// the file if it was opened only to do a stat or a readdir and so it is not a real download
|
||||
if vfs.IsLocalOsFs(c.Fs) {
|
||||
file, r, cancelFn, err = c.Fs.Open(fsPath, 0)
|
||||
if err != nil {
|
||||
|
|
|
@ -14,10 +14,12 @@ import (
|
|||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/minio/sio"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/studio-b12/gowebdav"
|
||||
|
@ -283,6 +285,81 @@ func TestBasicHandling(t *testing.T) {
|
|||
assert.Len(t, common.Connections.GetStats(), 0)
|
||||
}
|
||||
|
||||
func TestBasicHandlingCryptFs(t *testing.T) {
|
||||
u := getTestUserWithCryptFs()
|
||||
u.QuotaSize = 6553600
|
||||
user, _, err := httpd.AddUser(u, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
client := getWebDavClient(user)
|
||||
assert.NoError(t, checkBasicFunc(client))
|
||||
|
||||
testFilePath := filepath.Join(homeBasePath, testFileName)
|
||||
testFileSize := int64(65535)
|
||||
encryptedFileSize, err := getEncryptedFileSize(testFileSize)
|
||||
assert.NoError(t, err)
|
||||
expectedQuotaSize := user.UsedQuotaSize + encryptedFileSize
|
||||
expectedQuotaFiles := user.UsedQuotaFiles + 1
|
||||
err = createTestFile(testFilePath, testFileSize)
|
||||
assert.NoError(t, err)
|
||||
err = uploadFile(testFilePath, testFileName, testFileSize, client)
|
||||
assert.NoError(t, err)
|
||||
// overwrite an existing file
|
||||
err = uploadFile(testFilePath, testFileName, testFileSize, client)
|
||||
assert.NoError(t, err)
|
||||
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
|
||||
err = downloadFile(testFileName, localDownloadPath, testFileSize, client)
|
||||
assert.NoError(t, err)
|
||||
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
|
||||
assert.Equal(t, expectedQuotaSize, user.UsedQuotaSize)
|
||||
files, err := client.ReadDir("/")
|
||||
assert.NoError(t, err)
|
||||
if assert.Len(t, files, 1) {
|
||||
assert.Equal(t, testFileSize, files[0].Size())
|
||||
}
|
||||
err = client.Remove(testFileName)
|
||||
assert.NoError(t, err)
|
||||
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedQuotaFiles-1, user.UsedQuotaFiles)
|
||||
assert.Equal(t, expectedQuotaSize-encryptedFileSize, user.UsedQuotaSize)
|
||||
err = downloadFile(testFileName, localDownloadPath, testFileSize, client)
|
||||
assert.Error(t, err)
|
||||
testDir := "testdir"
|
||||
err = client.Mkdir(testDir, os.ModePerm)
|
||||
assert.NoError(t, err)
|
||||
err = client.MkdirAll(path.Join(testDir, "sub", "sub"), os.ModePerm)
|
||||
assert.NoError(t, err)
|
||||
err = client.MkdirAll(path.Join(testDir, "sub1", "sub1"), os.ModePerm)
|
||||
assert.NoError(t, err)
|
||||
err = client.MkdirAll(path.Join(testDir, "sub2", "sub2"), os.ModePerm)
|
||||
assert.NoError(t, err)
|
||||
err = uploadFile(testFilePath, path.Join(testDir, testFileName+".txt"), testFileSize, client)
|
||||
assert.NoError(t, err)
|
||||
err = uploadFile(testFilePath, path.Join(testDir, testFileName), testFileSize, client)
|
||||
assert.NoError(t, err)
|
||||
files, err = client.ReadDir(testDir)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, files, 5)
|
||||
for _, f := range files {
|
||||
if strings.HasPrefix(f.Name(), testFileName) {
|
||||
assert.Equal(t, testFileSize, f.Size())
|
||||
} else {
|
||||
assert.True(t, f.IsDir())
|
||||
}
|
||||
}
|
||||
err = os.Remove(testFilePath)
|
||||
assert.NoError(t, err)
|
||||
err = os.Remove(localDownloadPath)
|
||||
assert.NoError(t, err)
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(user.GetHomeDir())
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, common.Connections.GetStats(), 0)
|
||||
}
|
||||
|
||||
func TestLoginInvalidPwd(t *testing.T) {
|
||||
u := getTestUser()
|
||||
user, _, err := httpd.AddUser(u, http.StatusOK)
|
||||
|
@ -940,49 +1017,50 @@ func TestLoginInvalidFs(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBytesRangeRequests(t *testing.T) {
|
||||
u := getTestUser()
|
||||
user, _, err := httpd.AddUser(u, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
testFileName := "test_file.txt"
|
||||
testFilePath := filepath.Join(homeBasePath, testFileName)
|
||||
fileContent := []byte("test file contents")
|
||||
err = ioutil.WriteFile(testFilePath, fileContent, os.ModePerm)
|
||||
assert.NoError(t, err)
|
||||
client := getWebDavClient(user)
|
||||
err = uploadFile(testFilePath, testFileName, int64(len(fileContent)), client)
|
||||
assert.NoError(t, err)
|
||||
remotePath := fmt.Sprintf("http://%v/%v/%v", webDavServerAddr, user.Username, testFileName)
|
||||
req, err := http.NewRequest(http.MethodGet, remotePath, nil)
|
||||
if assert.NoError(t, err) {
|
||||
httpClient := httpclient.GetHTTPClient()
|
||||
req.SetBasicAuth(user.Username, defaultPassword)
|
||||
req.Header.Set("Range", "bytes=5-")
|
||||
resp, err := httpClient.Do(req)
|
||||
for _, u := range []dataprovider.User{getTestUser(), getTestUserWithCryptFs()} {
|
||||
user, _, err := httpd.AddUser(u, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
testFileName := "test_file.txt"
|
||||
testFilePath := filepath.Join(homeBasePath, testFileName)
|
||||
fileContent := []byte("test file contents")
|
||||
err = ioutil.WriteFile(testFilePath, fileContent, os.ModePerm)
|
||||
assert.NoError(t, err)
|
||||
client := getWebDavClient(user)
|
||||
err = uploadFile(testFilePath, testFileName, int64(len(fileContent)), client)
|
||||
assert.NoError(t, err)
|
||||
remotePath := fmt.Sprintf("http://%v/%v/%v", webDavServerAddr, user.Username, testFileName)
|
||||
req, err := http.NewRequest(http.MethodGet, remotePath, nil)
|
||||
if assert.NoError(t, err) {
|
||||
defer resp.Body.Close()
|
||||
assert.Equal(t, http.StatusPartialContent, resp.StatusCode)
|
||||
bodyBytes, err := ioutil.ReadAll(resp.Body)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "file contents", string(bodyBytes))
|
||||
httpClient := httpclient.GetHTTPClient()
|
||||
req.SetBasicAuth(user.Username, defaultPassword)
|
||||
req.Header.Set("Range", "bytes=5-")
|
||||
resp, err := httpClient.Do(req)
|
||||
if assert.NoError(t, err) {
|
||||
defer resp.Body.Close()
|
||||
assert.Equal(t, http.StatusPartialContent, resp.StatusCode)
|
||||
bodyBytes, err := ioutil.ReadAll(resp.Body)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "file contents", string(bodyBytes))
|
||||
}
|
||||
req.Header.Set("Range", "bytes=5-8")
|
||||
resp, err = httpClient.Do(req)
|
||||
if assert.NoError(t, err) {
|
||||
defer resp.Body.Close()
|
||||
assert.Equal(t, http.StatusPartialContent, resp.StatusCode)
|
||||
bodyBytes, err := ioutil.ReadAll(resp.Body)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "file", string(bodyBytes))
|
||||
}
|
||||
}
|
||||
req.Header.Set("Range", "bytes=5-8")
|
||||
resp, err = httpClient.Do(req)
|
||||
if assert.NoError(t, err) {
|
||||
defer resp.Body.Close()
|
||||
assert.Equal(t, http.StatusPartialContent, resp.StatusCode)
|
||||
bodyBytes, err := ioutil.ReadAll(resp.Body)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "file", string(bodyBytes))
|
||||
}
|
||||
}
|
||||
|
||||
assert.NoError(t, err)
|
||||
err = os.Remove(testFilePath)
|
||||
assert.NoError(t, err)
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(user.GetHomeDir())
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, err)
|
||||
err = os.Remove(testFilePath)
|
||||
assert.NoError(t, err)
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(user.GetHomeDir())
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGETAsPROPFIND(t *testing.T) {
|
||||
|
@ -1296,6 +1374,18 @@ func getTestUser() dataprovider.User {
|
|||
return user
|
||||
}
|
||||
|
||||
func getTestUserWithCryptFs() dataprovider.User {
|
||||
user := getTestUser()
|
||||
user.FsConfig.Provider = dataprovider.CryptedFilesystemProvider
|
||||
user.FsConfig.CryptConfig.Passphrase = kms.NewPlainSecret("testPassphrase")
|
||||
return user
|
||||
}
|
||||
|
||||
func getEncryptedFileSize(size int64) (int64, error) {
|
||||
encSize, err := sio.EncryptedSize(uint64(size))
|
||||
return int64(encSize) + 33, err
|
||||
}
|
||||
|
||||
func getExtAuthScriptContent(user dataprovider.User, nonJSONResponse bool, username string) []byte {
|
||||
extAuthContent := []byte("#!/bin/sh\n\n")
|
||||
extAuthContent = append(extAuthContent, []byte(fmt.Sprintf("if test \"$SFTPGO_AUTHD_USERNAME\" = \"%v\"; then\n", user.Username))...)
|
||||
|
|
Loading…
Reference in a new issue