Mirror of https://github.com/drakkan/sftpgo.git, synced 2024-11-24 08:30:27 +00:00
Parent 4d5494912d, commit a6985075b9
43 changed files with 3556 additions and 1767 deletions

README.md (10 changes)

@@ -38,7 +38,7 @@ It can serve local filesystem, S3 (compatible) Object Storage, Google Cloud Stor
- SCP and rsync are supported.
- FTP/S is supported. You can configure the FTP service to require TLS for both control and data connections.
- [WebDAV](./docs/webdav.md) is supported.
- Support for serving local filesystem, S3 Compatible Object Storage and Google Cloud Storage over SFTP/SCP/FTP/WebDAV.
- Support for serving local filesystem, S3 Compatible Object Storage, Google Cloud Storage, Azure Blob Storage or another SFTP server over SFTP/SCP/FTP/WebDAV.
- Per user protocols restrictions. You can configure the allowed protocols (SSH/FTP/WebDAV) for each user.
- [Prometheus metrics](./docs/metrics.md) are exposed.
- Support for HAProxy PROXY protocol: you can proxy and/or load balance the SFTP/SCP/FTP/WebDAV service without losing the information about the client's address.
@@ -201,6 +201,14 @@ Each user can be mapped with a Google Cloud Storage bucket or a bucket virtual f

Each user can be mapped with an Azure Blob Storage container or a container virtual folder. This way, the mapped container/virtual folder is exposed over SFTP/SCP/FTP/WebDAV. More information about Azure Blob Storage integration can be found [here](./docs/azure-blob-storage.md).

### SFTP backend

Each user can be mapped to another SFTP server account or a subfolder of it. More information can be found [here](./docs/sftpfs.md).

### Encrypted backend

Data at-rest encryption is supported via the [cryptfs backend](./docs/dare.md).

### Other Storage backends

Adding new storage backends is quite easy:
@@ -67,6 +67,12 @@ var (
portableAzULConcurrency int
portableAzUseEmulator bool
portableCryptPassphrase string
portableSFTPEndpoint string
portableSFTPUsername string
portableSFTPPassword string
portableSFTPPrivateKeyPath string
portableSFTPFingerprints []string
portableSFTPPrefix string
portableCmd = &cobra.Command{
Use: "portable",
Short: "Serve a single directory",
@@ -88,25 +94,25 @@ Please take a look at the usage below to customize the serving parameters`,
}
permissions := make(map[string][]string)
permissions["/"] = portablePermissions
var portableGCSCredentials []byte
if fsProvider == dataprovider.GCSFilesystemProvider && len(portableGCSCredentialsFile) > 0 {
fi, err := os.Stat(portableGCSCredentialsFile)
portableGCSCredentials := ""
if fsProvider == dataprovider.GCSFilesystemProvider && portableGCSCredentialsFile != "" {
contents, err := getFileContents(portableGCSCredentialsFile)
if err != nil {
fmt.Printf("Invalid GCS credentials file: %v\n", err)
fmt.Printf("Unable to get GCS credentials: %v\n", err)
os.Exit(1)
}
if fi.Size() > 1048576 {
fmt.Printf("Invalid GCS credentials file: %#v is too big %v/1048576 bytes\n", portableGCSCredentialsFile,
fi.Size())
os.Exit(1)
}
creds, err := ioutil.ReadFile(portableGCSCredentialsFile)
if err != nil {
fmt.Printf("Unable to read credentials file: %v\n", err)
}
portableGCSCredentials = creds
portableGCSCredentials = contents
portableGCSAutoCredentials = 0
}
portableSFTPPrivateKey := ""
if fsProvider == dataprovider.SFTPFilesystemProvider && portableSFTPPrivateKeyPath != "" {
contents, err := getFileContents(portableSFTPPrivateKeyPath)
if err != nil {
fmt.Printf("Unable to get SFTP private key: %v\n", err)
os.Exit(1)
}
portableSFTPPrivateKey = contents
}
if portableFTPDPort >= 0 && len(portableFTPSCert) > 0 && len(portableFTPSKey) > 0 {
_, err := common.NewCertManager(portableFTPSCert, portableFTPSKey, "FTP portable")
if err != nil {
@@ -157,7 +163,7 @@ Please take a look at the usage below to customize the serving parameters`,
},
GCSConfig: vfs.GCSFsConfig{
Bucket: portableGCSBucket,
Credentials: kms.NewPlainSecret(string(portableGCSCredentials)),
Credentials: kms.NewPlainSecret(portableGCSCredentials),
AutomaticCredentials: portableGCSAutoCredentials,
StorageClass: portableGCSStorageClass,
KeyPrefix: portableGCSKeyPrefix,
@@ -177,6 +183,14 @@ Please take a look at the usage below to customize the serving parameters`,
CryptConfig: vfs.CryptFsConfig{
Passphrase: kms.NewPlainSecret(portableCryptPassphrase),
},
SFTPConfig: vfs.SFTPFsConfig{
Endpoint: portableSFTPEndpoint,
Username: portableSFTPUsername,
Password: kms.NewPlainSecret(portableSFTPPassword),
PrivateKey: kms.NewPlainSecret(portableSFTPPrivateKey),
Fingerprints: portableSFTPFingerprints,
Prefix: portableSFTPPrefix,
},
},
Filters: dataprovider.UserFilters{
FilePatterns: parsePatternsFilesFilters(),
@@ -245,7 +259,8 @@ inside the advertised TXT record`)
1 => AWS S3 compatible
2 => Google Cloud Storage
3 => Azure Blob Storage
4 => Encrypted local filesystem`)
4 => Encrypted local filesystem
5 => SFTP`)
portableCmd.Flags().StringVar(&portableS3Bucket, "s3-bucket", "", "")
portableCmd.Flags().StringVar(&portableS3Region, "s3-region", "", "")
portableCmd.Flags().StringVar(&portableS3AccessKey, "s3-access-key", "", "")
@@ -292,6 +307,16 @@ prefix and its contents`)
parallel`)
portableCmd.Flags().BoolVar(&portableAzUseEmulator, "az-use-emulator", false, "")
portableCmd.Flags().StringVar(&portableCryptPassphrase, "crypto-passphrase", "", `Passphrase for encryption/decryption`)
portableCmd.Flags().StringVar(&portableSFTPEndpoint, "sftp-endpoint", "", `SFTP endpoint as host:port for SFTP
provider`)
portableCmd.Flags().StringVar(&portableSFTPUsername, "sftp-username", "", `SFTP user for SFTP provider`)
portableCmd.Flags().StringVar(&portableSFTPPassword, "sftp-password", "", `SFTP password for SFTP provider`)
portableCmd.Flags().StringVar(&portableSFTPPrivateKeyPath, "sftp-key-path", "", `SFTP private key path for SFTP provider`)
portableCmd.Flags().StringSliceVar(&portableSFTPFingerprints, "sftp-fingerprints", []string{}, `SFTP fingerprints to verify remote host
key for SFTP provider`)
portableCmd.Flags().StringVar(&portableSFTPPrefix, "sftp-prefix", "", `SFTP prefix allows restrict all
operations to a given path within the
remote SFTP server`)
rootCmd.AddCommand(portableCmd)
}
@@ -349,3 +374,18 @@ func getPatternsFilterValues(value string) (string, []string) {
}
return "", nil
}

func getFileContents(name string) (string, error) {
fi, err := os.Stat(name)
if err != nil {
return "", err
}
if fi.Size() > 1048576 {
return "", fmt.Errorf("%#v is too big %v/1048576 bytes", name, fi.Size())
}
contents, err := ioutil.ReadFile(name)
if err != nil {
return "", err
}
return string(contents), nil
}
@@ -164,6 +164,7 @@ type ActiveConnection interface {
AddTransfer(t ActiveTransfer)
RemoveTransfer(t ActiveTransfer)
GetTransfers() []ConnectionTransfer
CloseFS() error
}

// StatAttributes defines the attributes for set stat commands
@@ -433,12 +434,14 @@ func (conns *ActiveConnections) Remove(connectionID string) {

for idx, conn := range conns.connections {
if conn.GetID() == connectionID {
err := conn.CloseFS()
lastIdx := len(conns.connections) - 1
conns.connections[idx] = conns.connections[lastIdx]
conns.connections[lastIdx] = nil
conns.connections = conns.connections[:lastIdx]
metrics.UpdateActiveConnectionsSize(lastIdx)
logger.Debug(conn.GetProtocol(), conn.GetID(), "connection removed, num open connections: %v", lastIdx)
logger.Debug(conn.GetProtocol(), conn.GetID(), "connection removed, close fs error: %v, num open connections: %v",
err, lastIdx)
return
}
}
@@ -100,6 +100,14 @@ func (c *BaseConnection) GetLastActivity() time.Time {
return time.Unix(0, atomic.LoadInt64(&c.lastActivity))
}

// CloseFS closes the underlying fs
func (c *BaseConnection) CloseFS() error {
if c.Fs != nil {
return c.Fs.Close()
}
return nil
}

// AddTransfer associates a new transfer to this connection
func (c *BaseConnection) AddTransfer(t ActiveTransfer) {
c.Lock()
@@ -459,7 +467,7 @@ func (c *BaseConnection) ignoreSetStat() bool {
if Config.SetstatMode == 1 {
return true
}
if Config.SetstatMode == 2 && !vfs.IsLocalOsFs(c.Fs) {
if Config.SetstatMode == 2 && !vfs.IsLocalOrSFTPFs(c.Fs) {
return true
}
return false
@@ -564,7 +572,7 @@ func (c *BaseConnection) truncateFile(fsPath, virtualPath string, size int64) er
initialSize = info.Size()
err = c.Fs.Truncate(fsPath, size)
}
if err == nil && vfs.IsLocalOsFs(c.Fs) {
if err == nil && vfs.IsLocalOrSFTPFs(c.Fs) {
sizeDiff := initialSize - size
vfolder, err := c.User.GetVirtualFolderForPath(path.Dir(virtualPath))
if err == nil {
@@ -2,7 +2,6 @@ package common

import (
"errors"
"os"
"path"
"sync"
"sync/atomic"
@@ -181,7 +180,7 @@ func (t *BaseTransfer) getUploadFileSize() (int64, error) {
fileSize = info.Size()
}
if vfs.IsCryptOsFs(t.Fs) && t.ErrTransfer != nil {
errDelete := os.Remove(t.fsPath)
errDelete := t.Connection.Fs.Remove(t.fsPath, false)
if errDelete != nil {
t.Connection.Log(logger.LevelWarn, "error removing partial crypto file %#v: %v", t.fsPath, errDelete)
}
@@ -205,7 +204,7 @@ func (t *BaseTransfer) Close() error {
metrics.TransferCompleted(atomic.LoadInt64(&t.BytesSent), atomic.LoadInt64(&t.BytesReceived), t.transferType, t.ErrTransfer)
if t.ErrTransfer == ErrQuotaExceeded && t.File != nil {
// if quota is exceeded we try to remove the partial file for uploads to local filesystem
err = os.Remove(t.File.Name())
err = t.Connection.Fs.Remove(t.File.Name(), false)
if err == nil {
numFiles--
atomic.StoreInt64(&t.BytesReceived, 0)
@@ -215,11 +214,11 @@ func (t *BaseTransfer) Close() error {
t.File.Name(), err)
} else if t.transferType == TransferUpload && t.File != nil && t.File.Name() != t.fsPath {
if t.ErrTransfer == nil || Config.UploadMode == UploadModeAtomicWithResume {
err = os.Rename(t.File.Name(), t.fsPath)
err = t.Connection.Fs.Rename(t.File.Name(), t.fsPath)
t.Connection.Log(logger.LevelDebug, "atomic upload completed, rename: %#v -> %#v, error: %v",
t.File.Name(), t.fsPath, err)
} else {
err = os.Remove(t.File.Name())
err = t.Connection.Fs.Remove(t.File.Name(), false)
t.Connection.Log(logger.LevelWarn, "atomic upload completed with error: \"%v\", delete temporary file: %#v, "+
"deletion error: %v", t.ErrTransfer, t.File.Name(), err)
if err == nil {
@@ -277,8 +277,10 @@ func convertFsConfigToV4(fs Filesystem, username string) (compatFilesystemV4, er
}
fsV4.GCSConfig.Credentials = []byte(creds)
}
case CryptedFilesystemProvider:
// crypted provider was not supported in v4, the configuration will be lost
default:
// a provider not supported in v4, the configuration will be lost
providerLog(logger.LevelWarn, "provider %v was not supported in v4, the configuration for the user %#v will be lost",
fs.Provider, username)
fsV4.Provider = 0
}
return fsV4, nil
@@ -831,9 +831,11 @@ func createProvider(basePath string) error {
}

func buildUserHomeDir(user *User) {
if len(user.HomeDir) == 0 {
if len(config.UsersBaseDir) > 0 {
if user.HomeDir == "" {
if config.UsersBaseDir != "" {
user.HomeDir = filepath.Join(config.UsersBaseDir, user.Username)
} else if user.FsConfig.Provider == SFTPFilesystemProvider {
user.HomeDir = filepath.Join(os.TempDir(), user.Username)
}
}
}
@@ -1166,61 +1168,61 @@ func saveGCSCredentials(user *User) error {

func validateFilesystemConfig(user *User) error {
if user.FsConfig.Provider == S3FilesystemProvider {
err := vfs.ValidateS3FsConfig(&user.FsConfig.S3Config)
if err != nil {
if err := user.FsConfig.S3Config.Validate(); err != nil {
return &ValidationError{err: fmt.Sprintf("could not validate s3config: %v", err)}
}
if user.FsConfig.S3Config.AccessSecret.IsPlain() {
user.FsConfig.S3Config.AccessSecret.SetAdditionalData(user.Username)
err = user.FsConfig.S3Config.AccessSecret.Encrypt()
if err != nil {
return &ValidationError{err: fmt.Sprintf("could not encrypt s3 access secret: %v", err)}
}
if err := user.FsConfig.S3Config.EncryptCredentials(user.Username); err != nil {
return &ValidationError{err: fmt.Sprintf("could not encrypt s3 access secret: %v", err)}
}
user.FsConfig.GCSConfig = vfs.GCSFsConfig{}
user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{}
user.FsConfig.CryptConfig = vfs.CryptFsConfig{}
user.FsConfig.SFTPConfig = vfs.SFTPFsConfig{}
return nil
} else if user.FsConfig.Provider == GCSFilesystemProvider {
err := vfs.ValidateGCSFsConfig(&user.FsConfig.GCSConfig, user.getGCSCredentialsFilePath())
if err != nil {
if err := user.FsConfig.GCSConfig.Validate(user.getGCSCredentialsFilePath()); err != nil {
return &ValidationError{err: fmt.Sprintf("could not validate GCS config: %v", err)}
}
user.FsConfig.S3Config = vfs.S3FsConfig{}
user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{}
user.FsConfig.CryptConfig = vfs.CryptFsConfig{}
user.FsConfig.SFTPConfig = vfs.SFTPFsConfig{}
return nil
} else if user.FsConfig.Provider == AzureBlobFilesystemProvider {
err := vfs.ValidateAzBlobFsConfig(&user.FsConfig.AzBlobConfig)
if err != nil {
if err := user.FsConfig.AzBlobConfig.Validate(); err != nil {
return &ValidationError{err: fmt.Sprintf("could not validate Azure Blob config: %v", err)}
}
if user.FsConfig.AzBlobConfig.AccountKey.IsPlain() {
user.FsConfig.AzBlobConfig.AccountKey.SetAdditionalData(user.Username)
err = user.FsConfig.AzBlobConfig.AccountKey.Encrypt()
if err != nil {
return &ValidationError{err: fmt.Sprintf("could not encrypt Azure blob account key: %v", err)}
}
if err := user.FsConfig.AzBlobConfig.EncryptCredentials(user.Username); err != nil {
return &ValidationError{err: fmt.Sprintf("could not encrypt Azure blob account key: %v", err)}
}
user.FsConfig.S3Config = vfs.S3FsConfig{}
user.FsConfig.GCSConfig = vfs.GCSFsConfig{}
user.FsConfig.CryptConfig = vfs.CryptFsConfig{}
user.FsConfig.SFTPConfig = vfs.SFTPFsConfig{}
return nil
} else if user.FsConfig.Provider == CryptedFilesystemProvider {
err := vfs.ValidateCryptFsConfig(&user.FsConfig.CryptConfig)
if err != nil {
if err := user.FsConfig.CryptConfig.Validate(); err != nil {
return &ValidationError{err: fmt.Sprintf("could not validate Crypt fs config: %v", err)}
}
if user.FsConfig.CryptConfig.Passphrase.IsPlain() {
user.FsConfig.CryptConfig.Passphrase.SetAdditionalData(user.Username)
err = user.FsConfig.CryptConfig.Passphrase.Encrypt()
if err != nil {
return &ValidationError{err: fmt.Sprintf("could not encrypt Crypt fs passphrase: %v", err)}
}
if err := user.FsConfig.CryptConfig.EncryptCredentials(user.Username); err != nil {
return &ValidationError{err: fmt.Sprintf("could not encrypt Crypt fs passphrase: %v", err)}
}
user.FsConfig.S3Config = vfs.S3FsConfig{}
user.FsConfig.GCSConfig = vfs.GCSFsConfig{}
user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{}
user.FsConfig.SFTPConfig = vfs.SFTPFsConfig{}
return nil
} else if user.FsConfig.Provider == SFTPFilesystemProvider {
if err := user.FsConfig.SFTPConfig.Validate(); err != nil {
return &ValidationError{err: fmt.Sprintf("could not validate SFTP fs config: %v", err)}
}
if err := user.FsConfig.SFTPConfig.EncryptCredentials(user.Username); err != nil {
return &ValidationError{err: fmt.Sprintf("could not encrypt SFTP fs credentials: %v", err)}
}
user.FsConfig.S3Config = vfs.S3FsConfig{}
user.FsConfig.GCSConfig = vfs.GCSFsConfig{}
user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{}
user.FsConfig.CryptConfig = vfs.CryptFsConfig{}
return nil
}
user.FsConfig.Provider = LocalFilesystemProvider
@@ -1228,6 +1230,7 @@ func validateFilesystemConfig(user *User) error {
user.FsConfig.GCSConfig = vfs.GCSFsConfig{}
user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{}
user.FsConfig.CryptConfig = vfs.CryptFsConfig{}
user.FsConfig.SFTPConfig = vfs.SFTPFsConfig{}
return nil
}
@@ -156,6 +156,7 @@ const (
GCSFilesystemProvider // Google Cloud Storage
AzureBlobFilesystemProvider // Azure Blob Storage
CryptedFilesystemProvider // Local encrypted
SFTPFilesystemProvider // SFTP
)

// Filesystem defines cloud storage filesystem details
@@ -165,6 +166,7 @@ type Filesystem struct {
GCSConfig vfs.GCSFsConfig `json:"gcsconfig,omitempty"`
AzBlobConfig vfs.AzBlobFsConfig `json:"azblobconfig,omitempty"`
CryptConfig vfs.CryptFsConfig `json:"cryptconfig,omitempty"`
SFTPConfig vfs.SFTPFsConfig `json:"sftpconfig,omitempty"`
}

// User defines a SFTPGo user
@@ -234,6 +236,8 @@ func (u *User) GetFilesystem(connectionID string) (vfs.Fs, error) {
return vfs.NewAzBlobFs(connectionID, u.GetHomeDir(), u.FsConfig.AzBlobConfig)
case CryptedFilesystemProvider:
return vfs.NewCryptFs(connectionID, u.GetHomeDir(), u.FsConfig.CryptConfig)
case SFTPFilesystemProvider:
return vfs.NewSFTPFs(connectionID, u.FsConfig.SFTPConfig)
default:
return vfs.NewOsFs(connectionID, u.GetHomeDir(), u.VirtualFolders), nil
}
@@ -251,6 +255,9 @@ func (u *User) HideConfidentialData() {
u.FsConfig.AzBlobConfig.AccountKey.Hide()
case CryptedFilesystemProvider:
u.FsConfig.CryptConfig.Passphrase.Hide()
case SFTPFilesystemProvider:
u.FsConfig.SFTPConfig.Password.Hide()
u.FsConfig.SFTPConfig.PrivateKey.Hide()
}
}
@@ -716,12 +723,17 @@ func (u *User) GetInfoString() string {
t := utils.GetTimeFromMsecSinceEpoch(u.LastLogin)
result += fmt.Sprintf("Last login: %v ", t.Format("2006-01-02 15:04:05")) // YYYY-MM-DD HH:MM:SS
}
if u.FsConfig.Provider == S3FilesystemProvider {
switch u.FsConfig.Provider {
case S3FilesystemProvider:
result += "Storage: S3 "
} else if u.FsConfig.Provider == GCSFilesystemProvider {
case GCSFilesystemProvider:
result += "Storage: GCS "
} else if u.FsConfig.Provider == AzureBlobFilesystemProvider {
case AzureBlobFilesystemProvider:
result += "Storage: Azure "
case CryptedFilesystemProvider:
result += "Storage: Encrypted "
case SFTPFilesystemProvider:
result += "Storage: SFTP "
}
if len(u.PublicKeys) > 0 {
result += fmt.Sprintf("Public keys: %v ", len(u.PublicKeys))
@@ -791,6 +803,12 @@ func (u *User) SetEmptySecretsIfNil() {
if u.FsConfig.CryptConfig.Passphrase == nil {
u.FsConfig.CryptConfig.Passphrase = kms.NewEmptySecret()
}
if u.FsConfig.SFTPConfig.Password == nil {
u.FsConfig.SFTPConfig.Password = kms.NewEmptySecret()
}
if u.FsConfig.SFTPConfig.PrivateKey == nil {
u.FsConfig.SFTPConfig.PrivateKey = kms.NewEmptySecret()
}
}

func (u *User) getACopy() User {
@@ -855,7 +873,15 @@ func (u *User) getACopy() User {
CryptConfig: vfs.CryptFsConfig{
Passphrase: u.FsConfig.CryptConfig.Passphrase.Clone(),
},
SFTPConfig: vfs.SFTPFsConfig{
Endpoint: u.FsConfig.SFTPConfig.Endpoint,
Username: u.FsConfig.SFTPConfig.Username,
Password: u.FsConfig.SFTPConfig.Password.Clone(),
PrivateKey: u.FsConfig.SFTPConfig.PrivateKey.Clone(),
Prefix: u.FsConfig.SFTPConfig.Prefix,
},
}
copy(fsConfig.SFTPConfig.Fingerprints, u.FsConfig.SFTPConfig.Fingerprints)

return User{
ID: u.ID,
@@ -1,89 +1,5 @@
# Account's configuration properties

For each account, the following properties can be configured:

- `username`
- `password` used for password authentication. For users created using SFTPGo REST API, if the password has no known hashing algo prefix, it will be stored using argon2id. SFTPGo supports checking passwords stored with bcrypt, pbkdf2, md5crypt and sha512crypt too. For pbkdf2 the supported format is `$<algo>$<iterations>$<salt>$<hashed pwd base64 encoded>`, where algo is `pbkdf2-sha1` or `pbkdf2-sha256` or `pbkdf2-sha512` or `$pbkdf2-b64salt-sha256$`. For example the `pbkdf2-sha256` of the word `password` using 150000 iterations and `E86a9YMX3zC7` as salt must be stored as `$pbkdf2-sha256$150000$E86a9YMX3zC7$R5J62hsSq+pYw00hLLPKBbcGXmq7fj5+/M0IFoYtZbo=`. In pbkdf2 variant with `b64salt` the salt is base64 encoded. For bcrypt the format must be the one supported by golang's [crypto/bcrypt](https://godoc.org/golang.org/x/crypto/bcrypt) package, for example the password `secret` with cost `14` must be stored as `$2a$14$ajq8Q7fbtFRQvXpdCq7Jcuy.Rx1h/L4J60Otx.gyNLbAYctGMJ9tK`. For md5crypt and sha512crypt we support the format used in `/etc/shadow` with the `$1$` and `$6$` prefix, this is useful if you are migrating from Unix system user accounts. We support Apache md5crypt (`$apr1$` prefix) too. Using the REST API you can send a password hashed as bcrypt, pbkdf2, md5crypt or sha512crypt and it will be stored as is.
- `public_keys` array of public keys. At least one public key or the password is mandatory.
- `status` 1 means "active", 0 "inactive". An inactive account cannot login.
- `expiration_date` expiration date as unix timestamp in milliseconds. An expired account cannot login. 0 means no expiration.
- `home_dir` the user cannot upload or download files outside this directory. Must be an absolute path. A local home directory is required for Cloud Storage Backends too: in this case it will store temporary files.
- `virtual_folders` list of mappings between virtual SFTP/SCP paths and local filesystem paths outside the user home directory. More information can be found [here](./virtual-folders.md)
- `uid`, `gid`. If SFTPGo runs as root system user then the created files and directories will be assigned to this system uid/gid. Ignored on windows or if SFTPGo runs as non root user: in this case files and directories for all SFTP users will be owned by the system user that runs SFTPGo.
- `max_sessions` maximum concurrent sessions. 0 means unlimited.
- `quota_size` maximum size allowed as bytes. 0 means unlimited.
- `quota_files` maximum number of files allowed. 0 means unlimited.
- `permissions` for SFTP paths. The following per directory permissions are supported:
  - `*` all permissions are granted
  - `list` list items is allowed
  - `download` download files is allowed
  - `upload` upload files is allowed
  - `overwrite` overwrite an existing file, while uploading, is allowed. `upload` permission is required to allow file overwrite
  - `delete` delete files or directories is allowed
  - `rename` rename a file or a directory is allowed if this permission is granted on source and target path. You can enable rename in a more controlled way granting `delete` permission on source directory and `upload`/`create_dirs`/`create_symlinks` permissions on target directory
  - `create_dirs` create directories is allowed
  - `create_symlinks` create symbolic links is allowed
  - `chmod` changing file or directory permissions is allowed. On Windows, only the 0200 bit (owner writable) of mode is used; it controls whether the file's read-only attribute is set or cleared. The other bits are currently unused. Use mode 0400 for a read-only file and 0600 for a readable+writable file.
  - `chown` changing file or directory owner and group is allowed. Changing owner and group is not supported on Windows.
  - `chtimes` changing file or directory access and modification time is allowed
- `upload_bandwidth` maximum upload bandwidth as KB/s, 0 means unlimited.
- `download_bandwidth` maximum download bandwidth as KB/s, 0 means unlimited.
- `last_login` last user login as unix timestamp in milliseconds. It is saved at most once every 10 minutes
- `allowed_ip`, List of IP/Mask allowed to login. Any IP address not contained in this list cannot login. IP/Mask must be in CIDR notation as defined in RFC 4632 and RFC 4291, for example "192.0.2.0/24" or "2001:db8::/32"
- `denied_ip`, List of IP/Mask not allowed to login. If an IP address is both allowed and denied then login will be denied
- `max_upload_file_size`, max allowed size, as bytes, for a single file upload. The upload will be aborted if/when the size of the file being sent exceeds this limit. 0 means unlimited. This restriction does not apply for SSH system commands such as `git` and `rsync`
- `denied_login_methods`, List of login methods not allowed. To enable multi-step authentication you have to allow only multi-step login methods. If password login method is denied or no password is set then FTP and WebDAV users cannot login. The following login methods are supported:
  - `publickey`
  - `password`
  - `keyboard-interactive`
  - `publickey+password`
  - `publickey+keyboard-interactive`
- `denied_protocols`, list of protocols not allowed. The following protocols are supported:
  - `SSH`
  - `FTP`
  - `DAV`
- `file_extensions`, list of struct. Deprecated, please use `file_patterns`. These restrictions do not apply to files listing for performance reasons, so a denied file cannot be downloaded/overwritten/renamed but it will still be in the list of files. Please note that these restrictions can be easily bypassed. Each struct contains the following fields:
  - `allowed_extensions`, list of, case insensitive, allowed file extensions. Shell like expansion is not supported so you have to specify `.jpg` and not `*.jpg`. Any file that does not end with this suffix will be denied
  - `denied_extensions`, list of, case insensitive, denied file extensions. Denied file extensions are evaluated before the allowed ones
  - `path`, exposed virtual path, if no other specific filter is defined, the filter apply for sub directories too. For example if filters are defined for the paths `/` and `/sub` then the filters for `/` are applied for any file outside the `/sub` directory
- `file_patterns`, list of struct. These restrictions do not apply to files listing for performance reasons, so a denied file cannot be downloaded/overwritten/renamed but it will still be in the list of files. Please note that these restrictions can be easily bypassed. For syntax details take a look [here](https://golang.org/pkg/path/#Match). Each struct contains the following fields:
  - `allowed_patterns`, list of, case insensitive, allowed file patterns. Examples: `*.jpg`, `a*b?.png`. Any non matching file will be denied
  - `denied_patterns`, list of, case insensitive, denied file patterns. Denied file patterns are evaluated before the allowed ones
  - `path`, exposed virtual path, if no other specific filter is defined, the filter apply for sub directories too. For example if filters are defined for the paths `/` and `/sub` then the filters for `/` are applied for any file outside the `/sub` directory
- `fs_provider`, filesystem to serve via SFTP. Local filesystem (0), S3 Compatible Object Storage (1), Google Cloud Storage (2), Azure Blob Storage (3) and encrypted local filesystem (4) are supported
- `s3_bucket`, required for S3 filesystem
- `s3_region`, required for S3 filesystem. Must match the region for your bucket. You can find here the list of available [AWS regions](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). For example if your bucket is at `Frankfurt` you have to set the region to `eu-central-1`
- `s3_access_key`
- `s3_access_secret`, if provided it is stored encrypted based on kms configuration. You can leave access key and access secret blank to use credentials from environment
- `s3_endpoint`, specifies a S3 endpoint (server) different from AWS. It is not required if you are connecting to AWS
- `s3_storage_class`, leave blank to use the default or specify a valid AWS [storage class](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
- `s3_key_prefix`, allows to restrict access to the folder identified by this prefix and its contents
- `s3_upload_part_size`, the buffer size for multipart uploads (MB). Zero means the default (5 MB). Minimum is 5
- `s3_upload_concurrency` how many parts are uploaded in parallel
- `gcs_bucket`, required for GCS filesystem
- `gcs_credentials`, Google Cloud Storage JSON credentials base64 encoded. Credentials are stored encrypted based on kms configuration
- `gcs_automatic_credentials`, integer. Set to 1 to use Application Default Credentials strategy or set to 0 to use explicit credentials via `gcs_credentials`
- `gcs_storage_class`
- `gcs_key_prefix`, allows to restrict access to the folder identified by this prefix and its contents
- `az_container`, Azure Blob Storage container
- `az_account_name`, Azure account name. leave blank to use SAS URL
- `az_account_key`, Azure account key. leave blank to use SAS URL. If provided it is stored encrypted based on kms configuration
- `az_sas_url`, Azure shared access signature URL
- `az_endpoint`, Default is "blob.core.windows.net". If you use the emulator the endpoint must include the protocol, for example "http://127.0.0.1:10000"
- `az_upload_part_size`, the buffer size for multipart uploads (MB). Zero means the default (4 MB)
- `az_upload_concurrency`, how many parts are uploaded in parallel. Zero means the default (2)
- `az_key_prefix`, allows to restrict access to the folder identified by this prefix and its contents
- `az_use_emulator`, boolean
- `crypt_passphrase`, passphrase to use for local encryption
- `additional_info`, string. Free text field

These properties are stored inside the data provider.

If you want to use your existing accounts, you have these options:

- you can import your users inside SFTPGo. Take a look at [sftpgo_api_cli](../examples/rest-api-cli#convert-users-from-other-stores "SFTPGo API CLI example"), it can convert and import users from Linux system users and Pure-FTPd/ProFTPD virtual users
- you can use an external authentication program

Please take a look at the [OpenAPI schema](../httpd/schema/openapi.yaml) for the exact definitions of user and folder fields.
If you need an example you can export a dump using the REST API CLI client or by invoking the `dumpdata` endpoint directly, for example:
@@ -92,3 +8,12 @@ curl "http://127.0.0.1:8080/api/v1/dumpdata?output_file=dump.json&indent=1"
```

the dump is a JSON with users and folder.

These properties are stored inside the configured data provider.

SFTPGo supports checking passwords stored with bcrypt, pbkdf2, md5crypt and sha512crypt too. For pbkdf2 the supported format is `$<algo>$<iterations>$<salt>$<hashed pwd base64 encoded>`, where algo is `pbkdf2-sha1` or `pbkdf2-sha256` or `pbkdf2-sha512` or `$pbkdf2-b64salt-sha256$`. For example the pbkdf2-sha256 of the word password using 150000 iterations and E86a9YMX3zC7 as salt must be stored as `$pbkdf2-sha256$150000$E86a9YMX3zC7$R5J62hsSq+pYw00hLLPKBbcGXmq7fj5+/M0IFoYtZbo=`. In pbkdf2 variant with b64salt the salt is base64 encoded. For bcrypt the format must be the one supported by golang's crypto/bcrypt package, for example the password secret with cost 14 must be stored as `$2a$14$ajq8Q7fbtFRQvXpdCq7Jcuy.Rx1h/L4J60Otx.gyNLbAYctGMJ9tK`. For md5crypt and sha512crypt we support the format used in `/etc/shadow` with the `$1$` and `$6$` prefix, this is useful if you are migrating from Unix system user accounts. We support Apache md5crypt (`$apr1$` prefix) too. Using the REST API you can send a password hashed as bcrypt, pbkdf2, md5crypt or sha512crypt and it will be stored as is.
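
Below is a minimal, hedged sketch of the `pbkdf2-sha256` format described above. It assumes (this is not taken from SFTPGo's code) that the salt is used as raw bytes and that the 32-byte derived key is standard base64 encoded; under those assumptions it should print exactly the example hash quoted in the previous paragraph.

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"

	"golang.org/x/crypto/pbkdf2"
)

func main() {
	// password, salt and iteration count taken from the documented example above
	password, salt, iterations := "password", "E86a9YMX3zC7", 150000
	dk := pbkdf2.Key([]byte(password), []byte(salt), iterations, 32, sha256.New)
	// with the assumptions above this should match the documented example hash
	fmt.Printf("$pbkdf2-sha256$%d$%s$%s\n", iterations, salt, base64.StdEncoding.EncodeToString(dk))
}
```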

If you want to use your existing accounts, you have these options:

- you can import your users inside SFTPGo. Take a look at [sftpgo_api_cli](../examples/rest-api-cli#convert-users-from-other-stores "SFTPGo API CLI example"), it can convert and import users from Linux system users and Pure-FTPd/ProFTPD virtual users
- you can use an external authentication program
@@ -55,6 +55,7 @@ Flags:
2 => Google Cloud Storage
3 => Azure Blob Storage
4 => Encrypted local filesystem
5 => SFTP
--ftpd-cert string Path to the certificate file for FTPS
--ftpd-key string Path to the key file for FTPS
--ftpd-port int 0 means a random unprivileged port,
@@ -90,6 +91,16 @@ Flags:
parallel (default 2)
--s3-upload-part-size int The buffer size for multipart uploads
(MB) (default 5)
--sftp-endpoint string SFTP endpoint as host:port for SFTP
provider
--sftp-fingerprints strings SFTP fingerprints to verify remote host
key for SFTP provider
--sftp-key-path string SFTP private key path for SFTP provider
--sftp-password string SFTP password for SFTP provider
--sftp-prefix string SFTP prefix allows restrict all
operations to a given path within the
remote SFTP server
--sftp-username string SFTP user for SFTP provider
-s, --sftpd-port int 0 means a random unprivileged port,
< 0 disabled
-c, --ssh-commands strings SSH commands to enable.

docs/sftpfs.md (new file, 30 lines)

@@ -0,0 +1,30 @@
# SFTP as storage backend

An SFTP account on another server can be used as storage for an SFTPGo account, so the remote SFTP server can be accessed in a similar way to the local file system.

Here are the supported configuration parameters:

- `Endpoint`, ssh endpoint as `host:port`
- `Username`
- `Password`
- `PrivateKey`
- `Fingerprints`
- `Prefix`

The mandatory parameters are the endpoint, the username and a password or a private key. If you define both a password and a private key the key is tried first. The provided private key should be PEM encoded, something like this:

```shell
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACA8LWc4SahqKkAr4L3rS19w1Vt8/IAf4th2FZmf+PJ/vwAAAJBvnZIJb52S
CQAAAAtzc2gtZWQyNTUxOQAAACA8LWc4SahqKkAr4L3rS19w1Vt8/IAf4th2FZmf+PJ/vw
AAAEBE6F5Az4wzNfNYLRdG8blDwvPBYFXE8BYDi4gzIhnd9zwtZzhJqGoqQCvgvetLX3DV
W3z8gB/i2HYVmZ/48n+/AAAACW5pY29sYUBwMQECAwQ=
-----END OPENSSH PRIVATE KEY-----
```

The password and the private key are stored as ciphertext according to your [KMS configuration](./kms.md).

SHA256 fingerprints for remote server host keys are optional but highly recommended: if you provide one or more fingerprints the server host key will be verified against them and the connection will be denied if none of the fingerprints provided match that for the server host key.
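
As an illustration only (this snippet is not part of the upstream documentation), one way to obtain such a SHA256 fingerprint is to parse the remote server's public host key with Go's `golang.org/x/crypto/ssh` package; the `.pub` path below is just an example.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// any authorized_keys-format public key file for the remote host works here
	raw, err := ioutil.ReadFile("/etc/ssh/ssh_host_ed25519_key.pub")
	if err != nil {
		log.Fatal(err)
	}
	key, _, _, _, err := ssh.ParseAuthorizedKey(raw)
	if err != nil {
		log.Fatal(err)
	}
	// prints a fingerprint in the "SHA256:..." form expected above
	fmt.Println(ssh.FingerprintSHA256(key))
}
```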

Specifying a prefix you can restrict all operations to a given path within the remote SFTP server.
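
Tying the parameters above back to the code in this commit: the struct literal below mirrors the fields of the `vfs.SFTPFsConfig` value that cmd/portable.go builds in this diff. The import paths are assumed from the repository layout and the endpoint, user, secret and prefix values are placeholders, so treat this as a hedged sketch rather than documented usage.

```go
package main

import (
	"fmt"

	"github.com/drakkan/sftpgo/kms"
	"github.com/drakkan/sftpgo/vfs"
)

func main() {
	cfg := vfs.SFTPFsConfig{
		Endpoint:     "sftp.example.com:22",
		Username:     "backenduser",
		Password:     kms.NewPlainSecret("backendpassword"),
		Fingerprints: []string{"SHA256:..."}, // e.g. computed as in the previous sketch
		Prefix:       "/backup",
	}
	fmt.Printf("%+v\n", cfg)
}
```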

@@ -78,6 +78,10 @@ Output:
"storage_class": "Standard",
"upload_concurrency": 4,
"upload_part_size": 10
},
"sftpconfig": {
"password": {},
"private_key": {}
}
},
"filters": {
@@ -198,6 +202,10 @@ Output:
"provider": 0,
"s3config": {
"access_secret": {}
},
"sftpconfig": {
"password": {},
"private_key": {}
}
},
"filters": {
@@ -274,12 +282,19 @@ Output:
"azblobconfig": {
"account_key": {}
},
"cryptconfig": {
"passphrase": {}
},
"gcsconfig": {
"credentials": {}
},
"provider": 0,
"s3config": {
"access_secret": {}
},
"sftpconfig": {
"password": {},
"private_key": {}
}
},
"filters": {
@ -83,7 +83,8 @@ class SFTPGoApiRequests:
|
|||
denied_patterns=[], allowed_patterns=[], s3_upload_part_size=0, s3_upload_concurrency=0,
|
||||
max_upload_file_size=0, denied_protocols=[], az_container='', az_account_name='', az_account_key='',
|
||||
az_sas_url='', az_endpoint='', az_upload_part_size=0, az_upload_concurrency=0, az_key_prefix='',
|
||||
az_use_emulator=False, az_access_tier='', additional_info='', crypto_passphrase=''):
|
||||
az_use_emulator=False, az_access_tier='', additional_info='', crypto_passphrase='', sftp_endpoint='',
|
||||
sftp_username='', sftp_password='', sftp_private_key_path='', sftp_fingerprints=[], sftp_prefix=''):
|
||||
user = {'id':user_id, 'username':username, 'uid':uid, 'gid':gid,
|
||||
'max_sessions':max_sessions, 'quota_size':quota_size, 'quota_files':quota_files,
|
||||
'upload_bandwidth':upload_bandwidth, 'download_bandwidth':download_bandwidth,
|
||||
|
@ -110,7 +111,9 @@ class SFTPGoApiRequests:
|
|||
gcs_automatic_credentials, s3_upload_part_size, s3_upload_concurrency,
|
||||
az_container, az_account_name, az_account_key, az_sas_url,
|
||||
az_endpoint, az_upload_part_size, az_upload_concurrency, az_key_prefix,
|
||||
az_use_emulator, az_access_tier, crypto_passphrase)})
|
||||
az_use_emulator, az_access_tier, crypto_passphrase, sftp_endpoint,
|
||||
sftp_username, sftp_password, sftp_private_key_path,
|
||||
sftp_fingerprints, sftp_prefix)})
|
||||
return user
|
||||
|
||||
def buildVirtualFolders(self, vfolders):
|
||||
|
@ -234,7 +237,8 @@ class SFTPGoApiRequests:
|
|||
s3_storage_class, s3_key_prefix, gcs_bucket, gcs_key_prefix, gcs_storage_class,
|
||||
gcs_credentials_file, gcs_automatic_credentials, s3_upload_part_size, s3_upload_concurrency,
|
||||
az_container, az_account_name, az_account_key, az_sas_url, az_endpoint, az_upload_part_size,
|
||||
az_upload_concurrency, az_key_prefix, az_use_emulator, az_access_tier, crypto_passphrase):
|
||||
az_upload_concurrency, az_key_prefix, az_use_emulator, az_access_tier, crypto_passphrase,
|
||||
sftp_endpoint, sftp_username, sftp_password, sftp_private_key_path, sftp_fingerprints, sftp_prefix):
|
||||
fs_config = {'provider':0}
|
||||
if fs_provider == 'S3':
|
||||
secret = {}
|
||||
|
@ -265,9 +269,20 @@ class SFTPGoApiRequests:
|
|||
'upload_concurrency':az_upload_concurrency, 'key_prefix':az_key_prefix, 'use_emulator':
|
||||
az_use_emulator, 'access_tier':az_access_tier}
|
||||
fs_config.update({'provider':3, 'azblobconfig':azureconfig})
|
||||
elif fs_provider == "Crypto":
|
||||
cryptoconfig = {"passphrase":{"status":"Plain", "payload":crypto_passphrase}}
|
||||
elif fs_provider == 'Crypto':
|
||||
cryptoconfig = {'passphrase':{'status':'Plain', 'payload':crypto_passphrase}}
|
||||
fs_config.update({'provider':4, 'cryptconfig':cryptoconfig})
|
||||
elif fs_provider == 'SFTP':
|
||||
sftpconfig = {'endpoint':sftp_endpoint, 'username':sftp_username, 'fingerprints':sftp_fingerprints,
|
||||
'prefix':sftp_prefix}
|
||||
if sftp_password:
|
||||
pwd = {'status':'Plain', 'payload':sftp_password}
|
||||
sftpconfig.update({'password':pwd})
|
||||
if sftp_private_key_path:
|
||||
with open(sftp_private_key_path) as pkey:
|
||||
key = {'status':'Plain', 'payload':pkey.read()}
|
||||
sftpconfig.update({'private_key':key})
|
||||
fs_config.update({'provider':5, 'sftpconfig':sftpconfig})
|
||||
return fs_config
|
||||
|
||||
def getUsers(self, limit=100, offset=0, order='ASC', username=''):
|
||||
|
@ -288,7 +303,8 @@ class SFTPGoApiRequests:
|
|||
s3_upload_part_size=0, s3_upload_concurrency=0, max_upload_file_size=0, denied_protocols=[], az_container="",
|
||||
az_account_name='', az_account_key='', az_sas_url='', az_endpoint='', az_upload_part_size=0,
|
||||
az_upload_concurrency=0, az_key_prefix='', az_use_emulator=False, az_access_tier='', additional_info='',
|
||||
crypto_passphrase=''):
|
||||
crypto_passphrase='', sftp_endpoint='', sftp_username='', sftp_password='', sftp_private_key_path='',
|
||||
sftp_fingerprints=[], sftp_prefix=''):
|
||||
u = self.buildUserObject(0, username, password, public_keys, home_dir, uid, gid, max_sessions,
|
||||
quota_size, quota_files, self.buildPermissions(perms, subdirs_permissions), upload_bandwidth, download_bandwidth,
|
||||
status, expiration_date, allowed_ip, denied_ip, fs_provider, s3_bucket, s3_region, s3_access_key,
|
||||
|
@ -296,7 +312,8 @@ class SFTPGoApiRequests:
|
|||
gcs_credentials_file, gcs_automatic_credentials, denied_login_methods, virtual_folders, denied_patterns,
|
||||
allowed_patterns, s3_upload_part_size, s3_upload_concurrency, max_upload_file_size, denied_protocols,
|
||||
az_container, az_account_name, az_account_key, az_sas_url, az_endpoint, az_upload_part_size,
|
||||
az_upload_concurrency, az_key_prefix, az_use_emulator, az_access_tier, additional_info, crypto_passphrase)
|
||||
az_upload_concurrency, az_key_prefix, az_use_emulator, az_access_tier, additional_info, crypto_passphrase,
|
||||
sftp_endpoint, sftp_username, sftp_password, sftp_private_key_path, sftp_fingerprints, sftp_prefix)
|
||||
r = requests.post(self.userPath, json=u, auth=self.auth, verify=self.verify)
|
||||
self.printResponse(r)
|
||||
|
||||
|
@ -309,7 +326,8 @@ class SFTPGoApiRequests:
|
|||
allowed_patterns=[], s3_upload_part_size=0, s3_upload_concurrency=0, max_upload_file_size=0,
|
||||
denied_protocols=[], disconnect=0, az_container='', az_account_name='', az_account_key='', az_sas_url='',
|
||||
az_endpoint='', az_upload_part_size=0, az_upload_concurrency=0, az_key_prefix='', az_use_emulator=False,
|
||||
az_access_tier='', additional_info='', crypto_passphrase=''):
|
||||
az_access_tier='', additional_info='', crypto_passphrase='', sftp_endpoint='', sftp_username='',
|
||||
sftp_password='', sftp_private_key_path='', sftp_fingerprints=[], sftp_prefix=''):
|
||||
u = self.buildUserObject(user_id, username, password, public_keys, home_dir, uid, gid, max_sessions,
|
||||
quota_size, quota_files, self.buildPermissions(perms, subdirs_permissions), upload_bandwidth, download_bandwidth,
|
||||
status, expiration_date, allowed_ip, denied_ip, fs_provider, s3_bucket, s3_region, s3_access_key,
|
||||
|
@ -317,7 +335,8 @@ class SFTPGoApiRequests:
|
|||
gcs_credentials_file, gcs_automatic_credentials, denied_login_methods, virtual_folders, denied_patterns,
|
||||
allowed_patterns, s3_upload_part_size, s3_upload_concurrency, max_upload_file_size, denied_protocols,
|
||||
az_container, az_account_name, az_account_key, az_sas_url, az_endpoint, az_upload_part_size,
|
||||
az_upload_concurrency, az_key_prefix, az_use_emulator, az_access_tier, additional_info, crypto_passphrase)
|
||||
az_upload_concurrency, az_key_prefix, az_use_emulator, az_access_tier, additional_info, crypto_passphrase,
|
||||
sftp_endpoint, sftp_username, sftp_password, sftp_private_key_path, sftp_fingerprints, sftp_prefix)
|
||||
r = requests.put(urlparse.urljoin(self.userPath, 'user/' + str(user_id)), params={'disconnect':disconnect},
|
||||
json=u, auth=self.auth, verify=self.verify)
|
||||
self.printResponse(r)
|
||||
|
@ -625,7 +644,7 @@ def addCommonUserArguments(parser):
|
|||
parser.add_argument('--allowed-patterns', type=str, nargs='*', default=[], help='Allowed file patterns case insensitive. '
|
||||
+'The format is /dir::pattern1,pattern2. For example: "/somedir::*.jpg,a*b?.png" "/otherdir/subdir::*.zip,*.rar". ' +
|
||||
'Default: %(default)s')
|
||||
parser.add_argument('--fs', type=str, default='local', choices=['local', 'S3', 'GCS', "AzureBlob", "Crypto"],
|
||||
parser.add_argument('--fs', type=str, default='local', choices=['local', 'S3', 'GCS', 'AzureBlob', 'Crypto', 'SFTP'],
|
||||
help='Filesystem provider. Default: %(default)s')
|
||||
parser.add_argument('--s3-bucket', type=str, default='', help='Default: %(default)s')
|
||||
parser.add_argument('--s3-key-prefix', type=str, default='', help='Virtual root directory. If non empty only this ' +
|
||||
|
@ -665,6 +684,12 @@ def addCommonUserArguments(parser):
|
|||
parser.add_argument('--az-use-emulator', type=bool, default=False, help='Default: %(default)s')
|
||||
parser.add_argument('--crypto-passphrase', type=str, default='', help='Passphrase for encryption/decryption, to use ' +
|
||||
'with Crypto filesystem')
|
||||
parser.add_argument('--sftp-endpoint', type=str, default='', help='SFTP endpoint as host:port')
|
||||
parser.add_argument('--sftp-username', type=str, default='', help='Default: %(default)s')
|
||||
parser.add_argument('--sftp-password', type=str, default='', help='Default: %(default)s')
|
||||
parser.add_argument('--sftp-private-key-path', type=str, default='', help='Default: %(default)s')
|
||||
parser.add_argument('--sftp-fingerprints', type=str, nargs='+', default=[], help='Default: %(default)s')
|
||||
parser.add_argument('--sftp-prefix', type=str, default='', help='Default: %(default)s')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
@ -821,7 +846,8 @@ if __name__ == '__main__':
|
|||
args.s3_upload_part_size, args.s3_upload_concurrency, args.max_upload_file_size, args.denied_protocols,
|
||||
args.az_container, args.az_account_name, args.az_account_key, args.az_sas_url, args.az_endpoint,
|
||||
args.az_upload_part_size, args.az_upload_concurrency, args.az_key_prefix, args.az_use_emulator,
|
||||
args.az_access_tier, args.additional_info, args.crypto_passphrase)
|
||||
args.az_access_tier, args.additional_info, args.crypto_passphrase, args.sftp_endpoint, args.sftp_username,
|
||||
args.sftp_password, args.sftp_private_key_path, args.sftp_fingerprints, args.sftp_prefix)
|
||||
elif args.command == 'update-user':
|
||||
api.updateUser(args.id, args.username, args.password, args.public_keys, args.home_dir, args.uid, args.gid,
|
||||
args.max_sessions, args.quota_size, args.quota_files, args.permissions, args.upload_bandwidth,
|
||||
|
@ -834,7 +860,9 @@ if __name__ == '__main__':
|
|||
args.s3_upload_concurrency, args.max_upload_file_size, args.denied_protocols, args.disconnect,
|
||||
args.az_container, args.az_account_name, args.az_account_key, args.az_sas_url, args.az_endpoint,
|
||||
args.az_upload_part_size, args.az_upload_concurrency, args.az_key_prefix, args.az_use_emulator,
|
||||
args.az_access_tier, args.additional_info, args.crypto_passphrase)
|
||||
args.az_access_tier, args.additional_info, args.crypto_passphrase, args.sftp_endpoint,
|
||||
args.sftp_username, args.sftp_password, args.sftp_private_key_path, args.sftp_fingerprints,
|
||||
args.sftp_prefix)
|
||||
elif args.command == 'delete-user':
|
||||
api.deleteUser(args.id)
|
||||
elif args.command == 'get-users':
|
||||
|
|

(File diff suppressed because it is too large.)

@@ -2,6 +2,7 @@ package ftpd

import (
"errors"
"io"
"os"
"path"
"time"
@@ -398,8 +399,12 @@ func (c *Connection) handleFTPUploadToExistingFile(flags int, resolvedPath, file
c.Log(logger.LevelDebug, "upload resume requested, file path: %#v initial size: %v", filePath, fileSize)
minWriteOffset = fileSize
initialSize = fileSize
if vfs.IsSFTPFs(c.Fs) {
// we need this since we don't allow resume with wrong offset, we should fix this in pkg/sftp
file.Seek(initialSize, io.SeekStart) //nolint:errcheck // for sftp seek cannot file, it simply set the offset
}
} else {
if vfs.IsLocalOsFs(c.Fs) {
if vfs.IsLocalOrSFTPFs(c.Fs) {
vfolder, err := c.User.GetVirtualFolderForPath(path.Dir(requestPath))
if err == nil {
dataprovider.UpdateVirtualFolderQuota(vfolder.BaseVirtualFolder, 0, -fileSize, false) //nolint:errcheck

go.mod (3 changes)

@@ -37,7 +37,7 @@ require (
github.com/pelletier/go-toml v1.8.1 // indirect
github.com/pierrec/lz4 v2.6.0+incompatible // indirect
github.com/pires/go-proxyproto v0.3.2
github.com/pkg/sftp v1.12.1-0.20201118115123-7230c61342c8
github.com/pkg/sftp v1.12.1-0.20201128220914-b5b6f3393fe9
github.com/prometheus/client_golang v1.8.0
github.com/prometheus/common v0.15.0 // indirect
github.com/rs/cors v1.7.1-0.20200626170627-8b4a00bd362b
@@ -72,6 +72,7 @@ require (

replace (
github.com/jlaffaye/ftp => github.com/drakkan/ftp v0.0.0-20201114075148-9b9adce499a9
github.com/pkg/sftp => github.com/drakkan/sftp v0.0.0-20201211115031-0b6bbc64f191
golang.org/x/crypto => github.com/drakkan/crypto v0.0.0-20201206210642-67b183f44ef5
golang.org/x/net => github.com/drakkan/net v0.0.0-20201206210821-d337634bad94
)

go.sum (44 changes)

@ -61,35 +61,28 @@ github.com/Azure/azure-storage-blob-go v0.11.0 h1:WCTHKKNkHlzm7lzUNXRSD11784LwJq
|
|||
github.com/Azure/azure-storage-blob-go v0.11.0/go.mod h1:A0u4VjtpgZJ7Y7um/+ix2DHBuEKFC6sEIlj0xc13a4Q=
|
||||
github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo=
|
||||
github.com/Azure/go-amqp v0.12.7/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
||||
github.com/Azure/go-autorest/autorest v0.9.3 h1:OZEIaBbMdUE/Js+BQKlpO81XlISgipr6yDJ+PSwsgi4=
|
||||
github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.2 h1:Aze/GQeAN1RRbGmnUJvUj+tFGBzFdIg3293/A9rbxC4=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE=
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
|
||||
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
|
||||
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
|
||||
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
|
||||
github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
|
||||
github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
|
||||
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
|
||||
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
|
||||
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/GehirnInc/crypt v0.0.0-20200316065508-bb7000b8a962 h1:KeNholpO2xKjgaaSyd+DyQRrsQjhbSeS7qe4nEw8aQw=
|
||||
|
@ -164,7 +157,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
|||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
|
||||
|
@ -174,6 +166,8 @@ github.com/drakkan/ftp v0.0.0-20201114075148-9b9adce499a9 h1:LPH1dEblAOO/LoG7yHP
|
|||
github.com/drakkan/ftp v0.0.0-20201114075148-9b9adce499a9/go.mod h1:2lmrmq866uF2tnje75wQHzmPXhmSWUt7Gyx2vgK1RCU=
|
||||
github.com/drakkan/net v0.0.0-20201206210821-d337634bad94 h1:LXlMPJumaNy8ypjAVIqVZ8KKWn+13NJrFrDB3QY6YA8=
|
||||
github.com/drakkan/net v0.0.0-20201206210821-d337634bad94/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
github.com/drakkan/sftp v0.0.0-20201211115031-0b6bbc64f191 h1:c+RLqMs6Aqc8IDc5MWTf+zqNlO4+5WfiJqZzHFlr4a8=
|
||||
github.com/drakkan/sftp v0.0.0-20201211115031-0b6bbc64f191/go.mod h1:fUqqXB5vEgVCZ131L+9say31RAri6aF6KDViawhxKK8=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
|
@ -194,7 +188,6 @@ github.com/fclairamb/ftpserverlib v0.9.1-0.20201105003045-1edd6bf7ae53/go.mod h1
|
|||
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
|
||||
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
|
||||
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
|
||||
github.com/frankban/quicktest v1.11.2 h1:mjwHjStlXWibxOohM7HYieIViKyh56mmt3+6viyhDDI=
|
||||
github.com/frankban/quicktest v1.11.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
|
@ -212,19 +205,16 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2
|
|||
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo=
|
||||
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
|
||||
github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
|
||||
github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
|
||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
|
||||
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||
github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||
|
@ -277,16 +267,12 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-replayers/grpcreplay v0.1.0 h1:eNb1y9rZFmY4ax45uEEECSa8fsxGRU+8Bil52ASAwic=
|
||||
github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE=
|
||||
github.com/google/go-replayers/httpreplay v0.1.0 h1:AX7FUb4BjrrzNvblr/OlgwrmFiep6soj5K2QSDW7BGk=
|
||||
github.com/google/go-replayers/httpreplay v0.1.0/go.mod h1:YKZViNhiGgqdBlUbI2MwGpq4pXxNmhJLPHQ7cv2b5no=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible h1:xmapqc1AyLoB+ddYT6r04bD9lIjlOqGaREovi0SzFaE=
|
||||
github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60=
|
||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
|
@ -314,7 +300,6 @@ github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk
|
|||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
|
@ -340,7 +325,6 @@ github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVo
|
|||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
|
||||
github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
|
||||
github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
|
||||
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
|
@ -386,7 +370,6 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO
|
|||
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
|
@ -394,7 +377,6 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht
|
|||
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
|
@ -404,9 +386,7 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u
|
|||
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
|
@ -418,10 +398,8 @@ github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
|
|||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
|
||||
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
|
@ -457,7 +435,6 @@ github.com/minio/sio v0.2.1/go.mod h1:8b0yPp2avGThviy/+OCJBI6OMpvxoUuiLvE6F1lebh
|
|||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
|
@ -482,7 +459,6 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE
|
|||
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
|
||||
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
||||
|
@ -503,10 +479,8 @@ github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh
|
|||
github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k=
|
||||
github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
|
||||
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
|
||||
github.com/otiai10/curr v1.0.0 h1:TJIWdbX0B+kpNagQrjgq8bCMrbhiuX73M2XwgtDMoOI=
|
||||
github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
|
||||
github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
|
||||
github.com/otiai10/mint v1.3.1 h1:BCmzIS3n71sGfHB5NMNDB3lHYPz8fWSkCAErHed//qc=
|
||||
github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
|
||||
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
|
@ -527,9 +501,6 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
|
|||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
|
||||
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
|
||||
github.com/pkg/sftp v1.12.1-0.20201118115123-7230c61342c8 h1:X1sEOEdHMFSWtt7rdOuJ+9oSKZWeN0HqfMM0Ry1Zxms=
|
||||
github.com/pkg/sftp v1.12.1-0.20201118115123-7230c61342c8/go.mod h1:fUqqXB5vEgVCZ131L+9say31RAri6aF6KDViawhxKK8=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
|
@ -584,15 +555,12 @@ github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkB
|
|||
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
|
||||
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4 h1:PT+ElG/UUFMfqy5HrxJxNzj3QBOf7dZwupeVC+mG1Lo=
|
||||
github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4/go.mod h1:MnkX001NG75g3p8bhFycnyIjeQoOjGL6CEIsdE/nKSY=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
|
||||
|
@ -693,7 +661,6 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl
|
|||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
|
@ -703,7 +670,6 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
|
|||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0 h1:8pl+sMODzuvGJkmj2W4kZihvVb5mKm8pB/X44PIQHv8=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -723,7 +689,6 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -778,7 +743,6 @@ golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88 h1:KmZPnMocC93w341XZp26yTJg8Za7lhb2KhkYmixoeso=
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -856,7 +820,6 @@ golang.org/x/tools v0.0.0-20200918232735-d647fc253266/go.mod h1:z6u4i615ZeAfBE4X
|
|||
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201202200335-bef1c476418a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201204222352-654352759326 h1:XKLw9EEEfGJFE2K5Ni4nXgtFBIfI+zKPIi2SlRYmIG4=
|
||||
golang.org/x/tools v0.0.0-20201204222352-654352759326/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@ -894,7 +857,6 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
|
|||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
|
@ -982,10 +944,8 @@ gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUy
|
|||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/dutchcoders/goftp.v1 v1.0.0-20170301105846-ed59a591ce14 h1:tHqNpm9sPaE6BSuMLXBzgTwukQLdBEt4OYU2coQjEQQ=
|
||||
gopkg.in/dutchcoders/goftp.v1 v1.0.0-20170301105846-ed59a591ce14/go.mod h1:nzmlZQ+UqB5+55CRTV/dOaiK8OrPl6Co96Ob8lH4Wxw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
|
|
|
@ -159,6 +159,7 @@ func doQuotaScan(user dataprovider.User) error {
		logger.Warn(logSender, "", "unable scan quota for user %#v error creating filesystem: %v", user.Username, err)
		return err
	}
	defer fs.Close()
	numFiles, size, err := fs.ScanRootDirContents()
	if err != nil {
		logger.Warn(logSender, "", "error scanning user home dir %#v: %v", user.Username, err)

@ -105,6 +105,15 @@ func addUser(w http.ResponseWriter, r *http.Request) {
			sendAPIResponse(w, r, errors.New("invalid passphrase"), "", http.StatusBadRequest)
			return
		}
	case dataprovider.SFTPFilesystemProvider:
		if user.FsConfig.SFTPConfig.Password.IsRedacted() {
			sendAPIResponse(w, r, errors.New("invalid SFTP password"), "", http.StatusBadRequest)
			return
		}
		if user.FsConfig.SFTPConfig.PrivateKey.IsRedacted() {
			sendAPIResponse(w, r, errors.New("invalid SFTP private key"), "", http.StatusBadRequest)
			return
		}
	}
	err = dataprovider.AddUser(user)
	if err == nil {

@ -143,28 +152,19 @@ func updateUser(w http.ResponseWriter, r *http.Request) {
		return
	}
	currentPermissions := user.Permissions
	var currentS3AccessSecret *kms.Secret
	var currentAzAccountKey *kms.Secret
	var currentGCSCredentials *kms.Secret
	var currentCryptoPassphrase *kms.Secret
	if user.FsConfig.Provider == dataprovider.S3FilesystemProvider {
		currentS3AccessSecret = user.FsConfig.S3Config.AccessSecret
	}
	if user.FsConfig.Provider == dataprovider.AzureBlobFilesystemProvider {
		currentAzAccountKey = user.FsConfig.AzBlobConfig.AccountKey
	}
	if user.FsConfig.Provider == dataprovider.GCSFilesystemProvider {
		currentGCSCredentials = user.FsConfig.GCSConfig.Credentials
	}
	if user.FsConfig.Provider == dataprovider.CryptedFilesystemProvider {
		currentCryptoPassphrase = user.FsConfig.CryptConfig.Passphrase
	}
	currentS3AccessSecret := user.FsConfig.S3Config.AccessSecret
	currentAzAccountKey := user.FsConfig.AzBlobConfig.AccountKey
	currentGCSCredentials := user.FsConfig.GCSConfig.Credentials
	currentCryptoPassphrase := user.FsConfig.CryptConfig.Passphrase
	currentSFTPPassword := user.FsConfig.SFTPConfig.Password
	currentSFTPKey := user.FsConfig.SFTPConfig.PrivateKey

	user.Permissions = make(map[string][]string)
	user.FsConfig.S3Config = vfs.S3FsConfig{}
	user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{}
	user.FsConfig.GCSConfig = vfs.GCSFsConfig{}
	user.FsConfig.CryptConfig = vfs.CryptFsConfig{}
	user.FsConfig.SFTPConfig = vfs.SFTPFsConfig{}
	err = render.DecodeJSON(r.Body, &user)
	if err != nil {
		sendAPIResponse(w, r, err, "", http.StatusBadRequest)

@ -175,7 +175,8 @@ func updateUser(w http.ResponseWriter, r *http.Request) {
	if len(user.Permissions) == 0 {
		user.Permissions = currentPermissions
	}
	updateEncryptedSecrets(&user, currentS3AccessSecret, currentAzAccountKey, currentGCSCredentials, currentCryptoPassphrase)
	updateEncryptedSecrets(&user, currentS3AccessSecret, currentAzAccountKey, currentGCSCredentials, currentCryptoPassphrase,
		currentSFTPPassword, currentSFTPKey)
	if user.ID != userID {
		sendAPIResponse(w, r, err, "user ID in request body does not match user ID in path parameter", http.StatusBadRequest)
		return

@ -221,26 +222,31 @@ func disconnectUser(username string) {
}

func updateEncryptedSecrets(user *dataprovider.User, currentS3AccessSecret, currentAzAccountKey,
	currentGCSCredentials *kms.Secret, currentCryptoPassphrase *kms.Secret) {
	currentGCSCredentials, currentCryptoPassphrase, currentSFTPPassword, currentSFTPKey *kms.Secret) {
	// we use the new access secret if plain or empty, otherwise the old value
	if user.FsConfig.Provider == dataprovider.S3FilesystemProvider {
		if !user.FsConfig.S3Config.AccessSecret.IsPlain() && !user.FsConfig.S3Config.AccessSecret.IsEmpty() {
	switch user.FsConfig.Provider {
	case dataprovider.S3FilesystemProvider:
		if user.FsConfig.S3Config.AccessSecret.IsNotPlainAndNotEmpty() {
			user.FsConfig.S3Config.AccessSecret = currentS3AccessSecret
		}
	}
	if user.FsConfig.Provider == dataprovider.AzureBlobFilesystemProvider {
		if !user.FsConfig.AzBlobConfig.AccountKey.IsPlain() && !user.FsConfig.AzBlobConfig.AccountKey.IsEmpty() {
	case dataprovider.AzureBlobFilesystemProvider:
		if user.FsConfig.AzBlobConfig.AccountKey.IsNotPlainAndNotEmpty() {
			user.FsConfig.AzBlobConfig.AccountKey = currentAzAccountKey
		}
	}
	if user.FsConfig.Provider == dataprovider.GCSFilesystemProvider {
		if !user.FsConfig.GCSConfig.Credentials.IsPlain() && !user.FsConfig.GCSConfig.Credentials.IsEmpty() {
	case dataprovider.GCSFilesystemProvider:
		if user.FsConfig.GCSConfig.Credentials.IsNotPlainAndNotEmpty() {
			user.FsConfig.GCSConfig.Credentials = currentGCSCredentials
		}
	}
	if user.FsConfig.Provider == dataprovider.CryptedFilesystemProvider {
		if !user.FsConfig.CryptConfig.Passphrase.IsPlain() && !user.FsConfig.CryptConfig.Passphrase.IsEmpty() {
	case dataprovider.CryptedFilesystemProvider:
		if user.FsConfig.CryptConfig.Passphrase.IsNotPlainAndNotEmpty() {
			user.FsConfig.CryptConfig.Passphrase = currentCryptoPassphrase
		}
	case dataprovider.SFTPFilesystemProvider:
		if user.FsConfig.SFTPConfig.Password.IsNotPlainAndNotEmpty() {
			user.FsConfig.SFTPConfig.Password = currentSFTPPassword
		}
		if user.FsConfig.SFTPConfig.PrivateKey.IsNotPlainAndNotEmpty() {
			user.FsConfig.SFTPConfig.PrivateKey = currentSFTPKey
		}
	}
}

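The same rule applies to every backend, so it can be restated in one place — a condensed sketch with a hypothetical helper name, not part of this change:

func preserveIfNotUpdated(submitted, stored *kms.Secret) *kms.Secret {
	// a plain (or empty) value is a new secret supplied by the client;
	// anything else is an encrypted or redacted echo of what is already stored
	if submitted.IsNotPlainAndNotEmpty() {
		return stored
	}
	return submitted
}
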
@ -553,7 +553,7 @@ func checkFolder(expected *vfs.BaseVirtualFolder, actual *vfs.BaseVirtualFolder)
}

func checkUser(expected *dataprovider.User, actual *dataprovider.User) error {
	if len(actual.Password) > 0 {
	if actual.Password != "" {
		return errors.New("User password must not be visible")
	}
	if expected.ID <= 0 {

@ -627,6 +627,9 @@ func compareUserFsConfig(expected *dataprovider.User, actual *dataprovider.User)
	if err := checkEncryptedSecret(expected.FsConfig.CryptConfig.Passphrase, actual.FsConfig.CryptConfig.Passphrase); err != nil {
		return err
	}
	if err := compareSFTPFsConfig(expected, actual); err != nil {
		return err
	}
	return nil
}

@ -679,6 +682,35 @@ func compareGCSConfig(expected *dataprovider.User, actual *dataprovider.User) error {
	return nil
}

func compareSFTPFsConfig(expected *dataprovider.User, actual *dataprovider.User) error {
	if expected.FsConfig.SFTPConfig.Endpoint != actual.FsConfig.SFTPConfig.Endpoint {
		return errors.New("SFTPFs endpoint mismatch")
	}
	if expected.FsConfig.SFTPConfig.Username != actual.FsConfig.SFTPConfig.Username {
		return errors.New("SFTPFs username mismatch")
	}
	if err := checkEncryptedSecret(expected.FsConfig.SFTPConfig.Password, actual.FsConfig.SFTPConfig.Password); err != nil {
		return fmt.Errorf("SFTPFs password mismatch: %v", err)
	}
	if err := checkEncryptedSecret(expected.FsConfig.SFTPConfig.PrivateKey, actual.FsConfig.SFTPConfig.PrivateKey); err != nil {
		return fmt.Errorf("SFTPFs private key mismatch: %v", err)
	}
	if expected.FsConfig.SFTPConfig.Prefix != actual.FsConfig.SFTPConfig.Prefix {
		if expected.FsConfig.SFTPConfig.Prefix != "" && actual.FsConfig.SFTPConfig.Prefix != "/" {
			return errors.New("SFTPFs prefix mismatch")
		}
	}
	if len(expected.FsConfig.SFTPConfig.Fingerprints) != len(actual.FsConfig.SFTPConfig.Fingerprints) {
		return errors.New("SFTPFs fingerprints mismatch")
	}
	for _, value := range actual.FsConfig.SFTPConfig.Fingerprints {
		if !utils.IsStringInSlice(value, expected.FsConfig.SFTPConfig.Fingerprints) {
			return errors.New("SFTPFs fingerprints mismatch")
		}
	}
	return nil
}

func compareAzBlobConfig(expected *dataprovider.User, actual *dataprovider.User) error {
	if expected.FsConfig.AzBlobConfig.Container != actual.FsConfig.AzBlobConfig.Container {
		return errors.New("Azure Blob container mismatch")

@ -84,6 +84,14 @@ UM2lmBLIXpGgBwYFK4EEACKhZANiAARCjRMqJ85rzMC998X5z761nJ+xL3bkmGVq
WvrJ51t5OxV0v25NsOgR82CANXUgvhVYs7vNFN+jxtb2aj6Xg+/2G/BNxkaFspIV
CzgWkxiz7XE4lgUwX44FCXZM3+JeUbI=
-----END EC PRIVATE KEY-----`
	sftpPrivateKey = `-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACB+RB4yNTZz9mHOkawwUibNdemijVV3ErMeLxWUBlCN/gAAAJA7DjpfOw46
XwAAAAtzc2gtZWQyNTUxOQAAACB+RB4yNTZz9mHOkawwUibNdemijVV3ErMeLxWUBlCN/g
AAAEA0E24gi8ab/XRSvJ85TGZJMe6HVmwxSG4ExPfTMwwe2n5EHjI1NnP2Yc6RrDBSJs11
6aKNVXcSsx4vFZQGUI3+AAAACW5pY29sYUBwMQECAwQ=
-----END OPENSSH PRIVATE KEY-----`
	sftpPkeyFingerprint = "SHA256:QVQ06XHZZbYZzqfrsZcf3Yozy2WTnqQPeLOkcJCdbP0"
)

var (

@ -525,6 +533,17 @@ func TestAddUserInvalidFsConfig(t *testing.T) {
|
|||
u.FsConfig.CryptConfig.Passphrase = kms.NewSecret(kms.SecretStatusRedacted, "akey", "", "")
|
||||
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
u = getTestUser()
|
||||
u.FsConfig.Provider = dataprovider.SFTPFilesystemProvider
|
||||
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
u.FsConfig.SFTPConfig.Password = kms.NewSecret(kms.SecretStatusRedacted, "randompkey", "", "")
|
||||
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
u.FsConfig.SFTPConfig.Password = kms.NewEmptySecret()
|
||||
u.FsConfig.SFTPConfig.PrivateKey = kms.NewSecret(kms.SecretStatusRedacted, "keyforpkey", "", "")
|
||||
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestAddUserInvalidVirtualFolders(t *testing.T) {
|
||||
|
@ -1058,8 +1077,8 @@ func TestUserS3Config(t *testing.T) {
|
|||
user.FsConfig.S3Config.Endpoint = "http://localhost:9000"
|
||||
user.FsConfig.S3Config.KeyPrefix = "somedir/subdir" //nolint:goconst
|
||||
user.FsConfig.S3Config.UploadConcurrency = 5
|
||||
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
|
||||
assert.NoError(t, err)
|
||||
user, bb, err := httpd.UpdateUser(user, http.StatusOK, "")
|
||||
assert.NoError(t, err, string(bb))
|
||||
assert.Equal(t, kms.SecretStatusSecretBox, user.FsConfig.S3Config.AccessSecret.GetStatus())
|
||||
assert.Equal(t, initialSecretPayload, user.FsConfig.S3Config.AccessSecret.GetPayload())
|
||||
assert.Empty(t, user.FsConfig.S3Config.AccessSecret.GetAdditionalData())
|
||||
|
@ -1099,8 +1118,8 @@ func TestUserGCSConfig(t *testing.T) {
|
|||
user.FsConfig.Provider = dataprovider.GCSFilesystemProvider
|
||||
user.FsConfig.GCSConfig.Bucket = "test"
|
||||
user.FsConfig.GCSConfig.Credentials = kms.NewPlainSecret("fake credentials") //nolint:goconst
|
||||
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
|
||||
assert.NoError(t, err)
|
||||
user, bb, err := httpd.UpdateUser(user, http.StatusOK, "")
|
||||
assert.NoError(t, err, string(bb))
|
||||
credentialFile := filepath.Join(credentialsPath, fmt.Sprintf("%v_gcs_credentials.json", user.Username))
|
||||
assert.FileExists(t, credentialFile)
|
||||
creds, err := ioutil.ReadFile(credentialFile)
|
||||
|
@ -1292,6 +1311,81 @@ func TestUserCryptFs(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestUserSFTPFs(t *testing.T) {
|
||||
user, _, err := httpd.AddUser(getTestUser(), http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
user.FsConfig.Provider = dataprovider.SFTPFilesystemProvider
|
||||
user.FsConfig.SFTPConfig.Endpoint = "127.0.0.1:2022"
|
||||
user.FsConfig.SFTPConfig.Username = "sftp_user"
|
||||
user.FsConfig.SFTPConfig.Password = kms.NewPlainSecret("sftp_pwd")
|
||||
user.FsConfig.SFTPConfig.PrivateKey = kms.NewPlainSecret(sftpPrivateKey)
|
||||
user.FsConfig.SFTPConfig.Fingerprints = []string{sftpPkeyFingerprint}
|
||||
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "/", user.FsConfig.SFTPConfig.Prefix)
|
||||
initialPwdPayload := user.FsConfig.SFTPConfig.Password.GetPayload()
|
||||
initialPkeyPayload := user.FsConfig.SFTPConfig.PrivateKey.GetPayload()
|
||||
assert.Equal(t, kms.SecretStatusSecretBox, user.FsConfig.SFTPConfig.Password.GetStatus())
|
||||
assert.NotEmpty(t, initialPwdPayload)
|
||||
assert.Empty(t, user.FsConfig.SFTPConfig.Password.GetAdditionalData())
|
||||
assert.Empty(t, user.FsConfig.SFTPConfig.Password.GetKey())
|
||||
assert.Equal(t, kms.SecretStatusSecretBox, user.FsConfig.SFTPConfig.PrivateKey.GetStatus())
|
||||
assert.NotEmpty(t, initialPkeyPayload)
|
||||
assert.Empty(t, user.FsConfig.SFTPConfig.PrivateKey.GetAdditionalData())
|
||||
assert.Empty(t, user.FsConfig.SFTPConfig.PrivateKey.GetKey())
|
||||
user.FsConfig.SFTPConfig.Password.SetStatus(kms.SecretStatusSecretBox)
|
||||
user.FsConfig.SFTPConfig.Password.SetAdditionalData("adata")
|
||||
user.FsConfig.SFTPConfig.Password.SetKey("fake pwd key")
|
||||
user.FsConfig.SFTPConfig.PrivateKey.SetStatus(kms.SecretStatusSecretBox)
|
||||
user.FsConfig.SFTPConfig.PrivateKey.SetAdditionalData("adata")
|
||||
user.FsConfig.SFTPConfig.PrivateKey.SetKey("fake key")
|
||||
user, bb, err := httpd.UpdateUser(user, http.StatusOK, "")
|
||||
assert.NoError(t, err, string(bb))
|
||||
assert.Equal(t, kms.SecretStatusSecretBox, user.FsConfig.SFTPConfig.Password.GetStatus())
|
||||
assert.Equal(t, initialPwdPayload, user.FsConfig.SFTPConfig.Password.GetPayload())
|
||||
assert.Empty(t, user.FsConfig.SFTPConfig.Password.GetAdditionalData())
|
||||
assert.Empty(t, user.FsConfig.SFTPConfig.Password.GetKey())
|
||||
assert.Equal(t, kms.SecretStatusSecretBox, user.FsConfig.SFTPConfig.PrivateKey.GetStatus())
|
||||
assert.Equal(t, initialPkeyPayload, user.FsConfig.SFTPConfig.PrivateKey.GetPayload())
|
||||
assert.Empty(t, user.FsConfig.SFTPConfig.PrivateKey.GetAdditionalData())
|
||||
assert.Empty(t, user.FsConfig.SFTPConfig.PrivateKey.GetKey())
|
||||
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
user.Password = defaultPassword
|
||||
user.ID = 0
|
||||
secret := kms.NewSecret(kms.SecretStatusSecretBox, "invalid encrypted payload", "", "")
|
||||
user.FsConfig.SFTPConfig.Password = secret
|
||||
_, _, err = httpd.AddUser(user, http.StatusOK)
|
||||
assert.Error(t, err)
|
||||
user.FsConfig.SFTPConfig.Password = kms.NewEmptySecret()
|
||||
user.FsConfig.SFTPConfig.PrivateKey = secret
|
||||
_, _, err = httpd.AddUser(user, http.StatusOK)
|
||||
assert.Error(t, err)
|
||||
|
||||
user.FsConfig.SFTPConfig.PrivateKey = kms.NewPlainSecret(sftpPrivateKey)
|
||||
user, _, err = httpd.AddUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
initialPkeyPayload = user.FsConfig.SFTPConfig.PrivateKey.GetPayload()
|
||||
assert.Empty(t, user.FsConfig.SFTPConfig.Password.GetStatus())
|
||||
assert.Equal(t, kms.SecretStatusSecretBox, user.FsConfig.SFTPConfig.PrivateKey.GetStatus())
|
||||
assert.NotEmpty(t, initialPkeyPayload)
|
||||
assert.Empty(t, user.FsConfig.SFTPConfig.PrivateKey.GetAdditionalData())
|
||||
assert.Empty(t, user.FsConfig.SFTPConfig.PrivateKey.GetKey())
|
||||
user.FsConfig.Provider = dataprovider.SFTPFilesystemProvider
|
||||
user.FsConfig.SFTPConfig.PrivateKey.SetKey("k")
|
||||
user, bb, err = httpd.UpdateUser(user, http.StatusOK, "")
|
||||
assert.NoError(t, err, string(bb))
|
||||
assert.Equal(t, kms.SecretStatusSecretBox, user.FsConfig.SFTPConfig.PrivateKey.GetStatus())
|
||||
assert.NotEmpty(t, initialPkeyPayload)
|
||||
assert.Equal(t, initialPkeyPayload, user.FsConfig.SFTPConfig.PrivateKey.GetPayload())
|
||||
assert.Empty(t, user.FsConfig.SFTPConfig.PrivateKey.GetAdditionalData())
|
||||
assert.Empty(t, user.FsConfig.SFTPConfig.PrivateKey.GetKey())
|
||||
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestUserHiddenFields(t *testing.T) {
|
||||
err := dataprovider.Close()
|
||||
assert.NoError(t, err)
|
||||
|
@ -1303,7 +1397,7 @@ func TestUserHiddenFields(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
// sensitive data must be hidden but not deleted from the dataprovider
|
||||
usernames := []string{"user1", "user2", "user3", "user4"}
|
||||
usernames := []string{"user1", "user2", "user3", "user4", "user5"}
|
||||
u1 := getTestUser()
|
||||
u1.Username = usernames[0]
|
||||
u1.FsConfig.Provider = dataprovider.S3FilesystemProvider
|
||||
|
@ -1338,9 +1432,21 @@ func TestUserHiddenFields(t *testing.T) {
|
|||
user4, _, err := httpd.AddUser(u4, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
|
||||
u5 := getTestUser()
|
||||
u5.Username = usernames[4]
|
||||
u5.FsConfig.Provider = dataprovider.SFTPFilesystemProvider
|
||||
u5.FsConfig.SFTPConfig.Endpoint = "127.0.0.1:2022"
|
||||
u5.FsConfig.SFTPConfig.Username = "sftp_user"
|
||||
u5.FsConfig.SFTPConfig.Password = kms.NewPlainSecret("apassword")
|
||||
u5.FsConfig.SFTPConfig.PrivateKey = kms.NewPlainSecret(sftpPrivateKey)
|
||||
u5.FsConfig.SFTPConfig.Fingerprints = []string{sftpPkeyFingerprint}
|
||||
u5.FsConfig.SFTPConfig.Prefix = "/prefix"
|
||||
user5, _, err := httpd.AddUser(u5, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
|
||||
users, _, err := httpd.GetUsers(0, 0, "", http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
assert.GreaterOrEqual(t, len(users), 4)
|
||||
assert.GreaterOrEqual(t, len(users), 5)
|
||||
for _, username := range usernames {
|
||||
users, _, err = httpd.GetUsers(0, 0, username, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
|
@ -1381,6 +1487,19 @@ func TestUserHiddenFields(t *testing.T) {
|
|||
assert.NotEmpty(t, user4.FsConfig.CryptConfig.Passphrase.GetStatus())
|
||||
assert.NotEmpty(t, user4.FsConfig.CryptConfig.Passphrase.GetPayload())
|
||||
|
||||
user5, _, err = httpd.GetUserByID(user5.ID, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, user5.Password)
|
||||
assert.Empty(t, user5.FsConfig.SFTPConfig.Password.GetKey())
|
||||
assert.Empty(t, user5.FsConfig.SFTPConfig.Password.GetAdditionalData())
|
||||
assert.NotEmpty(t, user5.FsConfig.SFTPConfig.Password.GetStatus())
|
||||
assert.NotEmpty(t, user5.FsConfig.SFTPConfig.Password.GetPayload())
|
||||
assert.Empty(t, user5.FsConfig.SFTPConfig.PrivateKey.GetKey())
|
||||
assert.Empty(t, user5.FsConfig.SFTPConfig.PrivateKey.GetAdditionalData())
|
||||
assert.NotEmpty(t, user5.FsConfig.SFTPConfig.PrivateKey.GetStatus())
|
||||
assert.NotEmpty(t, user5.FsConfig.SFTPConfig.PrivateKey.GetPayload())
|
||||
assert.Equal(t, "/prefix", user5.FsConfig.SFTPConfig.Prefix)
|
||||
|
||||
// finally check that we have all the data inside the data provider
|
||||
user1, err = dataprovider.GetUserByID(user1.ID)
|
||||
assert.NoError(t, err)
|
||||
|
@ -1438,6 +1557,30 @@ func TestUserHiddenFields(t *testing.T) {
|
|||
assert.Empty(t, user4.FsConfig.CryptConfig.Passphrase.GetKey())
|
||||
assert.Empty(t, user4.FsConfig.CryptConfig.Passphrase.GetAdditionalData())
|
||||
|
||||
user5, err = dataprovider.GetUserByID(user5.ID)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, user5.Password)
|
||||
assert.NotEmpty(t, user5.FsConfig.SFTPConfig.Password.GetKey())
|
||||
assert.NotEmpty(t, user5.FsConfig.SFTPConfig.Password.GetAdditionalData())
|
||||
assert.NotEmpty(t, user5.FsConfig.SFTPConfig.Password.GetStatus())
|
||||
assert.NotEmpty(t, user5.FsConfig.SFTPConfig.Password.GetPayload())
|
||||
err = user5.FsConfig.SFTPConfig.Password.Decrypt()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, kms.SecretStatusPlain, user5.FsConfig.SFTPConfig.Password.GetStatus())
|
||||
assert.Equal(t, u5.FsConfig.SFTPConfig.Password.GetPayload(), user5.FsConfig.SFTPConfig.Password.GetPayload())
|
||||
assert.Empty(t, user5.FsConfig.SFTPConfig.Password.GetKey())
|
||||
assert.Empty(t, user5.FsConfig.SFTPConfig.Password.GetAdditionalData())
|
||||
assert.NotEmpty(t, user5.FsConfig.SFTPConfig.PrivateKey.GetKey())
|
||||
assert.NotEmpty(t, user5.FsConfig.SFTPConfig.PrivateKey.GetAdditionalData())
|
||||
assert.NotEmpty(t, user5.FsConfig.SFTPConfig.PrivateKey.GetStatus())
|
||||
assert.NotEmpty(t, user5.FsConfig.SFTPConfig.PrivateKey.GetPayload())
|
||||
err = user5.FsConfig.SFTPConfig.PrivateKey.Decrypt()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, kms.SecretStatusPlain, user5.FsConfig.SFTPConfig.PrivateKey.GetStatus())
|
||||
assert.Equal(t, u5.FsConfig.SFTPConfig.PrivateKey.GetPayload(), user5.FsConfig.SFTPConfig.PrivateKey.GetPayload())
|
||||
assert.Empty(t, user5.FsConfig.SFTPConfig.PrivateKey.GetKey())
|
||||
assert.Empty(t, user5.FsConfig.SFTPConfig.PrivateKey.GetAdditionalData())
|
||||
|
||||
_, err = httpd.RemoveUser(user1, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
_, err = httpd.RemoveUser(user2, http.StatusOK)
|
||||
|
@ -1446,6 +1589,8 @@ func TestUserHiddenFields(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
_, err = httpd.RemoveUser(user4, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
_, err = httpd.RemoveUser(user5, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = dataprovider.Close()
|
||||
assert.NoError(t, err)
|
||||
|
@ -3593,6 +3738,111 @@ func TestWebUserCryptMock(t *testing.T) {
|
|||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
func TestWebUserSFTPFsMock(t *testing.T) {
|
||||
user := getTestUser()
|
||||
userAsJSON := getUserAsJSON(t, user)
|
||||
req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
err := render.DecodeJSON(rr.Body, &user)
|
||||
assert.NoError(t, err)
|
||||
user.FsConfig.Provider = dataprovider.SFTPFilesystemProvider
|
||||
user.FsConfig.SFTPConfig.Endpoint = "127.0.0.1"
|
||||
user.FsConfig.SFTPConfig.Username = "sftpuser"
|
||||
user.FsConfig.SFTPConfig.Password = kms.NewPlainSecret("pwd")
|
||||
user.FsConfig.SFTPConfig.PrivateKey = kms.NewPlainSecret(sftpPrivateKey)
|
||||
user.FsConfig.SFTPConfig.Fingerprints = []string{sftpPkeyFingerprint}
|
||||
user.FsConfig.SFTPConfig.Prefix = "/home/sftpuser"
|
||||
form := make(url.Values)
|
||||
form.Set("username", user.Username)
|
||||
form.Set("home_dir", user.HomeDir)
|
||||
form.Set("uid", "0")
|
||||
form.Set("gid", strconv.FormatInt(int64(user.GID), 10))
|
||||
form.Set("max_sessions", strconv.FormatInt(int64(user.MaxSessions), 10))
|
||||
form.Set("quota_size", strconv.FormatInt(user.QuotaSize, 10))
|
||||
form.Set("quota_files", strconv.FormatInt(int64(user.QuotaFiles), 10))
|
||||
form.Set("upload_bandwidth", "0")
|
||||
form.Set("download_bandwidth", "0")
|
||||
form.Set("permissions", "*")
|
||||
form.Set("sub_dirs_permissions", "")
|
||||
form.Set("status", strconv.Itoa(user.Status))
|
||||
form.Set("expiration_date", "2020-01-01 00:00:00")
|
||||
form.Set("allowed_ip", "")
|
||||
form.Set("denied_ip", "")
|
||||
form.Set("fs_provider", "5")
|
||||
form.Set("crypt_passphrase", "")
|
||||
form.Set("allowed_extensions", "/dir1::.jpg,.png")
|
||||
form.Set("denied_extensions", "/dir2::.zip")
|
||||
form.Set("max_upload_file_size", "0")
|
||||
// empty sftpconfig
|
||||
b, contentType, _ := getMultipartFormData(form, "", "")
|
||||
req, _ = http.NewRequest(http.MethodPost, webUserPath+"/"+strconv.FormatInt(user.ID, 10), &b)
|
||||
req.Header.Set("Content-Type", contentType)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
form.Set("sftp_endpoint", user.FsConfig.SFTPConfig.Endpoint)
|
||||
form.Set("sftp_username", user.FsConfig.SFTPConfig.Username)
|
||||
form.Set("sftp_password", user.FsConfig.SFTPConfig.Password.GetPayload())
|
||||
form.Set("sftp_private_key", user.FsConfig.SFTPConfig.PrivateKey.GetPayload())
|
||||
form.Set("sftp_fingerprints", user.FsConfig.SFTPConfig.Fingerprints[0])
|
||||
form.Set("sftp_prefix", user.FsConfig.SFTPConfig.Prefix)
|
||||
b, contentType, _ = getMultipartFormData(form, "", "")
|
||||
req, _ = http.NewRequest(http.MethodPost, webUserPath+"/"+strconv.FormatInt(user.ID, 10), &b)
|
||||
req.Header.Set("Content-Type", contentType)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusSeeOther, rr.Code)
|
||||
req, _ = http.NewRequest(http.MethodGet, userPath+"?limit=1&offset=0&order=ASC&username="+user.Username, nil)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
var users []dataprovider.User
|
||||
err = render.DecodeJSON(rr.Body, &users)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(users))
|
||||
updateUser := users[0]
|
||||
assert.Equal(t, int64(1577836800000), updateUser.ExpirationDate)
|
||||
assert.Equal(t, 2, len(updateUser.Filters.FileExtensions))
|
||||
assert.Equal(t, kms.SecretStatusSecretBox, updateUser.FsConfig.SFTPConfig.Password.GetStatus())
|
||||
assert.NotEmpty(t, updateUser.FsConfig.SFTPConfig.Password.GetPayload())
|
||||
assert.Empty(t, updateUser.FsConfig.SFTPConfig.Password.GetKey())
|
||||
assert.Empty(t, updateUser.FsConfig.SFTPConfig.Password.GetAdditionalData())
|
||||
assert.Equal(t, kms.SecretStatusSecretBox, updateUser.FsConfig.SFTPConfig.PrivateKey.GetStatus())
|
||||
assert.NotEmpty(t, updateUser.FsConfig.SFTPConfig.PrivateKey.GetPayload())
|
||||
assert.Empty(t, updateUser.FsConfig.SFTPConfig.PrivateKey.GetKey())
|
||||
assert.Empty(t, updateUser.FsConfig.SFTPConfig.PrivateKey.GetAdditionalData())
|
||||
assert.Equal(t, updateUser.FsConfig.SFTPConfig.Prefix, user.FsConfig.SFTPConfig.Prefix)
|
||||
assert.Equal(t, updateUser.FsConfig.SFTPConfig.Username, user.FsConfig.SFTPConfig.Username)
|
||||
assert.Equal(t, updateUser.FsConfig.SFTPConfig.Endpoint, user.FsConfig.SFTPConfig.Endpoint)
|
||||
assert.Len(t, updateUser.FsConfig.SFTPConfig.Fingerprints, 1)
|
||||
assert.Contains(t, updateUser.FsConfig.SFTPConfig.Fingerprints, sftpPkeyFingerprint)
|
||||
// now check that a redacted credentials are not saved
|
||||
form.Set("sftp_password", "[**redacted**] ")
|
||||
form.Set("sftp_private_key", "[**redacted**]")
|
||||
b, contentType, _ = getMultipartFormData(form, "", "")
|
||||
req, _ = http.NewRequest(http.MethodPost, webUserPath+"/"+strconv.FormatInt(user.ID, 10), &b)
|
||||
req.Header.Set("Content-Type", contentType)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusSeeOther, rr.Code)
|
||||
req, _ = http.NewRequest(http.MethodGet, userPath+"?limit=1&offset=0&order=ASC&username="+user.Username, nil)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
users = nil
|
||||
err = render.DecodeJSON(rr.Body, &users)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(users))
|
||||
lastUpdatedUser := users[0]
|
||||
assert.Equal(t, kms.SecretStatusSecretBox, lastUpdatedUser.FsConfig.SFTPConfig.Password.GetStatus())
|
||||
assert.Equal(t, updateUser.FsConfig.SFTPConfig.Password.GetPayload(), lastUpdatedUser.FsConfig.SFTPConfig.Password.GetPayload())
|
||||
assert.Empty(t, lastUpdatedUser.FsConfig.SFTPConfig.Password.GetKey())
|
||||
assert.Empty(t, lastUpdatedUser.FsConfig.SFTPConfig.Password.GetAdditionalData())
|
||||
assert.Equal(t, kms.SecretStatusSecretBox, lastUpdatedUser.FsConfig.SFTPConfig.PrivateKey.GetStatus())
|
||||
assert.Equal(t, updateUser.FsConfig.SFTPConfig.PrivateKey.GetPayload(), lastUpdatedUser.FsConfig.SFTPConfig.PrivateKey.GetPayload())
|
||||
assert.Empty(t, lastUpdatedUser.FsConfig.SFTPConfig.PrivateKey.GetKey())
|
||||
assert.Empty(t, lastUpdatedUser.FsConfig.SFTPConfig.PrivateKey.GetAdditionalData())
|
||||
req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
func TestAddWebFoldersMock(t *testing.T) {
|
||||
mappedPath := filepath.Clean(os.TempDir())
|
||||
form := make(url.Values)
|
||||
|
|
|
@ -394,6 +394,32 @@ func TestCompareUserFsConfig(t *testing.T) {
|
|||
err = compareUserFsConfig(expected, actual)
|
||||
assert.Error(t, err)
|
||||
expected.FsConfig.CryptConfig.Passphrase = kms.NewEmptySecret()
|
||||
expected.FsConfig.SFTPConfig.Endpoint = "endpoint"
|
||||
err = compareUserFsConfig(expected, actual)
|
||||
assert.Error(t, err)
|
||||
expected.FsConfig.SFTPConfig.Endpoint = ""
|
||||
expected.FsConfig.SFTPConfig.Username = "user"
|
||||
err = compareUserFsConfig(expected, actual)
|
||||
assert.Error(t, err)
|
||||
expected.FsConfig.SFTPConfig.Username = ""
|
||||
expected.FsConfig.SFTPConfig.Password = kms.NewPlainSecret("sftppwd")
|
||||
err = compareUserFsConfig(expected, actual)
|
||||
assert.Error(t, err)
|
||||
expected.FsConfig.SFTPConfig.Password = kms.NewEmptySecret()
|
||||
expected.FsConfig.SFTPConfig.PrivateKey = kms.NewPlainSecret("fake key")
|
||||
err = compareUserFsConfig(expected, actual)
|
||||
assert.Error(t, err)
|
||||
expected.FsConfig.SFTPConfig.PrivateKey = kms.NewEmptySecret()
|
||||
expected.FsConfig.SFTPConfig.Prefix = "/home"
|
||||
err = compareUserFsConfig(expected, actual)
|
||||
assert.Error(t, err)
|
||||
expected.FsConfig.SFTPConfig.Prefix = ""
|
||||
expected.FsConfig.SFTPConfig.Fingerprints = []string{"sha256:..."}
|
||||
err = compareUserFsConfig(expected, actual)
|
||||
assert.Error(t, err)
|
||||
actual.FsConfig.SFTPConfig.Fingerprints = []string{"sha256:different"}
|
||||
err = compareUserFsConfig(expected, actual)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestCompareUserGCSConfig(t *testing.T) {
|
||||
|
|
|
@ -2,7 +2,7 @@ openapi: 3.0.3
info:
  title: SFTPGo
  description: SFTPGo REST API
  version: 2.2.1
  version: 2.2.2

servers:
  - url: /api/v1

@ -1084,6 +1084,27 @@ components:
        passphrase:
          $ref: '#/components/schemas/Secret'
      description: Crypt filesystem configuration details
    SFTPFsConfig:
      type: object
      properties:
        endpoint:
          type: string
          description: remote SFTP endpoint as host:port
        username:
          type: string
          description: you can specify a password or private key or both. In the latter case the private key will be tried first.
        password:
          $ref: '#/components/schemas/Secret'
        private_key:
          $ref: '#/components/schemas/Secret'
        fingerprints:
          type: array
          items:
            type: string
          description: SHA256 fingerprints to use for host key verification. If you don't provide any fingerprint the remote host key will not be verified, this is a security risk
        prefix:
          type: string
          description: Specifying a prefix you can restrict all operations to a given path within the remote SFTP server.
    FilesystemConfig:
      type: object
      properties:

@ -1095,6 +1116,7 @@ components:
            - 2
            - 3
            - 4
            - 5
          description: >
            Providers:
             * `0` - Local filesystem

@ -1102,6 +1124,7 @@ components:
             * `2` - Google Cloud Storage
             * `3` - Azure Blob Storage
             * `4` - Local filesystem encrypted
             * `5` - SFTP
        s3config:
          $ref: '#/components/schemas/S3Config'
        gcsconfig:

@ -1110,6 +1133,8 @@ components:
          $ref: '#/components/schemas/AzureBlobFsConfig'
        cryptconfig:
          $ref: '#/components/schemas/CryptFsConfig'
        sftpconfig:
          $ref: '#/components/schemas/SFTPFsConfig'
      description: Storage filesystem details
    BaseVirtualFolder:
      type: object

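Put together, a user backed by the new provider carries a filesystem configuration along these lines — a sketch only, built from the fields defined by the schema above; the endpoint, username and prefix values are made up and the fingerprint is the test value used elsewhere in this commit:

user.FsConfig.Provider = dataprovider.SFTPFilesystemProvider // provider 5
user.FsConfig.SFTPConfig = vfs.SFTPFsConfig{
	Endpoint:     "sftp.internal.example.com:22",
	Username:     "remoteuser",
	Password:     kms.NewPlainSecret("remotepassword"),
	Fingerprints: []string{"SHA256:QVQ06XHZZbYZzqfrsZcf3Yozy2WTnqQPeLOkcJCdbP0"},
	Prefix:       "/backup",
}
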
46
httpd/web.go
@ -506,6 +506,18 @@ func getGCSConfig(r *http.Request) (vfs.GCSFsConfig, error) {
	return config, err
}

func getSFTPConfig(r *http.Request) vfs.SFTPFsConfig {
	config := vfs.SFTPFsConfig{}
	config.Endpoint = r.Form.Get("sftp_endpoint")
	config.Username = r.Form.Get("sftp_username")
	config.Password = getSecretFromFormField(r, "sftp_password")
	config.PrivateKey = getSecretFromFormField(r, "sftp_private_key")
	fingerprintsFormValue := r.Form.Get("sftp_fingerprints")
	config.Fingerprints = getSliceFromDelimitedValues(fingerprintsFormValue, "\n")
	config.Prefix = r.Form.Get("sftp_prefix")
	return config
}

func getAzureConfig(r *http.Request) (vfs.AzBlobFsConfig, error) {
	var err error
	config := vfs.AzBlobFsConfig{}

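On the admin web form side, these are the fields the parser above reads — a sketch with made-up values; multiple fingerprints go into a single field, one per line, matching the "\n" delimiter used above:

form := make(url.Values)
form.Set("fs_provider", "5") // SFTP backend
form.Set("sftp_endpoint", "sftp.internal.example.com:22")
form.Set("sftp_username", "remoteuser")
form.Set("sftp_password", "remotepassword")
form.Set("sftp_private_key", "") // optional when a password is provided
form.Set("sftp_fingerprints", "SHA256:fingerprint-one\nSHA256:fingerprint-two")
form.Set("sftp_prefix", "/backup")
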
@ -532,26 +544,29 @@ func getFsConfigFromUserPostFields(r *http.Request) (dataprovider.Filesystem, error) {
		provider = int(dataprovider.LocalFilesystemProvider)
	}
	fs.Provider = dataprovider.FilesystemProvider(provider)
	if fs.Provider == dataprovider.S3FilesystemProvider {
	switch fs.Provider {
	case dataprovider.S3FilesystemProvider:
		config, err := getS3Config(r)
		if err != nil {
			return fs, err
		}
		fs.S3Config = config
	} else if fs.Provider == dataprovider.GCSFilesystemProvider {
		config, err := getGCSConfig(r)
		if err != nil {
			return fs, err
		}
		fs.GCSConfig = config
	} else if fs.Provider == dataprovider.AzureBlobFilesystemProvider {
	case dataprovider.AzureBlobFilesystemProvider:
		config, err := getAzureConfig(r)
		if err != nil {
			return fs, err
		}
		fs.AzBlobConfig = config
	} else if fs.Provider == dataprovider.CryptedFilesystemProvider {
	case dataprovider.GCSFilesystemProvider:
		config, err := getGCSConfig(r)
		if err != nil {
			return fs, err
		}
		fs.GCSConfig = config
	case dataprovider.CryptedFilesystemProvider:
		fs.CryptConfig.Passphrase = getSecretFromFormField(r, "crypt_passphrase")
	case dataprovider.SFTPFilesystemProvider:
		fs.SFTPConfig = getSFTPConfig(r)
	}
	return fs, nil
}

@@ -722,15 +737,10 @@ func handleWebUpdateUserPost(w http.ResponseWriter, r *http.Request) {
	if len(updatedUser.Password) == 0 {
		updatedUser.Password = user.Password
	}
	if !updatedUser.FsConfig.S3Config.AccessSecret.IsPlain() && !updatedUser.FsConfig.S3Config.AccessSecret.IsEmpty() {
		updatedUser.FsConfig.S3Config.AccessSecret = user.FsConfig.S3Config.AccessSecret
	}
	if !updatedUser.FsConfig.AzBlobConfig.AccountKey.IsPlain() && !updatedUser.FsConfig.AzBlobConfig.AccountKey.IsEmpty() {
		updatedUser.FsConfig.AzBlobConfig.AccountKey = user.FsConfig.AzBlobConfig.AccountKey
	}
	if !updatedUser.FsConfig.CryptConfig.Passphrase.IsPlain() && !updatedUser.FsConfig.CryptConfig.Passphrase.IsEmpty() {
		updatedUser.FsConfig.CryptConfig.Passphrase = user.FsConfig.CryptConfig.Passphrase
	}
	updateEncryptedSecrets(&updatedUser, user.FsConfig.S3Config.AccessSecret, user.FsConfig.AzBlobConfig.AccountKey,
		user.FsConfig.GCSConfig.Credentials, user.FsConfig.CryptConfig.Passphrase, user.FsConfig.SFTPConfig.Password,
		user.FsConfig.SFTPConfig.PrivateKey)

	err = dataprovider.UpdateUser(updatedUser)
	if err == nil {
		if len(r.Form.Get("disconnect")) > 0 {
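The three per-secret `if` blocks removed above are folded into a single `updateEncryptedSecrets` call. Its body is not part of this hunk; the following is only a hypothetical sketch of what such a helper could look like, built on the `IsNotPlainAndNotEmpty` method added in the kms change just below (the parameter names are invented here, the real function may differ):

```go
// Hypothetical sketch: keep the currently stored secret whenever the submitted
// form value is still encrypted/redacted, i.e. the admin did not type a new plain value.
func updateEncryptedSecrets(updatedUser *dataprovider.User, currentS3AccessSecret, currentAzAccountKey,
	currentGCSCredentials, currentCryptPassphrase, currentSFTPPassword, currentSFTPKey *kms.Secret) {
	if updatedUser.FsConfig.S3Config.AccessSecret.IsNotPlainAndNotEmpty() {
		updatedUser.FsConfig.S3Config.AccessSecret = currentS3AccessSecret
	}
	if updatedUser.FsConfig.AzBlobConfig.AccountKey.IsNotPlainAndNotEmpty() {
		updatedUser.FsConfig.AzBlobConfig.AccountKey = currentAzAccountKey
	}
	if updatedUser.FsConfig.GCSConfig.Credentials.IsNotPlainAndNotEmpty() {
		updatedUser.FsConfig.GCSConfig.Credentials = currentGCSCredentials
	}
	if updatedUser.FsConfig.CryptConfig.Passphrase.IsNotPlainAndNotEmpty() {
		updatedUser.FsConfig.CryptConfig.Passphrase = currentCryptPassphrase
	}
	if updatedUser.FsConfig.SFTPConfig.Password.IsNotPlainAndNotEmpty() {
		updatedUser.FsConfig.SFTPConfig.Password = currentSFTPPassword
	}
	if updatedUser.FsConfig.SFTPConfig.PrivateKey.IsNotPlainAndNotEmpty() {
		updatedUser.FsConfig.SFTPConfig.PrivateKey = currentSFTPKey
	}
}
```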
@@ -235,6 +235,13 @@ func (s *Secret) IsPlain() bool {
	return s.provider.GetStatus() == SecretStatusPlain
}

// IsNotPlainAndNotEmpty returns true if the secret is not plain and not empty.
// This is a utility method: we update the secret for an existing user
// only if it is empty or plain
func (s *Secret) IsNotPlainAndNotEmpty() bool {
	return !s.IsPlain() && !s.IsEmpty()
}

// IsRedacted returns true if the secret is redacted
func (s *Secret) IsRedacted() bool {
	return s.provider.GetStatus() == SecretStatusRedacted
@@ -243,28 +243,47 @@ func (s *Service) configurePortableUser() string {
		s.PortableUser.Password = b.String()
		printablePassword = s.PortableUser.Password
	}
	s.configurePortableSecrets()
	return printablePassword
}

func (s *Service) configurePortableSecrets() {
	// we created the user before to initialize the KMS so we need to create the secret here
	switch s.PortableUser.FsConfig.Provider {
	case dataprovider.S3FilesystemProvider:
		payload := s.PortableUser.FsConfig.S3Config.AccessSecret.GetPayload()
		s.PortableUser.FsConfig.S3Config.AccessSecret = kms.NewEmptySecret()
		if payload != "" {
			s.PortableUser.FsConfig.S3Config.AccessSecret = kms.NewPlainSecret(payload)
		}
	case dataprovider.GCSFilesystemProvider:
		payload := s.PortableUser.FsConfig.GCSConfig.Credentials.GetPayload()
		s.PortableUser.FsConfig.GCSConfig.Credentials = kms.NewEmptySecret()
		if payload != "" {
			s.PortableUser.FsConfig.GCSConfig.Credentials = kms.NewPlainSecret(payload)
		}
	case dataprovider.AzureBlobFilesystemProvider:
		payload := s.PortableUser.FsConfig.AzBlobConfig.AccountKey.GetPayload()
		s.PortableUser.FsConfig.AzBlobConfig.AccountKey = kms.NewEmptySecret()
		if payload != "" {
			s.PortableUser.FsConfig.AzBlobConfig.AccountKey = kms.NewPlainSecret(payload)
		}
	case dataprovider.CryptedFilesystemProvider:
		payload := s.PortableUser.FsConfig.CryptConfig.Passphrase.GetPayload()
		s.PortableUser.FsConfig.CryptConfig.Passphrase = kms.NewEmptySecret()
		if payload != "" {
			s.PortableUser.FsConfig.CryptConfig.Passphrase = kms.NewPlainSecret(payload)
		}
	case dataprovider.SFTPFilesystemProvider:
		payload := s.PortableUser.FsConfig.SFTPConfig.Password.GetPayload()
		s.PortableUser.FsConfig.SFTPConfig.Password = kms.NewEmptySecret()
		if payload != "" {
			s.PortableUser.FsConfig.SFTPConfig.Password = kms.NewPlainSecret(payload)
		}
		payload = s.PortableUser.FsConfig.SFTPConfig.PrivateKey.GetPayload()
		s.PortableUser.FsConfig.SFTPConfig.PrivateKey = kms.NewEmptySecret()
		if payload != "" {
			s.PortableUser.FsConfig.SFTPConfig.PrivateKey = kms.NewPlainSecret(payload)
		}
	}
	return printablePassword
}
@@ -298,7 +298,7 @@ func TestQuotaScanCryptFs(t *testing.T) {
	assert.NoError(t, err)
}

func TestGetMimeType(t *testing.T) {
func TestGetMimeTypeCryptFs(t *testing.T) {
	usePubKey := true
	user, _, err := httpd.AddUser(getTestUserWithCryptFs(usePubKey), http.StatusOK)
	assert.NoError(t, err)
@@ -101,7 +101,7 @@ func (c *Connection) handleFilewrite(request *sftp.Request) (sftp.WriterAtReader
	}

	var errForRead error
	if !vfs.IsLocalOsFs(c.Fs) && request.Pflags().Read {
	if !vfs.IsLocalOrSFTPFs(c.Fs) && request.Pflags().Read {
		// read and write mode is only supported for local filesystem
		errForRead = sftp.ErrSSHFxOpUnsupported
	}

@@ -377,7 +377,7 @@ func (c *Connection) handleSFTPUploadToExistingFile(pflags sftp.FileOpenFlags, r
		minWriteOffset = fileSize
		initialSize = fileSize
	} else {
		if vfs.IsLocalOsFs(c.Fs) && isTruncate {
		if vfs.IsLocalOrSFTPFs(c.Fs) && isTruncate {
			vfolder, err := c.User.GetVirtualFolderForPath(path.Dir(requestPath))
			if err == nil {
				dataprovider.UpdateVirtualFolderQuota(vfolder.BaseVirtualFolder, 0, -fileSize, false) //nolint:errcheck

@@ -211,7 +211,7 @@ func (c *scpCommand) handleUploadFile(resolvedPath, filePath string, sizeToRead

	initialSize := int64(0)
	if !isNewFile {
		if vfs.IsLocalOsFs(c.connection.Fs) {
		if vfs.IsLocalOrSFTPFs(c.connection.Fs) {
			vfolder, err := c.connection.User.GetVirtualFolderForPath(path.Dir(requestPath))
			if err == nil {
				dataprovider.UpdateVirtualFolderQuota(vfolder.BaseVirtualFolder, 0, -fileSize, false) //nolint:errcheck
@@ -309,15 +309,10 @@ func (c *Configuration) AcceptInboundConnection(conn net.Conn, config *ssh.Serve
	loginType := sconn.Permissions.Extensions["sftpgo_login_method"]
	connectionID := hex.EncodeToString(sconn.SessionID())

	fs, err := user.GetFilesystem(connectionID)

	if err != nil {
		logger.Warn(logSender, "", "could not create filesystem for user %#v err: %v", user.Username, err)
	if err = checkRootPath(&user, connectionID); err != nil {
		return
	}

	fs.CheckRootPath(user.Username, user.GetUID(), user.GetGID())

	logger.Log(logger.LevelInfo, common.ProtocolSSH, connectionID,
		"User id: %d, logged in with: %#v, username: %#v, home_dir: %#v remote addr: %#v",
		user.ID, loginType, user.Username, user.HomeDir, remoteAddr.String())

@@ -359,24 +354,30 @@ func (c *Configuration) AcceptInboundConnection(conn net.Conn, config *ssh.Serve
			switch req.Type {
			case "subsystem":
				if string(req.Payload[4:]) == "sftp" {
					ok = true
					fs, err := user.GetFilesystem(connectionID)
					if err == nil {
						ok = true
						connection := Connection{
							BaseConnection: common.NewBaseConnection(connID, common.ProtocolSFTP, user, fs),
							ClientVersion:  string(sconn.ClientVersion()),
							RemoteAddr:     remoteAddr,
							channel:        channel,
						}
						go c.handleSftpConnection(channel, &connection)
					}
				}
			case "exec":
				// protocol will be set later inside processSSHCommand it could be SSH or SCP
				fs, err := user.GetFilesystem(connectionID)
				if err == nil {
					connection := Connection{
						BaseConnection: common.NewBaseConnection(connID, common.ProtocolSFTP, user, fs),
						BaseConnection: common.NewBaseConnection(connID, "sshd_exec", user, fs),
						ClientVersion:  string(sconn.ClientVersion()),
						RemoteAddr:     remoteAddr,
						channel:        channel,
					}
					go c.handleSftpConnection(channel, &connection)
					ok = processSSHCommand(req.Payload, &connection, c.EnabledSSHCommands)
				}
			case "exec":
				// protocol will be set later inside processSSHCommand it could be SSH or SCP
				connection := Connection{
					BaseConnection: common.NewBaseConnection(connID, "sshd_exec", user, fs),
					ClientVersion:  string(sconn.ClientVersion()),
					RemoteAddr:     remoteAddr,
					channel:        channel,
				}
				ok = processSSHCommand(req.Payload, &connection, c.EnabledSSHCommands)
			}
			req.Reply(ok, nil) //nolint:errcheck
		}
@@ -419,6 +420,21 @@ func (c *Configuration) createHandler(connection *Connection) sftp.Handlers {
	}
}

func checkRootPath(user *dataprovider.User, connectionID string) error {
	if user.FsConfig.Provider != dataprovider.SFTPFilesystemProvider {
		// for sftp fs check root path does nothing so don't open a useless SFTP connection
		fs, err := user.GetFilesystem(connectionID)
		if err != nil {
			logger.Warn(logSender, "", "could not create filesystem for user %#v err: %v", user.Username, err)
			return err
		}

		fs.CheckRootPath(user.Username, user.GetUID(), user.GetGID())
		fs.Close()
	}
	return nil
}

func loginUser(user dataprovider.User, loginMethod, publicKey string, conn ssh.ConnMetadata) (*ssh.Permissions, error) {
	connectionID := ""
	if conn != nil {

sftpd/sftpd_test.go (1807 changed lines): file diff suppressed because it is too large
@@ -82,6 +82,7 @@ func processSSHCommand(payload []byte, connection *Connection, enabledSSHCommand
			connection.Log(logger.LevelInfo, "ssh command not enabled/supported: %#v", name)
		}
	}
	connection.Fs.Close()
	return false
}
@ -310,18 +310,11 @@
|
|||
<option value="1" {{if eq .User.FsConfig.Provider 1 }}selected{{end}}>AWS S3 (Compatible)</option>
|
||||
<option value="2" {{if eq .User.FsConfig.Provider 2 }}selected{{end}}>Google Cloud Storage</option>
|
||||
<option value="3" {{if eq .User.FsConfig.Provider 3 }}selected{{end}}>Azure Blob Storage</option>
|
||||
<option value="5" {{if eq .User.FsConfig.Provider 5 }}selected{{end}}>SFTP</option>
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="form-group row crypt">
|
||||
<label for="idCryptPassphrase" class="col-sm-2 col-form-label">Passphrase</label>
|
||||
<div class="col-sm-10">
|
||||
<input type="text" class="form-control" id="idCryptPassphrase" name="crypt_passphrase" placeholder=""
|
||||
value="{{if .User.FsConfig.CryptConfig.Passphrase.IsEncrypted}}{{.RedactedSecret}}{{else}}{{.User.FsConfig.CryptConfig.Passphrase.GetPayload}}{{end}}" maxlength="1000">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="form-group row s3">
|
||||
<label for="idS3Bucket" class="col-sm-2 col-form-label">Bucket</label>
|
||||
<div class="col-sm-3">
|
||||
|
@ -526,6 +519,66 @@
|
|||
</div>
|
||||
</div>
|
||||
|
||||
<div class="form-group row crypt">
|
||||
<label for="idCryptPassphrase" class="col-sm-2 col-form-label">Passphrase</label>
|
||||
<div class="col-sm-10">
|
||||
<input type="text" class="form-control" id="idCryptPassphrase" name="crypt_passphrase" placeholder=""
|
||||
value="{{if .User.FsConfig.CryptConfig.Passphrase.IsEncrypted}}{{.RedactedSecret}}{{else}}{{.User.FsConfig.CryptConfig.Passphrase.GetPayload}}{{end}}" maxlength="1000">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="form-group row sftp">
|
||||
<label for="idSFTPEndpoint" class="col-sm-2 col-form-label">Endpoint</label>
|
||||
<div class="col-sm-3">
|
||||
<input type="text" class="form-control" id="idSFTPEndpoint" name="sftp_endpoint" placeholder=""
|
||||
value="{{.User.FsConfig.SFTPConfig.Endpoint}}" maxlength="255">
|
||||
</div>
|
||||
<div class="col-sm-2"></div>
|
||||
<label for="idSFTPUsername" class="col-sm-2 col-form-label">Username</label>
|
||||
<div class="col-sm-3">
|
||||
<input type="text" class="form-control" id="idSFTPUsername" name="sftp_username" placeholder=""
|
||||
value="{{.User.FsConfig.SFTPConfig.Username}}" maxlength="255">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="form-group row sftp">
|
||||
<label for="idSFTPPassword" class="col-sm-2 col-form-label">Password</label>
|
||||
<div class="col-sm-10">
|
||||
<input type="text" class="form-control" id="idSFTPPassword" name="sftp_password" placeholder=""
|
||||
value="{{if .User.FsConfig.SFTPConfig.Password.IsEncrypted}}{{.RedactedSecret}}{{else}}{{.User.FsConfig.SFTPConfig.Password.GetPayload}}{{end}}" maxlength="1000">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="form-group row sftp">
|
||||
<label for="idSFTPPrivateKey" class="col-sm-2 col-form-label">Private key</label>
|
||||
<div class="col-sm-10">
|
||||
<textarea type="text" class="form-control" id="idSFTPPrivateKey" name="sftp_private_key"
|
||||
rows="3">{{if .User.FsConfig.SFTPConfig.PrivateKey.IsEncrypted}}{{.RedactedSecret}}{{else}}{{.User.FsConfig.SFTPConfig.PrivateKey.GetPayload}}{{end}}</textarea>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="form-group row sftp">
    <label for="idSFTPFingerprints" class="col-sm-2 col-form-label">Fingerprints</label>
    <div class="col-sm-10">
        <textarea class="form-control" id="idSFTPFingerprints" name="sftp_fingerprints" rows="3"
            aria-describedby="SFTPFingerprintsHelpBlock">{{range .User.FsConfig.SFTPConfig.Fingerprints}}{{.}} {{end}}</textarea>
        <small id="SFTPFingerprintsHelpBlock" class="form-text text-muted">
            SHA256 fingerprints to validate when connecting to the external SFTP server, one per line. If empty any host key will be accepted: this is a security risk!
        </small>
    </div>
</div>

<div class="form-group row sftp">
    <label for="idSFTPPrefix" class="col-sm-2 col-form-label">Prefix</label>
    <div class="col-sm-10">
        <input type="text" class="form-control" id="idSFTPPrefix" name="sftp_prefix" placeholder=""
            value="{{.User.FsConfig.SFTPConfig.Prefix}}" maxlength="255" aria-describedby="SFTPPrefixHelpBlock">
        <small id="SFTPPrefixHelpBlock" class="form-text text-muted">
            Similar to a chroot for local filesystem. Example: "/somedir/subdir".
        </small>
    </div>
</div>
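The fingerprints field above expects the same strings that `ssh.FingerprintSHA256` produces, since the new SFTP backend's host key callback (see `vfs/sftpfs.go` below) compares the configured values against that fingerprint. A minimal sketch of how the value to paste could be computed, assuming the server's public host key has been copied locally (the file name is just an example):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// e.g. copied from the remote server's /etc/ssh/ssh_host_ed25519_key.pub
	keyBytes, err := ioutil.ReadFile("ssh_host_ed25519_key.pub")
	if err != nil {
		log.Fatal(err)
	}
	pubKey, _, _, _, err := ssh.ParseAuthorizedKey(keyBytes)
	if err != nil {
		log.Fatal(err)
	}
	// prints something like "SHA256:...", one such line per trusted host key
	fmt.Println(ssh.FingerprintSHA256(pubKey))
}
```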
|
||||
|
||||
<div class="form-group row">
|
||||
<label for="idAdditionalInfo" class="col-sm-2 col-form-label">Additional info</label>
|
||||
<div class="col-sm-10">
|
||||
|
@ -602,6 +655,7 @@
|
|||
$('.form-group.row.azblob').hide();
|
||||
$('.form-group.azblob').hide();
|
||||
$('.form-group.crypt').hide();
|
||||
$('.form-group.sftp').hide();
|
||||
$('.form-group.row.s3').show();
|
||||
} else if (val == '2'){
|
||||
$('.form-group.row.gcs').show();
|
||||
|
@ -610,6 +664,7 @@
|
|||
$('.form-group.azblob').hide();
|
||||
$('.form-group.crypt').hide();
|
||||
$('.form-group.row.s3').hide();
|
||||
$('.form-group.sftp').hide();
|
||||
} else if (val == '3'){
|
||||
$('.form-group.row.azblob').show();
|
||||
$('.form-group.azblob').show();
|
||||
|
@ -617,6 +672,7 @@
|
|||
$('.form-group.gcs').hide();
|
||||
$('.form-group.crypt').hide();
|
||||
$('.form-group.row.s3').hide();
|
||||
$('.form-group.sftp').hide();
|
||||
} else if (val == '4'){
|
||||
$('.form-group.row.gcs').hide();
|
||||
$('.form-group.gcs').hide();
|
||||
|
@ -624,6 +680,15 @@
|
|||
$('.form-group.row.azblob').hide();
|
||||
$('.form-group.azblob').hide();
|
||||
$('.form-group.crypt').show();
|
||||
$('.form-group.sftp').hide();
|
||||
} else if (val == '5'){
|
||||
$('.form-group.row.gcs').hide();
|
||||
$('.form-group.gcs').hide();
|
||||
$('.form-group.row.s3').hide();
|
||||
$('.form-group.row.azblob').hide();
|
||||
$('.form-group.azblob').hide();
|
||||
$('.form-group.crypt').hide();
|
||||
$('.form-group.sftp').show();
|
||||
} else {
|
||||
$('.form-group.row.gcs').hide();
|
||||
$('.form-group.gcs').hide();
|
||||
|
@ -631,6 +696,7 @@
|
|||
$('.form-group.row.azblob').hide();
|
||||
$('.form-group.azblob').hide();
|
||||
$('.form-group.crypt').hide();
|
||||
$('.form-group.sftp').hide();
|
||||
}
|
||||
}
|
||||
</script>
|
||||
|
|
|
@ -37,7 +37,7 @@ var maxTryTimeout = time.Hour * 24 * 365
|
|||
type AzureBlobFs struct {
|
||||
connectionID string
|
||||
localTempDir string
|
||||
config AzBlobFsConfig
|
||||
config *AzBlobFsConfig
|
||||
svc *azblob.ServiceURL
|
||||
containerURL azblob.ContainerURL
|
||||
ctxTimeout time.Duration
|
||||
|
@ -53,11 +53,11 @@ func NewAzBlobFs(connectionID, localTempDir string, config AzBlobFsConfig) (Fs,
|
|||
fs := &AzureBlobFs{
|
||||
connectionID: connectionID,
|
||||
localTempDir: localTempDir,
|
||||
config: config,
|
||||
config: &config,
|
||||
ctxTimeout: 30 * time.Second,
|
||||
ctxLongTimeout: 300 * time.Second,
|
||||
}
|
||||
if err := ValidateAzBlobFsConfig(&fs.config); err != nil {
|
||||
if err := fs.config.Validate(); err != nil {
|
||||
return fs, err
|
||||
}
|
||||
if fs.config.AccountKey.IsEncrypted() {
|
||||
|
@ -695,6 +695,11 @@ func (fs *AzureBlobFs) GetMimeType(name string) (string, error) {
|
|||
return response.ContentType(), nil
|
||||
}
|
||||
|
||||
// Close closes the fs
|
||||
func (*AzureBlobFs) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *AzureBlobFs) isEqual(key string, virtualName string) bool {
|
||||
if key == virtualName {
|
||||
return true
|
||||
|
|
|
@ -32,7 +32,7 @@ type CryptFs struct {
|
|||
|
||||
// NewCryptFs returns a CryptFs object
|
||||
func NewCryptFs(connectionID, rootDir string, config CryptFsConfig) (Fs, error) {
|
||||
if err := ValidateCryptFsConfig(&config); err != nil {
|
||||
if err := config.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if config.Passphrase.IsEncrypted() {
|
||||
|
|
vfs/gcsfs.go (11 changed lines)
|
@ -37,7 +37,7 @@ var (
|
|||
type GCSFs struct {
|
||||
connectionID string
|
||||
localTempDir string
|
||||
config GCSFsConfig
|
||||
config *GCSFsConfig
|
||||
svc *storage.Client
|
||||
ctxTimeout time.Duration
|
||||
ctxLongTimeout time.Duration
|
||||
|
@ -53,11 +53,11 @@ func NewGCSFs(connectionID, localTempDir string, config GCSFsConfig) (Fs, error)
|
|||
fs := &GCSFs{
|
||||
connectionID: connectionID,
|
||||
localTempDir: localTempDir,
|
||||
config: config,
|
||||
config: &config,
|
||||
ctxTimeout: 30 * time.Second,
|
||||
ctxLongTimeout: 300 * time.Second,
|
||||
}
|
||||
if err = ValidateGCSFsConfig(&fs.config, fs.config.CredentialFile); err != nil {
|
||||
if err = fs.config.Validate(fs.config.CredentialFile); err != nil {
|
||||
return fs, err
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
@ -749,3 +749,8 @@ func (fs *GCSFs) GetMimeType(name string) (string, error) {
|
|||
}
|
||||
return attrs.ContentType, nil
|
||||
}
|
||||
|
||||
// Close closes the fs
|
||||
func (fs *GCSFs) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
|
vfs/osfs.go (16 changed lines)
|
@ -291,14 +291,15 @@ func (fs *OsFs) ResolvePath(sftpPath string) (string, error) {
|
|||
// path chain until we hit a directory that _does_ exist and can be validated.
|
||||
_, err = fs.findFirstExistingDir(r, basePath)
|
||||
if err != nil {
|
||||
fsLog(fs, logger.LevelWarn, "error resolving non-existent path: %#v", err)
|
||||
fsLog(fs, logger.LevelWarn, "error resolving non-existent path %#v", err)
|
||||
}
|
||||
return r, err
|
||||
}
|
||||
|
||||
err = fs.isSubDir(p, basePath)
|
||||
if err != nil {
|
||||
fsLog(fs, logger.LevelWarn, "Invalid path resolution, dir: %#v outside user home: %#v err: %v", p, fs.rootDir, err)
|
||||
fsLog(fs, logger.LevelWarn, "Invalid path resolution, dir %#v original path %#v resolved %#v err: %v",
|
||||
p, sftpPath, r, err)
|
||||
}
|
||||
return r, err
|
||||
}
|
||||
|
@ -427,13 +428,11 @@ func (fs *OsFs) isSubDir(sub, rootPath string) error {
|
|||
return nil
|
||||
}
|
||||
if len(sub) < len(parent) {
|
||||
err = fmt.Errorf("path %#v is not inside: %#v", sub, parent)
|
||||
fsLog(fs, logger.LevelWarn, "error: %v ", err)
|
||||
err = fmt.Errorf("path %#v is not inside %#v", sub, parent)
|
||||
return err
|
||||
}
|
||||
if !strings.HasPrefix(sub, parent+string(os.PathSeparator)) {
|
||||
err = fmt.Errorf("path %#v is not inside: %#v", sub, parent)
|
||||
fsLog(fs, logger.LevelWarn, "error: %v ", err)
|
||||
err = fmt.Errorf("path %#v is not inside %#v", sub, parent)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
@ -473,3 +472,8 @@ func (fs *OsFs) GetMimeType(name string) (string, error) {
|
|||
_, err = f.Seek(0, io.SeekStart)
|
||||
return ctype, err
|
||||
}
|
||||
|
||||
// Close closes the fs
|
||||
func (*OsFs) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
|
vfs/s3fs.go (13 changed lines)
|
@ -31,7 +31,7 @@ import (
|
|||
type S3Fs struct {
|
||||
connectionID string
|
||||
localTempDir string
|
||||
config S3FsConfig
|
||||
config *S3FsConfig
|
||||
svc *s3.S3
|
||||
ctxTimeout time.Duration
|
||||
ctxLongTimeout time.Duration
|
||||
|
@ -47,11 +47,11 @@ func NewS3Fs(connectionID, localTempDir string, config S3FsConfig) (Fs, error) {
|
|||
fs := &S3Fs{
|
||||
connectionID: connectionID,
|
||||
localTempDir: localTempDir,
|
||||
config: config,
|
||||
config: &config,
|
||||
ctxTimeout: 30 * time.Second,
|
||||
ctxLongTimeout: 300 * time.Second,
|
||||
}
|
||||
if err := ValidateS3FsConfig(&fs.config); err != nil {
|
||||
if err := fs.config.Validate(); err != nil {
|
||||
return fs, err
|
||||
}
|
||||
awsConfig := aws.NewConfig()
|
||||
|
@ -542,7 +542,7 @@ func (fs *S3Fs) GetRelativePath(name string) string {
|
|||
if rel == "." {
|
||||
rel = ""
|
||||
}
|
||||
if !strings.HasPrefix(rel, "/") {
|
||||
if !path.IsAbs(rel) {
|
||||
return "/" + rel
|
||||
}
|
||||
if fs.config.KeyPrefix != "" {
|
||||
|
@ -697,3 +697,8 @@ func (fs *S3Fs) GetMimeType(name string) (string, error) {
|
|||
}
|
||||
return *obj.ContentType, err
|
||||
}
|
||||
|
||||
// Close closes the fs
|
||||
func (*S3Fs) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
|
vfs/sftpfs.go (new file, 562 lines)
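The new `vfs/sftpfs.go` below implements the `Fs` interface on top of a remote SFTP account. As a quick orientation, here is a hypothetical sketch of building and validating its configuration; every value is made up, and `Validate` (defined later in the file) requires an endpoint, a username and at least one of password or private key:

```go
package main

import (
	"log"

	"github.com/drakkan/sftpgo/kms"
	"github.com/drakkan/sftpgo/vfs"
)

func main() {
	cfg := vfs.SFTPFsConfig{
		Endpoint: "sftp.example.com:22",
		Username: "remoteuser",
		Password: kms.NewPlainSecret("remote password"),
		// SHA256 fingerprints of the trusted host keys; an empty list accepts any host key
		Fingerprints: []string{"SHA256:put-the-real-fingerprint-here"},
		// expose only this remote directory, similar to a chroot
		Prefix: "/home/remoteuser/data",
	}
	// Validate cleans the prefix and ensures the credentials are usable
	if err := cfg.Validate(); err != nil {
		log.Fatal(err)
	}
	// NewSFTPFs dials the remote server, so this needs a reachable endpoint
	fsImpl, err := vfs.NewSFTPFs("connID", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer fsImpl.Close()
}
```

With that prefix, a remote file `/home/remoteuser/data/docs/report.txt` would be exposed to SFTPGo clients as `/docs/report.txt`, mirroring what `GetRelativePath` and `ResolvePath` do in the code below.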
|
@ -0,0 +1,562 @@
|
|||
package vfs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/eikenb/pipeat"
|
||||
"github.com/pkg/sftp"
|
||||
"github.com/rs/xid"
|
||||
"golang.org/x/crypto/ssh"
|
||||
|
||||
"github.com/drakkan/sftpgo/kms"
|
||||
"github.com/drakkan/sftpgo/logger"
|
||||
"github.com/drakkan/sftpgo/utils"
|
||||
"github.com/drakkan/sftpgo/version"
|
||||
)
|
||||
|
||||
const (
|
||||
// sftpFsName is the name for the SFTP Fs implementation
|
||||
sftpFsName = "sftpfs"
|
||||
)
|
||||
|
||||
// SFTPFsConfig defines the configuration for SFTP based filesystem
|
||||
type SFTPFsConfig struct {
|
||||
Endpoint string `json:"endpoint,omitempty"`
|
||||
Username string `json:"username,omitempty"`
|
||||
Password *kms.Secret `json:"password,omitempty"`
|
||||
PrivateKey *kms.Secret `json:"private_key,omitempty"`
|
||||
Fingerprints []string `json:"fingerprints,omitempty"`
|
||||
// Prefix is the path prefix to strip from SFTP resource paths.
|
||||
Prefix string `json:"prefix,omitempty"`
|
||||
}
|
||||
|
||||
func (c *SFTPFsConfig) setEmptyCredentialsIfNil() {
|
||||
if c.Password == nil {
|
||||
c.Password = kms.NewEmptySecret()
|
||||
}
|
||||
if c.PrivateKey == nil {
|
||||
c.PrivateKey = kms.NewEmptySecret()
|
||||
}
|
||||
}
|
||||
|
||||
// Validate returns an error if the configuration is not valid
|
||||
func (c *SFTPFsConfig) Validate() error {
|
||||
c.setEmptyCredentialsIfNil()
|
||||
if c.Endpoint == "" {
|
||||
return errors.New("endpoint cannot be empty")
|
||||
}
|
||||
if c.Username == "" {
|
||||
return errors.New("username cannot be empty")
|
||||
}
|
||||
if c.Password.IsEmpty() && c.PrivateKey.IsEmpty() {
|
||||
return errors.New("credentials cannot be empty")
|
||||
}
|
||||
if c.Password.IsEncrypted() && !c.Password.IsValid() {
|
||||
return errors.New("invalid encrypted password")
|
||||
}
|
||||
if !c.Password.IsEmpty() && !c.Password.IsValidInput() {
|
||||
return errors.New("invalid password")
|
||||
}
|
||||
if c.PrivateKey.IsEncrypted() && !c.PrivateKey.IsValid() {
|
||||
return errors.New("invalid encrypted private key")
|
||||
}
|
||||
if !c.PrivateKey.IsEmpty() && !c.PrivateKey.IsValidInput() {
|
||||
return errors.New("invalid private key")
|
||||
}
|
||||
if c.Prefix != "" {
|
||||
c.Prefix = utils.CleanPath(c.Prefix)
|
||||
} else {
|
||||
c.Prefix = "/"
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncryptCredentials encrypts password and/or private key if they are in plain text
|
||||
func (c *SFTPFsConfig) EncryptCredentials(additionalData string) error {
|
||||
if c.Password.IsPlain() {
|
||||
c.Password.SetAdditionalData(additionalData)
|
||||
if err := c.Password.Encrypt(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if c.PrivateKey.IsPlain() {
|
||||
c.PrivateKey.SetAdditionalData(additionalData)
|
||||
if err := c.PrivateKey.Encrypt(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SFTPFs is a Fs implementation for SFTP backends
|
||||
type SFTPFs struct {
|
||||
connectionID string
|
||||
config *SFTPFsConfig
|
||||
sshClient *ssh.Client
|
||||
sftpClient *sftp.Client
|
||||
err chan error
|
||||
}
|
||||
|
||||
// NewSFTPFs returns an SFTPFs object that allows to interact with an SFTP server
|
||||
func NewSFTPFs(connectionID string, config SFTPFsConfig) (Fs, error) {
|
||||
if err := config.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !config.Password.IsEmpty() && config.Password.IsEncrypted() {
|
||||
if err := config.Password.Decrypt(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if !config.PrivateKey.IsEmpty() && config.PrivateKey.IsEncrypted() {
|
||||
if err := config.PrivateKey.Decrypt(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
sftpFs := &SFTPFs{
|
||||
connectionID: connectionID,
|
||||
config: &config,
|
||||
err: make(chan error, 1),
|
||||
}
|
||||
err := sftpFs.createConnection()
|
||||
return sftpFs, err
|
||||
}
|
||||
|
||||
// Name returns the name for the Fs implementation
|
||||
func (fs *SFTPFs) Name() string {
|
||||
return fmt.Sprintf("%v %#v", sftpFsName, fs.config.Endpoint)
|
||||
}
|
||||
|
||||
// ConnectionID returns the connection ID associated to this Fs implementation
|
||||
func (fs *SFTPFs) ConnectionID() string {
|
||||
return fs.connectionID
|
||||
}
|
||||
|
||||
// Stat returns a FileInfo describing the named file
|
||||
func (fs *SFTPFs) Stat(name string) (os.FileInfo, error) {
|
||||
if err := fs.checkConnection(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return fs.sftpClient.Stat(name)
|
||||
}
|
||||
|
||||
// Lstat returns a FileInfo describing the named file
|
||||
func (fs *SFTPFs) Lstat(name string) (os.FileInfo, error) {
|
||||
if err := fs.checkConnection(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return fs.sftpClient.Lstat(name)
|
||||
}
|
||||
|
||||
// Open opens the named file for reading
|
||||
func (fs *SFTPFs) Open(name string, offset int64) (File, *pipeat.PipeReaderAt, func(), error) {
|
||||
if err := fs.checkConnection(); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
f, err := fs.sftpClient.Open(name)
|
||||
return f, nil, nil, err
|
||||
}
|
||||
|
||||
// Create creates or opens the named file for writing
|
||||
func (fs *SFTPFs) Create(name string, flag int) (File, *PipeWriter, func(), error) {
|
||||
err := fs.checkConnection()
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
var f File
|
||||
if flag == 0 {
|
||||
f, err = fs.sftpClient.Create(name)
|
||||
} else {
|
||||
f, err = fs.sftpClient.OpenFile(name, flag)
|
||||
}
|
||||
return f, nil, nil, err
|
||||
}
|
||||
|
||||
// Rename renames (moves) source to target.
|
||||
func (fs *SFTPFs) Rename(source, target string) error {
|
||||
if err := fs.checkConnection(); err != nil {
|
||||
return err
|
||||
}
|
||||
return fs.sftpClient.Rename(source, target)
|
||||
}
|
||||
|
||||
// Remove removes the named file or (empty) directory.
|
||||
func (fs *SFTPFs) Remove(name string, isDir bool) error {
|
||||
if err := fs.checkConnection(); err != nil {
|
||||
return err
|
||||
}
|
||||
return fs.sftpClient.Remove(name)
|
||||
}
|
||||
|
||||
// Mkdir creates a new directory with the specified name and default permissions
|
||||
func (fs *SFTPFs) Mkdir(name string) error {
|
||||
if err := fs.checkConnection(); err != nil {
|
||||
return err
|
||||
}
|
||||
return fs.sftpClient.Mkdir(name)
|
||||
}
|
||||
|
||||
// Symlink creates source as a symbolic link to target.
|
||||
func (fs *SFTPFs) Symlink(source, target string) error {
|
||||
if err := fs.checkConnection(); err != nil {
|
||||
return err
|
||||
}
|
||||
return fs.sftpClient.Symlink(source, target)
|
||||
}
|
||||
|
||||
// Readlink returns the destination of the named symbolic link
|
||||
func (fs *SFTPFs) Readlink(name string) (string, error) {
|
||||
if err := fs.checkConnection(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fs.sftpClient.ReadLink(name)
|
||||
}
|
||||
|
||||
// Chown changes the numeric uid and gid of the named file.
|
||||
func (fs *SFTPFs) Chown(name string, uid int, gid int) error {
|
||||
if err := fs.checkConnection(); err != nil {
|
||||
return err
|
||||
}
|
||||
return fs.sftpClient.Chown(name, uid, gid)
|
||||
}
|
||||
|
||||
// Chmod changes the mode of the named file to mode.
|
||||
func (fs *SFTPFs) Chmod(name string, mode os.FileMode) error {
|
||||
if err := fs.checkConnection(); err != nil {
|
||||
return err
|
||||
}
|
||||
return fs.sftpClient.Chmod(name, mode)
|
||||
}
|
||||
|
||||
// Chtimes changes the access and modification times of the named file.
|
||||
func (fs *SFTPFs) Chtimes(name string, atime, mtime time.Time) error {
|
||||
if err := fs.checkConnection(); err != nil {
|
||||
return err
|
||||
}
|
||||
return fs.sftpClient.Chtimes(name, atime, mtime)
|
||||
}
|
||||
|
||||
// Truncate changes the size of the named file.
|
||||
func (fs *SFTPFs) Truncate(name string, size int64) error {
|
||||
if err := fs.checkConnection(); err != nil {
|
||||
return err
|
||||
}
|
||||
return fs.sftpClient.Truncate(name, size)
|
||||
}
|
||||
|
||||
// ReadDir reads the directory named by dirname and returns
|
||||
// a list of directory entries.
|
||||
func (fs *SFTPFs) ReadDir(dirname string) ([]os.FileInfo, error) {
|
||||
if err := fs.checkConnection(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return fs.sftpClient.ReadDir(dirname)
|
||||
}
|
||||
|
||||
// IsUploadResumeSupported returns true if upload resume is supported.
|
||||
func (*SFTPFs) IsUploadResumeSupported() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// IsAtomicUploadSupported returns true if atomic upload is supported.
|
||||
func (*SFTPFs) IsAtomicUploadSupported() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// IsNotExist returns a boolean indicating whether the error is known to
|
||||
// report that a file or directory does not exist
|
||||
func (*SFTPFs) IsNotExist(err error) bool {
|
||||
return os.IsNotExist(err)
|
||||
}
|
||||
|
||||
// IsPermission returns a boolean indicating whether the error is known to
|
||||
// report that permission is denied.
|
||||
func (*SFTPFs) IsPermission(err error) bool {
|
||||
return os.IsPermission(err)
|
||||
}
|
||||
|
||||
// IsNotSupported returns true if the error indicate an unsupported operation
|
||||
func (*SFTPFs) IsNotSupported(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
return err == ErrVfsUnsupported
|
||||
}
|
||||
|
||||
// CheckRootPath creates the specified local root directory if it does not exist
|
||||
func (fs *SFTPFs) CheckRootPath(username string, uid int, gid int) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// ScanRootDirContents returns the number of files contained in a directory and
|
||||
// their size
|
||||
func (fs *SFTPFs) ScanRootDirContents() (int, int64, error) {
|
||||
return fs.GetDirSize(fs.config.Prefix)
|
||||
}
|
||||
|
||||
// GetAtomicUploadPath returns the path to use for an atomic upload
|
||||
func (*SFTPFs) GetAtomicUploadPath(name string) string {
|
||||
dir := path.Dir(name)
|
||||
guid := xid.New().String()
|
||||
return path.Join(dir, ".sftpgo-upload."+guid+"."+path.Base(name))
|
||||
}
|
||||
|
||||
// GetRelativePath returns the path for a file relative to the sftp prefix if any.
|
||||
// This is the path as seen by SFTPGo users
|
||||
func (fs *SFTPFs) GetRelativePath(name string) string {
|
||||
rel := path.Clean(name)
|
||||
if rel == "." {
|
||||
rel = ""
|
||||
}
|
||||
if !path.IsAbs(rel) {
|
||||
return "/" + rel
|
||||
}
|
||||
if fs.config.Prefix != "/" {
|
||||
if !strings.HasPrefix(rel, fs.config.Prefix) {
|
||||
rel = "/"
|
||||
}
|
||||
rel = path.Clean("/" + strings.TrimPrefix(rel, fs.config.Prefix))
|
||||
}
|
||||
return rel
|
||||
}
|
||||
|
||||
// Walk walks the file tree rooted at root, calling walkFn for each file or
|
||||
// directory in the tree, including root
|
||||
func (fs *SFTPFs) Walk(root string, walkFn filepath.WalkFunc) error {
|
||||
if err := fs.checkConnection(); err != nil {
|
||||
return err
|
||||
}
|
||||
walker := fs.sftpClient.Walk(root)
|
||||
for walker.Step() {
|
||||
err := walker.Err()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = walkFn(walker.Path(), walker.Stat(), err)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Join joins any number of path elements into a single path
|
||||
func (*SFTPFs) Join(elem ...string) string {
|
||||
return path.Join(elem...)
|
||||
}
|
||||
|
||||
// HasVirtualFolders returns true if folders are emulated
|
||||
func (*SFTPFs) HasVirtualFolders() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// ResolvePath returns the matching filesystem path for the specified virtual path
|
||||
func (fs *SFTPFs) ResolvePath(virtualPath string) (string, error) {
|
||||
if !path.IsAbs(virtualPath) {
|
||||
virtualPath = path.Clean("/" + virtualPath)
|
||||
}
|
||||
fsPath := fs.Join(fs.config.Prefix, virtualPath)
|
||||
if fs.config.Prefix != "/" && fsPath != "/" {
|
||||
// we need to check if this path is a symlink outside the given prefix
|
||||
// or a file/dir inside a dir symlinked outside the prefix
|
||||
if err := fs.checkConnection(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
var validatedPath string
|
||||
var err error
|
||||
validatedPath, err = fs.getRealPath(fsPath)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
fsLog(fs, logger.LevelWarn, "Invalid path resolution, original path %v resolved %#v err: %v",
|
||||
virtualPath, fsPath, err)
|
||||
return "", err
|
||||
} else if os.IsNotExist(err) {
|
||||
for os.IsNotExist(err) {
|
||||
validatedPath = path.Dir(validatedPath)
|
||||
if validatedPath == "/" {
|
||||
err = nil
|
||||
break
|
||||
}
|
||||
validatedPath, err = fs.getRealPath(validatedPath)
|
||||
}
|
||||
if err != nil {
|
||||
fsLog(fs, logger.LevelWarn, "Invalid path resolution, dir %#v original path %#v resolved %#v err: %v",
|
||||
validatedPath, virtualPath, fsPath, err)
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
if err := fs.isSubDir(validatedPath); err != nil {
|
||||
fsLog(fs, logger.LevelWarn, "Invalid path resolution, dir %#v original path %#v resolved %#v err: %v",
|
||||
validatedPath, virtualPath, fsPath, err)
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
return fsPath, nil
|
||||
}
|
||||
|
||||
// getRealPath returns the real remote path trying to resolve symbolic links if any
|
||||
func (fs *SFTPFs) getRealPath(name string) (string, error) {
|
||||
info, err := fs.sftpClient.Lstat(name)
|
||||
if err != nil {
|
||||
return name, err
|
||||
}
|
||||
if info.Mode()&os.ModeSymlink != 0 {
|
||||
return fs.sftpClient.ReadLink(name)
|
||||
}
|
||||
return name, err
|
||||
}
|
||||
|
||||
func (fs *SFTPFs) isSubDir(name string) error {
|
||||
if name == fs.config.Prefix {
|
||||
return nil
|
||||
}
|
||||
if len(name) < len(fs.config.Prefix) {
|
||||
err := fmt.Errorf("path %#v is not inside: %#v", name, fs.config.Prefix)
|
||||
return err
|
||||
}
|
||||
if !strings.HasPrefix(name, fs.config.Prefix+"/") {
|
||||
err := fmt.Errorf("path %#v is not inside: %#v", name, fs.config.Prefix)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetDirSize returns the number of files and the size for a folder
|
||||
// including any subfolders
|
||||
func (fs *SFTPFs) GetDirSize(dirname string) (int, int64, error) {
|
||||
numFiles := 0
|
||||
size := int64(0)
|
||||
if err := fs.checkConnection(); err != nil {
|
||||
return numFiles, size, err
|
||||
}
|
||||
isDir, err := IsDirectory(fs, dirname)
|
||||
if err == nil && isDir {
|
||||
walker := fs.sftpClient.Walk(dirname)
|
||||
for walker.Step() {
|
||||
err := walker.Err()
|
||||
if err != nil {
|
||||
return numFiles, size, err
|
||||
}
|
||||
if walker.Stat().Mode().IsRegular() {
|
||||
size += walker.Stat().Size()
|
||||
numFiles++
|
||||
}
|
||||
}
|
||||
}
|
||||
return numFiles, size, err
|
||||
}
|
||||
|
||||
// GetMimeType returns the content type
|
||||
func (fs *SFTPFs) GetMimeType(name string) (string, error) {
|
||||
if err := fs.checkConnection(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
f, err := fs.sftpClient.OpenFile(name, os.O_RDONLY)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
var buf [512]byte
|
||||
n, err := io.ReadFull(f, buf[:])
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
return "", err
|
||||
}
|
||||
ctype := http.DetectContentType(buf[:n])
|
||||
// Rewind file.
|
||||
_, err = f.Seek(0, io.SeekStart)
|
||||
return ctype, err
|
||||
}
|
||||
|
||||
// Close the connection
|
||||
func (fs *SFTPFs) Close() error {
|
||||
var sftpErr, sshErr error
|
||||
if fs.sftpClient != nil {
|
||||
sftpErr = fs.sftpClient.Close()
|
||||
}
|
||||
if fs.sshClient != nil {
|
||||
sshErr = fs.sshClient.Close()
|
||||
}
|
||||
if sftpErr != nil {
|
||||
return sftpErr
|
||||
}
|
||||
return sshErr
|
||||
}
|
||||
|
||||
func (fs *SFTPFs) checkConnection() error {
|
||||
err := fs.closed()
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return fs.createConnection()
|
||||
}
|
||||
|
||||
func (fs *SFTPFs) createConnection() error {
|
||||
var err error
|
||||
clientConfig := &ssh.ClientConfig{
|
||||
User: fs.config.Username,
|
||||
HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
|
||||
if len(fs.config.Fingerprints) > 0 {
|
||||
fp := ssh.FingerprintSHA256(key)
|
||||
for _, provided := range fs.config.Fingerprints {
|
||||
if provided == fp {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("Invalid fingerprint %#v", fp)
|
||||
}
|
||||
fsLog(fs, logger.LevelWarn, "login without host key validation, please provide at least a fingerprint!")
|
||||
return nil
|
||||
},
|
||||
ClientVersion: fmt.Sprintf("SSH-2.0-SFTPGo_%v", version.Get().Version),
|
||||
}
|
||||
if fs.config.PrivateKey.GetPayload() != "" {
|
||||
signer, err := ssh.ParsePrivateKey([]byte(fs.config.PrivateKey.GetPayload()))
|
||||
if err != nil {
|
||||
fs.err <- err
|
||||
return err
|
||||
}
|
||||
clientConfig.Auth = append(clientConfig.Auth, ssh.PublicKeys(signer))
|
||||
}
|
||||
if fs.config.Password.GetPayload() != "" {
|
||||
clientConfig.Auth = append(clientConfig.Auth, ssh.Password(fs.config.Password.GetPayload()))
|
||||
}
|
||||
fs.sshClient, err = ssh.Dial("tcp", fs.config.Endpoint, clientConfig)
|
||||
if err != nil {
|
||||
fs.err <- err
|
||||
return err
|
||||
}
|
||||
fs.sftpClient, err = sftp.NewClient(fs.sshClient)
|
||||
if err != nil {
|
||||
fs.sshClient.Close()
|
||||
fs.err <- err
|
||||
return err
|
||||
}
|
||||
go fs.wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *SFTPFs) wait() {
|
||||
// we wait on the sftp client otherwise if the channel is closed but not the connection
|
||||
// we don't detect the event.
|
||||
fs.err <- fs.sftpClient.Wait()
|
||||
fsLog(fs, logger.LevelDebug, "sftp channel closed")
|
||||
if fs.sshClient != nil {
|
||||
fs.sshClient.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *SFTPFs) closed() error {
|
||||
select {
|
||||
case err := <-fs.err:
|
||||
return err
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
vfs/vfs.go (320 changed lines)
|
@@ -57,6 +57,7 @@ type Fs interface {
	Join(elem ...string) string
	HasVirtualFolders() bool
	GetMimeType(name string) (string, error)
	Close() error
}

// File defines an interface representing a SFTPGo file
@ -129,6 +130,66 @@ type S3FsConfig struct {
|
|||
UploadConcurrency int `json:"upload_concurrency,omitempty"`
|
||||
}
|
||||
|
||||
func (c *S3FsConfig) checkCredentials() error {
|
||||
if c.AccessKey == "" && !c.AccessSecret.IsEmpty() {
|
||||
return errors.New("access_key cannot be empty with access_secret not empty")
|
||||
}
|
||||
if c.AccessSecret.IsEmpty() && c.AccessKey != "" {
|
||||
return errors.New("access_secret cannot be empty with access_key not empty")
|
||||
}
|
||||
if c.AccessSecret.IsEncrypted() && !c.AccessSecret.IsValid() {
|
||||
return errors.New("invalid encrypted access_secret")
|
||||
}
|
||||
if !c.AccessSecret.IsEmpty() && !c.AccessSecret.IsValidInput() {
|
||||
return errors.New("invalid access_secret")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncryptCredentials encrypts access secret if it is in plain text
|
||||
func (c *S3FsConfig) EncryptCredentials(additionalData string) error {
|
||||
if c.AccessSecret.IsPlain() {
|
||||
c.AccessSecret.SetAdditionalData(additionalData)
|
||||
err := c.AccessSecret.Encrypt()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate returns an error if the configuration is not valid
|
||||
func (c *S3FsConfig) Validate() error {
|
||||
if c.AccessSecret == nil {
|
||||
c.AccessSecret = kms.NewEmptySecret()
|
||||
}
|
||||
if c.Bucket == "" {
|
||||
return errors.New("bucket cannot be empty")
|
||||
}
|
||||
if c.Region == "" {
|
||||
return errors.New("region cannot be empty")
|
||||
}
|
||||
if err := c.checkCredentials(); err != nil {
|
||||
return err
|
||||
}
|
||||
if c.KeyPrefix != "" {
|
||||
if strings.HasPrefix(c.KeyPrefix, "/") {
|
||||
return errors.New("key_prefix cannot start with /")
|
||||
}
|
||||
c.KeyPrefix = path.Clean(c.KeyPrefix)
|
||||
if !strings.HasSuffix(c.KeyPrefix, "/") {
|
||||
c.KeyPrefix += "/"
|
||||
}
|
||||
}
|
||||
if c.UploadPartSize != 0 && (c.UploadPartSize < 5 || c.UploadPartSize > 5000) {
|
||||
return errors.New("upload_part_size cannot be != 0, lower than 5 (MB) or greater than 5000 (MB)")
|
||||
}
|
||||
if c.UploadConcurrency < 0 || c.UploadConcurrency > 64 {
|
||||
return fmt.Errorf("invalid upload concurrency: %v", c.UploadConcurrency)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GCSFsConfig defines the configuration for Google Cloud Storage based filesystem
|
||||
type GCSFsConfig struct {
|
||||
Bucket string `json:"bucket,omitempty"`
|
||||
|
@ -146,6 +207,38 @@ type GCSFsConfig struct {
|
|||
StorageClass string `json:"storage_class,omitempty"`
|
||||
}
|
||||
|
||||
// Validate returns an error if the configuration is not valid
|
||||
func (c *GCSFsConfig) Validate(credentialsFilePath string) error {
|
||||
if c.Credentials == nil {
|
||||
c.Credentials = kms.NewEmptySecret()
|
||||
}
|
||||
if c.Bucket == "" {
|
||||
return errors.New("bucket cannot be empty")
|
||||
}
|
||||
if c.KeyPrefix != "" {
|
||||
if strings.HasPrefix(c.KeyPrefix, "/") {
|
||||
return errors.New("key_prefix cannot start with /")
|
||||
}
|
||||
c.KeyPrefix = path.Clean(c.KeyPrefix)
|
||||
if !strings.HasSuffix(c.KeyPrefix, "/") {
|
||||
c.KeyPrefix += "/"
|
||||
}
|
||||
}
|
||||
if c.Credentials.IsEncrypted() && !c.Credentials.IsValid() {
|
||||
return errors.New("invalid encrypted credentials")
|
||||
}
|
||||
if !c.Credentials.IsValidInput() && c.AutomaticCredentials == 0 {
|
||||
fi, err := os.Stat(credentialsFilePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid credentials %v", err)
|
||||
}
|
||||
if fi.Size() == 0 {
|
||||
return errors.New("credentials cannot be empty")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AzBlobFsConfig defines the configuration for Azure Blob Storage based filesystem
|
||||
type AzBlobFsConfig struct {
|
||||
Container string `json:"container,omitempty"`
|
||||
|
@ -183,11 +276,93 @@ type AzBlobFsConfig struct {
|
|||
AccessTier string `json:"access_tier,omitempty"`
|
||||
}
|
||||
|
||||
// EncryptCredentials encrypts access secret if it is in plain text
|
||||
func (c *AzBlobFsConfig) EncryptCredentials(additionalData string) error {
|
||||
if c.AccountKey.IsPlain() {
|
||||
c.AccountKey.SetAdditionalData(additionalData)
|
||||
if err := c.AccountKey.Encrypt(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *AzBlobFsConfig) checkCredentials() error {
|
||||
if c.AccountName == "" || !c.AccountKey.IsValidInput() {
|
||||
return errors.New("credentials cannot be empty or invalid")
|
||||
}
|
||||
if c.AccountKey.IsEncrypted() && !c.AccountKey.IsValid() {
|
||||
return errors.New("invalid encrypted account_key")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate returns an error if the configuration is not valid
|
||||
func (c *AzBlobFsConfig) Validate() error {
|
||||
if c.AccountKey == nil {
|
||||
c.AccountKey = kms.NewEmptySecret()
|
||||
}
|
||||
if c.SASURL != "" {
|
||||
_, err := url.Parse(c.SASURL)
|
||||
return err
|
||||
}
|
||||
if c.Container == "" {
|
||||
return errors.New("container cannot be empty")
|
||||
}
|
||||
if err := c.checkCredentials(); err != nil {
|
||||
return err
|
||||
}
|
||||
if c.KeyPrefix != "" {
|
||||
if strings.HasPrefix(c.KeyPrefix, "/") {
|
||||
return errors.New("key_prefix cannot start with /")
|
||||
}
|
||||
c.KeyPrefix = path.Clean(c.KeyPrefix)
|
||||
if !strings.HasSuffix(c.KeyPrefix, "/") {
|
||||
c.KeyPrefix += "/"
|
||||
}
|
||||
}
|
||||
if c.UploadPartSize < 0 || c.UploadPartSize > 100 {
|
||||
return fmt.Errorf("invalid upload part size: %v", c.UploadPartSize)
|
||||
}
|
||||
if c.UploadConcurrency < 0 || c.UploadConcurrency > 64 {
|
||||
return fmt.Errorf("invalid upload concurrency: %v", c.UploadConcurrency)
|
||||
}
|
||||
if !utils.IsStringInSlice(c.AccessTier, validAzAccessTier) {
|
||||
return fmt.Errorf("invalid access tier %#v, valid values: \"''%v\"", c.AccessTier, strings.Join(validAzAccessTier, ", "))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CryptFsConfig defines the configuration to store local files as encrypted
|
||||
type CryptFsConfig struct {
|
||||
Passphrase *kms.Secret `json:"passphrase,omitempty"`
|
||||
}
|
||||
|
||||
// EncryptCredentials encrypts access secret if it is in plain text
|
||||
func (c *CryptFsConfig) EncryptCredentials(additionalData string) error {
|
||||
if c.Passphrase.IsPlain() {
|
||||
c.Passphrase.SetAdditionalData(additionalData)
|
||||
if err := c.Passphrase.Encrypt(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate returns an error if the configuration is not valid
|
||||
func (c *CryptFsConfig) Validate() error {
|
||||
if c.Passphrase == nil || c.Passphrase.IsEmpty() {
|
||||
return errors.New("invalid passphrase")
|
||||
}
|
||||
if !c.Passphrase.IsValidInput() {
|
||||
return errors.New("passphrase cannot be empty or invalid")
|
||||
}
|
||||
if c.Passphrase.IsEncrypted() && !c.Passphrase.IsValid() {
|
||||
return errors.New("invalid encrypted passphrase")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PipeWriter defines a wrapper for pipeat.PipeWriterAt.
|
||||
type PipeWriter struct {
|
||||
writer *pipeat.PipeWriterAt
|
||||
|
@ -247,149 +422,22 @@ func IsCryptOsFs(fs Fs) bool {
|
|||
return fs.Name() == cryptFsName
|
||||
}
|
||||
|
||||
func checkS3Credentials(config *S3FsConfig) error {
|
||||
if config.AccessKey == "" && !config.AccessSecret.IsEmpty() {
|
||||
return errors.New("access_key cannot be empty with access_secret not empty")
|
||||
}
|
||||
if config.AccessSecret.IsEmpty() && config.AccessKey != "" {
|
||||
return errors.New("access_secret cannot be empty with access_key not empty")
|
||||
}
|
||||
if config.AccessSecret.IsEncrypted() && !config.AccessSecret.IsValid() {
|
||||
return errors.New("invalid encrypted access_secret")
|
||||
}
|
||||
if !config.AccessSecret.IsEmpty() && !config.AccessSecret.IsValidInput() {
|
||||
return errors.New("invalid access_secret")
|
||||
}
|
||||
return nil
|
||||
// IsSFTPFs returns true if fs is a SFTP filesystem
|
||||
func IsSFTPFs(fs Fs) bool {
|
||||
return strings.HasPrefix(fs.Name(), sftpFsName)
|
||||
}
|
||||
|
||||
// ValidateS3FsConfig returns nil if the specified s3 config is valid, otherwise an error
|
||||
func ValidateS3FsConfig(config *S3FsConfig) error {
|
||||
if config.AccessSecret == nil {
|
||||
config.AccessSecret = kms.NewEmptySecret()
|
||||
}
|
||||
if config.Bucket == "" {
|
||||
return errors.New("bucket cannot be empty")
|
||||
}
|
||||
if config.Region == "" {
|
||||
return errors.New("region cannot be empty")
|
||||
}
|
||||
if err := checkS3Credentials(config); err != nil {
|
||||
return err
|
||||
}
|
||||
if config.KeyPrefix != "" {
|
||||
if strings.HasPrefix(config.KeyPrefix, "/") {
|
||||
return errors.New("key_prefix cannot start with /")
|
||||
}
|
||||
config.KeyPrefix = path.Clean(config.KeyPrefix)
|
||||
if !strings.HasSuffix(config.KeyPrefix, "/") {
|
||||
config.KeyPrefix += "/"
|
||||
}
|
||||
}
|
||||
if config.UploadPartSize != 0 && (config.UploadPartSize < 5 || config.UploadPartSize > 5000) {
|
||||
return errors.New("upload_part_size cannot be != 0, lower than 5 (MB) or greater than 5000 (MB)")
|
||||
}
|
||||
if config.UploadConcurrency < 0 || config.UploadConcurrency > 64 {
|
||||
return fmt.Errorf("invalid upload concurrency: %v", config.UploadConcurrency)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateGCSFsConfig returns nil if the specified GCS config is valid, otherwise an error
|
||||
func ValidateGCSFsConfig(config *GCSFsConfig, credentialsFilePath string) error {
|
||||
if config.Credentials == nil {
|
||||
config.Credentials = kms.NewEmptySecret()
|
||||
}
|
||||
if config.Bucket == "" {
|
||||
return errors.New("bucket cannot be empty")
|
||||
}
|
||||
if config.KeyPrefix != "" {
|
||||
if strings.HasPrefix(config.KeyPrefix, "/") {
|
||||
return errors.New("key_prefix cannot start with /")
|
||||
}
|
||||
config.KeyPrefix = path.Clean(config.KeyPrefix)
|
||||
if !strings.HasSuffix(config.KeyPrefix, "/") {
|
||||
config.KeyPrefix += "/"
|
||||
}
|
||||
}
|
||||
if config.Credentials.IsEncrypted() && !config.Credentials.IsValid() {
|
||||
return errors.New("invalid encrypted credentials")
|
||||
}
|
||||
if !config.Credentials.IsValidInput() && config.AutomaticCredentials == 0 {
|
||||
fi, err := os.Stat(credentialsFilePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid credentials %v", err)
|
||||
}
|
||||
if fi.Size() == 0 {
|
||||
return errors.New("credentials cannot be empty")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkAzCredentials(config *AzBlobFsConfig) error {
|
||||
if config.AccountName == "" || !config.AccountKey.IsValidInput() {
|
||||
return errors.New("credentials cannot be empty or invalid")
|
||||
}
|
||||
if config.AccountKey.IsEncrypted() && !config.AccountKey.IsValid() {
|
||||
return errors.New("invalid encrypted account_key")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateAzBlobFsConfig returns nil if the specified Azure Blob config is valid, otherwise an error
|
||||
func ValidateAzBlobFsConfig(config *AzBlobFsConfig) error {
|
||||
if config.AccountKey == nil {
|
||||
config.AccountKey = kms.NewEmptySecret()
|
||||
}
|
||||
if config.SASURL != "" {
|
||||
_, err := url.Parse(config.SASURL)
|
||||
return err
|
||||
}
|
||||
if config.Container == "" {
|
||||
return errors.New("container cannot be empty")
|
||||
}
|
||||
if err := checkAzCredentials(config); err != nil {
|
||||
return err
|
||||
}
|
||||
if config.KeyPrefix != "" {
|
||||
if strings.HasPrefix(config.KeyPrefix, "/") {
|
||||
return errors.New("key_prefix cannot start with /")
|
||||
}
|
||||
config.KeyPrefix = path.Clean(config.KeyPrefix)
|
||||
if !strings.HasSuffix(config.KeyPrefix, "/") {
|
||||
config.KeyPrefix += "/"
|
||||
}
|
||||
}
|
||||
if config.UploadPartSize < 0 || config.UploadPartSize > 100 {
|
||||
return fmt.Errorf("invalid upload part size: %v", config.UploadPartSize)
|
||||
}
|
||||
if config.UploadConcurrency < 0 || config.UploadConcurrency > 64 {
|
||||
return fmt.Errorf("invalid upload concurrency: %v", config.UploadConcurrency)
|
||||
}
|
||||
if !utils.IsStringInSlice(config.AccessTier, validAzAccessTier) {
|
||||
return fmt.Errorf("invalid access tier %#v, valid values: \"''%v\"", config.AccessTier, strings.Join(validAzAccessTier, ", "))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateCryptFsConfig returns nil if the specified CryptFs config is valid, otherwise an error
|
||||
func ValidateCryptFsConfig(config *CryptFsConfig) error {
|
||||
if config.Passphrase == nil || config.Passphrase.IsEmpty() {
|
||||
return errors.New("invalid passphrase")
|
||||
}
|
||||
if !config.Passphrase.IsValidInput() {
|
||||
return errors.New("passphrase cannot be empty or invalid")
|
||||
}
|
||||
if config.Passphrase.IsEncrypted() && !config.Passphrase.IsValid() {
|
||||
return errors.New("invalid encrypted passphrase")
|
||||
}
|
||||
return nil
|
||||
// IsLocalOrSFTPFs returns true if fs is local or SFTP
|
||||
func IsLocalOrSFTPFs(fs Fs) bool {
|
||||
return IsLocalOsFs(fs) || IsSFTPFs(fs)
|
||||
}
|
||||
|
||||
// SetPathPermissions calls fs.Chown.
|
||||
// It does nothing for local filesystem on windows
|
||||
func SetPathPermissions(fs Fs, path string, uid int, gid int) {
|
||||
if uid == -1 && gid == -1 {
|
||||
return
|
||||
}
|
||||
if IsLocalOsFs(fs) {
|
||||
if runtime.GOOS == "windows" {
|
||||
return
|
||||
|
|
|
@@ -159,7 +159,7 @@ func (c *Connection) getFile(fsPath, virtualPath string) (webdav.File, error) {

	// for cloud fs we open the file when we receive the first read, to avoid downloading the first part of
	// the file when it was opened only for a stat or a readdir, i.e. when it is not a real download
	if vfs.IsLocalOsFs(c.Fs) {
	if vfs.IsLocalOrSFTPFs(c.Fs) {
		file, r, cancelFn, err = c.Fs.Open(fsPath, 0)
		if err != nil {
			c.Log(logger.LevelWarn, "could not open file %#v for reading: %+v", fsPath, err)

@@ -261,7 +261,7 @@ func (c *Connection) handleUploadToExistingFile(resolvedPath, filePath string, f
		return nil, c.GetFsError(err)
	}
	initialSize := int64(0)
	if vfs.IsLocalOsFs(c.Fs) {
	if vfs.IsLocalOrSFTPFs(c.Fs) {
		vfolder, err := c.User.GetVirtualFolderForPath(path.Dir(requestPath))
		if err == nil {
			dataprovider.UpdateVirtualFolderQuota(vfolder.BaseVirtualFolder, 0, -fileSize, false) //nolint:errcheck
@@ -205,9 +205,13 @@ func (s *webDavServer) authenticate(r *http.Request) (dataprovider.User, bool, w
			cachedUser.Expiration = time.Now().Add(time.Duration(s.config.Cache.Users.ExpirationTime) * time.Minute)
		}
		dataprovider.CacheWebDAVUser(cachedUser, s.config.Cache.Users.MaxSize)
		tempFs, err := user.GetFilesystem("temp")
		if err == nil {
			tempFs.CheckRootPath(user.Username, user.UID, user.GID)
		if user.FsConfig.Provider != dataprovider.SFTPFilesystemProvider {
			// for sftp fs check root path does nothing so don't open a useless SFTP connection
			tempFs, err := user.GetFilesystem("temp")
			if err == nil {
				tempFs.CheckRootPath(user.Username, user.UID, user.GID)
				tempFs.Close()
			}
		}
	}
	return user, false, lockSystem, nil
|
||||
|
|
|
@ -40,6 +40,7 @@ const (
|
|||
logSender = "webavdTesting"
|
||||
webDavServerAddr = "127.0.0.1:9090"
|
||||
webDavServerPort = 9090
|
||||
sftpServerAddr = "127.0.0.1:9022"
|
||||
defaultUsername = "test_user_dav"
|
||||
defaultPassword = "test_password"
|
||||
configDir = ".."
|
||||
|
@ -140,6 +141,11 @@ func TestMain(m *testing.M) {
|
|||
httpdConf.BindPort = 8078
|
||||
httpd.SetBaseURLAndCredentials("http://127.0.0.1:8078", "", "")
|
||||
|
||||
// required to test sftpfs
|
||||
sftpdConf := config.GetSFTPDConfig()
|
||||
sftpdConf.BindPort = 9022
|
||||
sftpdConf.HostKeys = []string{filepath.Join(os.TempDir(), "id_ecdsa")}
|
||||
|
||||
webDavConf := config.GetWebDAVDConfig()
|
||||
webDavConf.BindPort = webDavServerPort
|
||||
webDavConf.Cors = webdavd.Cors{
|
||||
|
@@ -182,8 +188,17 @@ func TestMain(m *testing.M) {
}
}()

go func() {
logger.Debug(logSender, "", "initializing SFTP server with config %+v", sftpdConf)
if err := sftpdConf.Initialize(configDir); err != nil {
logger.ErrorToConsole("could not start SFTP server: %v", err)
os.Exit(1)
}
}()

waitTCPListening(fmt.Sprintf("%s:%d", webDavConf.BindAddress, webDavConf.BindPort))
waitTCPListening(fmt.Sprintf("%s:%d", httpdConf.BindAddress, httpdConf.BindPort))
waitTCPListening(fmt.Sprintf("%s:%d", sftpdConf.BindAddress, sftpdConf.BindPort))
webdavd.ReloadTLSCertificate() //nolint:errcheck

exitCode := m.Run()
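`waitTCPListening` is used above to make sure the WebDAV, HTTPD and SFTP services are accepting connections before the tests run; its body is not part of this hunk. A sketch of what such a helper could look like, assuming plain TCP dialing with a retry loop; the real helper in the test suite may differ:

```go
import (
	"net"
	"time"
)

// waitTCPListening blocks until a TCP listener answers on the given address,
// so the tests do not race the servers started above. Illustrative only.
func waitTCPListening(address string) {
	for {
		conn, err := net.DialTimeout("tcp", address, 1*time.Second)
		if err == nil {
			conn.Close()
			return
		}
		time.Sleep(50 * time.Millisecond)
	}
}
```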
@@ -218,76 +233,87 @@ func TestInitialization(t *testing.T) {
func TestBasicHandling(t *testing.T) {
u := getTestUser()
u.QuotaSize = 6553600
user, _, err := httpd.AddUser(u, http.StatusOK)
localUser, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
client := getWebDavClient(user)
assert.NoError(t, checkBasicFunc(client))
u = getTestSFTPUser()
u.QuotaSize = 6553600
sftpUser, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
for _, user := range []dataprovider.User{localUser, sftpUser} {
client := getWebDavClient(user)
assert.NoError(t, checkBasicFunc(client))
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
expectedQuotaSize := testFileSize
expectedQuotaFiles := 1
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
err = uploadFile(testFilePath, testFileName, testFileSize, client)
assert.NoError(t, err)
// overwrite an existing file
err = uploadFile(testFilePath, testFileName, testFileSize, client)
assert.NoError(t, err)
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
err = downloadFile(testFileName, localDownloadPath, testFileSize, client)
assert.NoError(t, err)
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
assert.NoError(t, err)
assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
assert.Equal(t, expectedQuotaSize, user.UsedQuotaSize)
err = client.Rename(testFileName, testFileName+"1", false)
assert.NoError(t, err)
_, err = client.Stat(testFileName)
assert.Error(t, err)
// the webdav client hide the error we check the quota
err = client.Remove(testFileName)
assert.NoError(t, err)
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
assert.NoError(t, err)
assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
assert.Equal(t, expectedQuotaSize, user.UsedQuotaSize)
err = client.Remove(testFileName + "1")
assert.NoError(t, err)
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
assert.NoError(t, err)
assert.Equal(t, expectedQuotaFiles-1, user.UsedQuotaFiles)
assert.Equal(t, expectedQuotaSize-testFileSize, user.UsedQuotaSize)
err = downloadFile(testFileName, localDownloadPath, testFileSize, client)
assert.Error(t, err)
testDir := "testdir"
err = client.Mkdir(testDir, os.ModePerm)
assert.NoError(t, err)
err = client.MkdirAll(path.Join(testDir, "sub", "sub"), os.ModePerm)
assert.NoError(t, err)
err = client.MkdirAll(path.Join(testDir, "sub1", "sub1"), os.ModePerm)
assert.NoError(t, err)
err = client.MkdirAll(path.Join(testDir, "sub2", "sub2"), os.ModePerm)
assert.NoError(t, err)
err = uploadFile(testFilePath, path.Join(testDir, testFileName+".txt"), testFileSize, client)
assert.NoError(t, err)
err = uploadFile(testFilePath, path.Join(testDir, testFileName), testFileSize, client)
assert.NoError(t, err)
files, err := client.ReadDir(testDir)
assert.NoError(t, err)
assert.Len(t, files, 5)
err = client.Copy(testDir, testDir+"_copy", false)
assert.NoError(t, err)
err = client.RemoveAll(testDir)
assert.NoError(t, err)

testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
expectedQuotaSize := user.UsedQuotaSize + testFileSize
expectedQuotaFiles := user.UsedQuotaFiles + 1
err = createTestFile(testFilePath, testFileSize)
err = os.Remove(testFilePath)
assert.NoError(t, err)
err = os.Remove(localDownloadPath)
assert.NoError(t, err)
if user.Username == defaultUsername {
err = os.RemoveAll(user.GetHomeDir())
assert.NoError(t, err)
}
}
_, err = httpd.RemoveUser(sftpUser, http.StatusOK)
assert.NoError(t, err)
err = uploadFile(testFilePath, testFileName, testFileSize, client)
_, err = httpd.RemoveUser(localUser, http.StatusOK)
assert.NoError(t, err)
// overwrite an existing file
err = uploadFile(testFilePath, testFileName, testFileSize, client)
assert.NoError(t, err)
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
err = downloadFile(testFileName, localDownloadPath, testFileSize, client)
assert.NoError(t, err)
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
assert.NoError(t, err)
assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
assert.Equal(t, expectedQuotaSize, user.UsedQuotaSize)
err = client.Rename(testFileName, testFileName+"1", false)
assert.NoError(t, err)
_, err = client.Stat(testFileName)
assert.Error(t, err)
// the webdav client hide the error we check the quota
err = client.Remove(testFileName)
assert.NoError(t, err)
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
assert.NoError(t, err)
assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
assert.Equal(t, expectedQuotaSize, user.UsedQuotaSize)
err = client.Remove(testFileName + "1")
assert.NoError(t, err)
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
assert.NoError(t, err)
assert.Equal(t, expectedQuotaFiles-1, user.UsedQuotaFiles)
assert.Equal(t, expectedQuotaSize-testFileSize, user.UsedQuotaSize)
err = downloadFile(testFileName, localDownloadPath, testFileSize, client)
assert.Error(t, err)
testDir := "testdir"
err = client.Mkdir(testDir, os.ModePerm)
assert.NoError(t, err)
err = client.MkdirAll(path.Join(testDir, "sub", "sub"), os.ModePerm)
assert.NoError(t, err)
err = client.MkdirAll(path.Join(testDir, "sub1", "sub1"), os.ModePerm)
assert.NoError(t, err)
err = client.MkdirAll(path.Join(testDir, "sub2", "sub2"), os.ModePerm)
assert.NoError(t, err)
err = uploadFile(testFilePath, path.Join(testDir, testFileName+".txt"), testFileSize, client)
assert.NoError(t, err)
err = uploadFile(testFilePath, path.Join(testDir, testFileName), testFileSize, client)
assert.NoError(t, err)
files, err := client.ReadDir(testDir)
assert.NoError(t, err)
assert.Len(t, files, 5)
err = client.Copy(testDir, testDir+"_copy", false)
assert.NoError(t, err)
err = client.RemoveAll(testDir)
assert.NoError(t, err)

err = os.Remove(testFilePath)
assert.NoError(t, err)
err = os.Remove(localDownloadPath)
assert.NoError(t, err)
_, err = httpd.RemoveUser(user, http.StatusOK)
assert.NoError(t, err)
err = os.RemoveAll(user.GetHomeDir())
err = os.RemoveAll(localUser.GetHomeDir())
assert.NoError(t, err)
assert.Len(t, common.Connections.GetStats(), 0)
status := webdavd.GetStatus()
@@ -370,11 +396,18 @@ func TestBasicHandlingCryptFs(t *testing.T) {
}

func TestPropPatch(t *testing.T) {
for _, u := range []dataprovider.User{getTestUser(), getTestUserWithCryptFs()} {
u := getTestUser()
u.Username = u.Username + "1"
localUser, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
sftpUser := getTestSFTPUser()
sftpUser.FsConfig.SFTPConfig.Username = localUser.Username

for _, u := range []dataprovider.User{getTestUser(), getTestUserWithCryptFs(), sftpUser} {
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
client := getWebDavClient(user)
assert.NoError(t, checkBasicFunc(client))
assert.NoError(t, checkBasicFunc(client), sftpUser.Username)

testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
@@ -404,6 +437,10 @@ func TestPropPatch(t *testing.T) {
assert.NoError(t, err)
assert.Len(t, common.Connections.GetStats(), 0)
}
_, err = httpd.RemoveUser(localUser, http.StatusOK)
assert.NoError(t, err)
err = os.RemoveAll(localUser.GetHomeDir())
assert.NoError(t, err)
}

func TestLoginInvalidPwd(t *testing.T) {
@@ -807,72 +844,88 @@ func TestDeniedProtocols(t *testing.T) {
func TestQuotaLimits(t *testing.T) {
u := getTestUser()
u.QuotaFiles = 1
user, _, err := httpd.AddUser(u, http.StatusOK)
localUser, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFileSize := int64(65535)
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
testFileSize1 := int64(131072)
testFileName1 := "test_file1.dat"
testFilePath1 := filepath.Join(homeBasePath, testFileName1)
err = createTestFile(testFilePath1, testFileSize1)
assert.NoError(t, err)
testFileSize2 := int64(32768)
testFileName2 := "test_file2.dat"
testFilePath2 := filepath.Join(homeBasePath, testFileName2)
err = createTestFile(testFilePath2, testFileSize2)
assert.NoError(t, err)
client := getWebDavClient(user)
// test quota files
err = uploadFile(testFilePath, testFileName+".quota", testFileSize, client)
assert.NoError(t, err)
err = uploadFile(testFilePath, testFileName+".quota1", testFileSize, client)
assert.Error(t, err)
err = client.Rename(testFileName+".quota", testFileName, false)
assert.NoError(t, err)
files, err := client.ReadDir("/")
assert.NoError(t, err)
assert.Len(t, files, 1)
// test quota size
user.QuotaSize = testFileSize - 1
user.QuotaFiles = 0
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
err = uploadFile(testFilePath, testFileName+".quota", testFileSize, client)
assert.Error(t, err)
err = client.Rename(testFileName, testFileName+".quota", false)
assert.NoError(t, err)
// now test quota limits while uploading the current file, we have 1 bytes remaining
user.QuotaSize = testFileSize + 1
user.QuotaFiles = 0
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
err = uploadFile(testFilePath1, testFileName1, testFileSize1, client)
assert.Error(t, err)
_, err = client.Stat(testFileName1)
assert.Error(t, err)
err = client.Rename(testFileName+".quota", testFileName, false)
assert.NoError(t, err)
// overwriting an existing file will work if the resulting size is lesser or equal than the current one
err = uploadFile(testFilePath, testFileName, testFileSize, client)
assert.NoError(t, err)
err = uploadFile(testFilePath2, testFileName, testFileSize2, client)
assert.NoError(t, err)
err = uploadFile(testFilePath1, testFileName, testFileSize1, client)
assert.Error(t, err)
err = uploadFile(testFilePath2, testFileName, testFileSize2, client)
u = getTestSFTPUser()
u.QuotaFiles = 1
sftpUser, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
for _, user := range []dataprovider.User{localUser, sftpUser} {
testFileSize := int64(65535)
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
testFileSize1 := int64(131072)
testFileName1 := "test_file1.dat"
testFilePath1 := filepath.Join(homeBasePath, testFileName1)
err = createTestFile(testFilePath1, testFileSize1)
assert.NoError(t, err)
testFileSize2 := int64(32768)
testFileName2 := "test_file2.dat"
testFilePath2 := filepath.Join(homeBasePath, testFileName2)
err = createTestFile(testFilePath2, testFileSize2)
assert.NoError(t, err)
client := getWebDavClient(user)
// test quota files
err = uploadFile(testFilePath, testFileName+".quota", testFileSize, client)
assert.NoError(t, err)
err = uploadFile(testFilePath, testFileName+".quota1", testFileSize, client)
assert.Error(t, err)
err = client.Rename(testFileName+".quota", testFileName, false)
assert.NoError(t, err)
files, err := client.ReadDir("/")
assert.NoError(t, err)
assert.Len(t, files, 1)
// test quota size
user.QuotaSize = testFileSize - 1
user.QuotaFiles = 0
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
err = uploadFile(testFilePath, testFileName+".quota", testFileSize, client)
assert.Error(t, err)
err = client.Rename(testFileName, testFileName+".quota", false)
assert.NoError(t, err)
// now test quota limits while uploading the current file, we have 1 bytes remaining
user.QuotaSize = testFileSize + 1
user.QuotaFiles = 0
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
err = uploadFile(testFilePath1, testFileName1, testFileSize1, client)
assert.Error(t, err)
_, err = client.Stat(testFileName1)
assert.Error(t, err)
err = client.Rename(testFileName+".quota", testFileName, false)
assert.NoError(t, err)
// overwriting an existing file will work if the resulting size is lesser or equal than the current one
err = uploadFile(testFilePath, testFileName, testFileSize, client)
assert.NoError(t, err)
err = uploadFile(testFilePath2, testFileName, testFileSize2, client)
assert.NoError(t, err)
err = uploadFile(testFilePath1, testFileName, testFileSize1, client)
assert.Error(t, err)
err = uploadFile(testFilePath2, testFileName, testFileSize2, client)
assert.NoError(t, err)

err = os.Remove(testFilePath)
err = os.Remove(testFilePath)
assert.NoError(t, err)
err = os.Remove(testFilePath1)
assert.NoError(t, err)
err = os.Remove(testFilePath2)
assert.NoError(t, err)
if user.Username == defaultUsername {
err = os.RemoveAll(user.GetHomeDir())
assert.NoError(t, err)
user.QuotaFiles = 0
user.QuotaSize = 0
_, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
}
}
_, err = httpd.RemoveUser(sftpUser, http.StatusOK)
assert.NoError(t, err)
err = os.Remove(testFilePath1)
_, err = httpd.RemoveUser(localUser, http.StatusOK)
assert.NoError(t, err)
err = os.Remove(testFilePath2)
assert.NoError(t, err)
_, err = httpd.RemoveUser(user, http.StatusOK)
assert.NoError(t, err)
err = os.RemoveAll(user.GetHomeDir())
err = os.RemoveAll(localUser.GetHomeDir())
assert.NoError(t, err)
}
@@ -880,34 +933,49 @@ func TestUploadMaxSize(t *testing.T) {
testFileSize := int64(65535)
u := getTestUser()
u.Filters.MaxUploadFileSize = testFileSize + 1
user, _, err := httpd.AddUser(u, http.StatusOK)
localUser, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
u = getTestSFTPUser()
u.Filters.MaxUploadFileSize = testFileSize + 1
sftpUser, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFileSize1 := int64(131072)
testFileName1 := "test_file_dav1.dat"
testFilePath1 := filepath.Join(homeBasePath, testFileName1)
err = createTestFile(testFilePath1, testFileSize1)
assert.NoError(t, err)
client := getWebDavClient(user)
err = uploadFile(testFilePath1, testFileName1, testFileSize1, client)
assert.Error(t, err)
err = uploadFile(testFilePath, testFileName, testFileSize, client)
assert.NoError(t, err)
// now test overwrite an existing file with a size bigger than the allowed one
err = createTestFile(filepath.Join(user.GetHomeDir(), testFileName1), testFileSize1)
assert.NoError(t, err)
err = uploadFile(testFilePath1, testFileName1, testFileSize1, client)
assert.Error(t, err)
for _, user := range []dataprovider.User{localUser, sftpUser} {
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
testFileSize1 := int64(131072)
testFileName1 := "test_file_dav1.dat"
testFilePath1 := filepath.Join(homeBasePath, testFileName1)
err = createTestFile(testFilePath1, testFileSize1)
assert.NoError(t, err)
client := getWebDavClient(user)
err = uploadFile(testFilePath1, testFileName1, testFileSize1, client)
assert.Error(t, err)
err = uploadFile(testFilePath, testFileName, testFileSize, client)
assert.NoError(t, err)
// now test overwrite an existing file with a size bigger than the allowed one
err = createTestFile(filepath.Join(user.GetHomeDir(), testFileName1), testFileSize1)
assert.NoError(t, err)
err = uploadFile(testFilePath1, testFileName1, testFileSize1, client)
assert.Error(t, err)

err = os.Remove(testFilePath)
err = os.Remove(testFilePath)
assert.NoError(t, err)
err = os.Remove(testFilePath1)
assert.NoError(t, err)
if user.Username == defaultUsername {
err = os.RemoveAll(user.GetHomeDir())
assert.NoError(t, err)
user.Filters.MaxUploadFileSize = 65536000
_, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
}
}
_, err = httpd.RemoveUser(sftpUser, http.StatusOK)
assert.NoError(t, err)
err = os.Remove(testFilePath1)
_, err = httpd.RemoveUser(localUser, http.StatusOK)
assert.NoError(t, err)
_, err = httpd.RemoveUser(user, http.StatusOK)
assert.NoError(t, err)
err = os.RemoveAll(user.GetHomeDir())
err = os.RemoveAll(localUser.GetHomeDir())
assert.NoError(t, err)
}
@@ -915,75 +983,84 @@ func TestClientClose(t *testing.T) {
u := getTestUser()
u.UploadBandwidth = 64
u.DownloadBandwidth = 64
user, _, err := httpd.AddUser(u, http.StatusOK)
localUser, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFileSize := int64(1048576)
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
u = getTestSFTPUser()
u.UploadBandwidth = 64
u.DownloadBandwidth = 64
sftpUser, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
client := getWebDavClient(user)
assert.NoError(t, checkBasicFunc(client))
for _, user := range []dataprovider.User{localUser, sftpUser} {
testFileSize := int64(1048576)
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
client := getWebDavClient(user)
assert.NoError(t, checkBasicFunc(client))

var wg sync.WaitGroup
wg.Add(1)
go func() {
err = uploadFile(testFilePath, testFileName, testFileSize, client)
assert.Error(t, err)
wg.Done()
}()
var wg sync.WaitGroup
wg.Add(1)
go func() {
err = uploadFile(testFilePath, testFileName, testFileSize, client)
assert.Error(t, err)
wg.Done()
}()

assert.Eventually(t, func() bool {
for _, stat := range common.Connections.GetStats() {
if len(stat.Transfers) > 0 {
return true
assert.Eventually(t, func() bool {
for _, stat := range common.Connections.GetStats() {
if len(stat.Transfers) > 0 {
return true
}
}
}
return false
}, 1*time.Second, 50*time.Millisecond)
return false
}, 1*time.Second, 50*time.Millisecond)

for _, stat := range common.Connections.GetStats() {
common.Connections.Close(stat.ConnectionID)
}
wg.Wait()
assert.Eventually(t, func() bool { return len(common.Connections.GetStats()) == 0 },
1*time.Second, 100*time.Millisecond)

err = os.Remove(testFilePath)
assert.NoError(t, err)
testFilePath = filepath.Join(user.HomeDir, testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)

wg.Add(1)
go func() {
err = downloadFile(testFileName, localDownloadPath, testFileSize, client)
assert.Error(t, err)
wg.Done()
}()

assert.Eventually(t, func() bool {
for _, stat := range common.Connections.GetStats() {
if len(stat.Transfers) > 0 {
return true
}
common.Connections.Close(stat.ConnectionID)
}
return false
}, 1*time.Second, 50*time.Millisecond)
wg.Wait()
assert.Eventually(t, func() bool { return len(common.Connections.GetStats()) == 0 },
1*time.Second, 100*time.Millisecond)

for _, stat := range common.Connections.GetStats() {
common.Connections.Close(stat.ConnectionID)
err = os.Remove(testFilePath)
assert.NoError(t, err)
testFilePath = filepath.Join(user.HomeDir, testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)

wg.Add(1)
go func() {
err = downloadFile(testFileName, localDownloadPath, testFileSize, client)
assert.Error(t, err)
wg.Done()
}()

assert.Eventually(t, func() bool {
for _, stat := range common.Connections.GetStats() {
if len(stat.Transfers) > 0 {
return true
}
}
return false
}, 1*time.Second, 50*time.Millisecond)

for _, stat := range common.Connections.GetStats() {
common.Connections.Close(stat.ConnectionID)
}
wg.Wait()
assert.Eventually(t, func() bool { return len(common.Connections.GetStats()) == 0 },
1*time.Second, 100*time.Millisecond)

err = os.Remove(localDownloadPath)
assert.NoError(t, err)
}
wg.Wait()
assert.Eventually(t, func() bool { return len(common.Connections.GetStats()) == 0 },
1*time.Second, 100*time.Millisecond)

err = os.Remove(localDownloadPath)
_, err = httpd.RemoveUser(sftpUser, http.StatusOK)
assert.NoError(t, err)

_, err = httpd.RemoveUser(user, http.StatusOK)
_, err = httpd.RemoveUser(localUser, http.StatusOK)
assert.NoError(t, err)
err = os.RemoveAll(user.GetHomeDir())
err = os.RemoveAll(localUser.GetHomeDir())
assert.NoError(t, err)
}
@@ -1063,7 +1140,14 @@ func TestLoginInvalidFs(t *testing.T) {
}

func TestBytesRangeRequests(t *testing.T) {
for _, u := range []dataprovider.User{getTestUser(), getTestUserWithCryptFs()} {
u := getTestUser()
u.Username = u.Username + "1"
localUser, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
sftpUser := getTestSFTPUser()
sftpUser.FsConfig.SFTPConfig.Username = localUser.Username

for _, u := range []dataprovider.User{getTestUser(), getTestUserWithCryptFs(), sftpUser} {
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFileName := "test_file.txt"
@@ -1107,6 +1191,10 @@ func TestBytesRangeRequests(t *testing.T) {
err = os.RemoveAll(user.GetHomeDir())
assert.NoError(t, err)
}
_, err = httpd.RemoveUser(localUser, http.StatusOK)
assert.NoError(t, err)
err = os.RemoveAll(localUser.GetHomeDir())
assert.NoError(t, err)
}

func TestGETAsPROPFIND(t *testing.T) {
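TestBytesRangeRequests above exercises partial downloads over WebDAV for every backend, including the new SFTP one. A sketch of the kind of request it issues: a plain HTTP GET with a Range header against the WebDAV endpoint. The URL, file name and credentials below are just the test constants reused as placeholders:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:9090/test_file.txt", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.SetBasicAuth("test_user_dav", "test_password")
	req.Header.Set("Range", "bytes=5-")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	// a compliant server answers 206 Partial Content with a Content-Range header
	fmt.Println(resp.StatusCode, resp.Header.Get("Content-Range"))
}
```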
@@ -1274,58 +1362,73 @@ func TestUploadOverwriteVfolder(t *testing.T) {
func TestMiscCommands(t *testing.T) {
u := getTestUser()
u.QuotaFiles = 100
user, _, err := httpd.AddUser(u, http.StatusOK)
localUser, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
dir := "testDir"
client := getWebDavClient(user)
err = client.MkdirAll(path.Join(dir, "sub1", "sub2"), os.ModePerm)
u = getTestSFTPUser()
u.QuotaFiles = 100
sftpUser, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
err = uploadFile(testFilePath, path.Join(dir, testFileName), testFileSize, client)
assert.NoError(t, err)
err = uploadFile(testFilePath, path.Join(dir, "sub1", testFileName), testFileSize, client)
assert.NoError(t, err)
err = uploadFile(testFilePath, path.Join(dir, "sub1", "sub2", testFileName), testFileSize, client)
assert.NoError(t, err)
err = client.Copy(dir, dir+"_copy", false)
assert.NoError(t, err)
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
assert.NoError(t, err)
assert.Equal(t, 6, user.UsedQuotaFiles)
assert.Equal(t, 6*testFileSize, user.UsedQuotaSize)
err = client.Copy(dir, dir+"_copy1", false)
assert.NoError(t, err)
err = client.Copy(dir+"_copy", dir+"_copy1", false)
assert.Error(t, err)
err = client.Copy(dir+"_copy", dir+"_copy1", true)
assert.NoError(t, err)
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
assert.NoError(t, err)
assert.Equal(t, 9, user.UsedQuotaFiles)
assert.Equal(t, 9*testFileSize, user.UsedQuotaSize)
err = client.Rename(dir+"_copy1", dir+"_copy2", false)
assert.NoError(t, err)
err = client.Remove(path.Join(dir+"_copy", testFileName))
assert.NoError(t, err)
err = client.Rename(dir+"_copy2", dir+"_copy", true)
assert.NoError(t, err)
err = client.Copy(dir+"_copy", dir+"_copy1", false)
assert.NoError(t, err)
err = client.RemoveAll(dir + "_copy1")
assert.NoError(t, err)
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
assert.NoError(t, err)
assert.Equal(t, 6, user.UsedQuotaFiles)
assert.Equal(t, 6*testFileSize, user.UsedQuotaSize)
for _, user := range []dataprovider.User{localUser, sftpUser} {
dir := "testDir"
client := getWebDavClient(user)
err = client.MkdirAll(path.Join(dir, "sub1", "sub2"), os.ModePerm)
assert.NoError(t, err)
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
err = uploadFile(testFilePath, path.Join(dir, testFileName), testFileSize, client)
assert.NoError(t, err)
err = uploadFile(testFilePath, path.Join(dir, "sub1", testFileName), testFileSize, client)
assert.NoError(t, err)
err = uploadFile(testFilePath, path.Join(dir, "sub1", "sub2", testFileName), testFileSize, client)
assert.NoError(t, err)
err = client.Copy(dir, dir+"_copy", false)
assert.NoError(t, err)
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
assert.NoError(t, err)
assert.Equal(t, 6, user.UsedQuotaFiles)
assert.Equal(t, 6*testFileSize, user.UsedQuotaSize)
err = client.Copy(dir, dir+"_copy1", false)
assert.NoError(t, err)
err = client.Copy(dir+"_copy", dir+"_copy1", false)
assert.Error(t, err)
err = client.Copy(dir+"_copy", dir+"_copy1", true)
assert.NoError(t, err)
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
assert.NoError(t, err)
assert.Equal(t, 9, user.UsedQuotaFiles)
assert.Equal(t, 9*testFileSize, user.UsedQuotaSize)
err = client.Rename(dir+"_copy1", dir+"_copy2", false)
assert.NoError(t, err)
err = client.Remove(path.Join(dir+"_copy", testFileName))
assert.NoError(t, err)
err = client.Rename(dir+"_copy2", dir+"_copy", true)
assert.NoError(t, err)
err = client.Copy(dir+"_copy", dir+"_copy1", false)
assert.NoError(t, err)
err = client.RemoveAll(dir + "_copy1")
assert.NoError(t, err)
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
assert.NoError(t, err)
assert.Equal(t, 6, user.UsedQuotaFiles)
assert.Equal(t, 6*testFileSize, user.UsedQuotaSize)

err = os.Remove(testFilePath)
err = os.Remove(testFilePath)
assert.NoError(t, err)
if user.Username == defaultUsername {
err = os.RemoveAll(user.GetHomeDir())
assert.NoError(t, err)
user.QuotaFiles = 0
_, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
}
}
_, err = httpd.RemoveUser(sftpUser, http.StatusOK)
assert.NoError(t, err)
_, err = httpd.RemoveUser(user, http.StatusOK)
_, err = httpd.RemoveUser(localUser, http.StatusOK)
assert.NoError(t, err)
err = os.RemoveAll(user.GetHomeDir())
err = os.RemoveAll(localUser.GetHomeDir())
assert.NoError(t, err)
}
@@ -1420,6 +1523,16 @@ func getTestUser() dataprovider.User {
return user
}

func getTestSFTPUser() dataprovider.User {
u := getTestUser()
u.Username = u.Username + "_sftp"
u.FsConfig.Provider = dataprovider.SFTPFilesystemProvider
u.FsConfig.SFTPConfig.Endpoint = sftpServerAddr
u.FsConfig.SFTPConfig.Username = defaultUsername
u.FsConfig.SFTPConfig.Password = kms.NewPlainSecret(defaultPassword)
return u
}

func getTestUserWithCryptFs() dataprovider.User {
user := getTestUser()
user.FsConfig.Provider = dataprovider.CryptedFilesystemProvider
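getTestSFTPUser above maps the test user onto the local SFTP server started in TestMain using password authentication. A hedged variant that authenticates with a private key, pins the host fingerprint and exposes only a subfolder of the remote account; the exact field names mirror the portable-mode flags visible in this commit (endpoint, username, private key, fingerprints, prefix), but treat the struct shape and the placeholder values as assumptions:

```go
// getTestSFTPUserWithKey is an illustrative helper, not part of this commit.
// testPrivateKeyPEM and testHostFingerprint are hypothetical placeholders.
func getTestSFTPUserWithKey() dataprovider.User {
	u := getTestUser()
	u.Username = u.Username + "_sftp_key"
	u.FsConfig.Provider = dataprovider.SFTPFilesystemProvider
	u.FsConfig.SFTPConfig.Endpoint = sftpServerAddr
	u.FsConfig.SFTPConfig.Username = defaultUsername
	u.FsConfig.SFTPConfig.PrivateKey = kms.NewPlainSecret(testPrivateKeyPEM)
	u.FsConfig.SFTPConfig.Fingerprints = []string{testHostFingerprint}
	// expose only a subfolder of the remote account
	u.FsConfig.SFTPConfig.Prefix = "/subdir"
	return u
}
```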