refactor: add an enum for filesystem providers

Signed-off-by: Mark Sagi-Kazar <mark.sagikazar@gmail.com>
This commit is contained in:
Mark Sagi-Kazar 2020-10-05 20:58:41 +02:00 committed by Nicola Murino
parent c992072286
commit 5e2db77ef9
14 changed files with 64 additions and 55 deletions

View file

@@ -67,8 +67,9 @@ $ sftpgo portable
Please take a look at the usage below to customize the serving parameters`,
Run: func(cmd *cobra.Command, args []string) {
portableDir := directoryToServe
fsProvider := dataprovider.FilesystemProvider(portableFsProvider)
if !filepath.IsAbs(portableDir) {
if portableFsProvider == 0 {
if fsProvider == dataprovider.LocalFilesystemProvider {
portableDir, _ = filepath.Abs(portableDir)
} else {
portableDir = os.TempDir()
@@ -77,7 +78,7 @@ Please take a look at the usage below to customize the serving parameters`,
permissions := make(map[string][]string)
permissions["/"] = portablePermissions
portableGCSCredentials := ""
if portableFsProvider == 2 && len(portableGCSCredentialsFile) > 0 {
if fsProvider == dataprovider.GCSFilesystemProvider && len(portableGCSCredentialsFile) > 0 {
fi, err := os.Stat(portableGCSCredentialsFile)
if err != nil {
fmt.Printf("Invalid GCS credentials file: %v\n", err)
@@ -131,7 +132,7 @@ Please take a look at the usage below to customize the serving parameters`,
HomeDir: portableDir,
Status: 1,
FsConfig: dataprovider.Filesystem{
Provider: portableFsProvider,
Provider: dataprovider.FilesystemProvider(portableFsProvider),
S3Config: vfs.S3FsConfig{
Bucket: portableS3Bucket,
Region: portableS3Region,
@@ -213,7 +214,7 @@ multicast DNS`)
advertised via multicast DNS, this
flag allows to put username/password
inside the advertised TXT record`)
portableCmd.Flags().IntVarP(&portableFsProvider, "fs-provider", "f", 0, `0 means local filesystem,
portableCmd.Flags().IntVarP(&portableFsProvider, "fs-provider", "f", int(dataprovider.LocalFilesystemProvider), `0 means local filesystem,
1 Amazon S3 compatible,
2 Google Cloud Storage`)
portableCmd.Flags().StringVar(&portableS3Bucket, "s3-bucket", "", "")

View file

@@ -60,10 +60,10 @@ func newActionNotification(user *dataprovider.User, operation, filePath, target,
bucket := ""
endpoint := ""
status := 1
if user.FsConfig.Provider == 1 {
if user.FsConfig.Provider == dataprovider.S3FilesystemProvider {
bucket = user.FsConfig.S3Config.Bucket
endpoint = user.FsConfig.S3Config.Endpoint
} else if user.FsConfig.Provider == 2 {
} else if user.FsConfig.Provider == dataprovider.GCSFilesystemProvider {
bucket = user.FsConfig.GCSConfig.Bucket
}
if err == ErrQuotaExceeded {
@@ -78,7 +78,7 @@ func newActionNotification(user *dataprovider.User, operation, filePath, target,
TargetPath: target,
SSHCmd: sshCmd,
FileSize: fileSize,
FsProvider: user.FsConfig.Provider,
FsProvider: int(user.FsConfig.Provider),
Bucket: bucket,
Endpoint: endpoint,
Status: status,

View file

@@ -20,7 +20,7 @@ func TestNewActionNotification(t *testing.T) {
user := &dataprovider.User{
Username: "username",
}
user.FsConfig.Provider = 0
user.FsConfig.Provider = dataprovider.LocalFilesystemProvider
user.FsConfig.S3Config = vfs.S3FsConfig{
Bucket: "s3bucket",
Endpoint: "endpoint",
@@ -34,13 +34,13 @@ func TestNewActionNotification(t *testing.T) {
assert.Equal(t, 0, len(a.Endpoint))
assert.Equal(t, 0, a.Status)
user.FsConfig.Provider = 1
user.FsConfig.Provider = dataprovider.S3FilesystemProvider
a = newActionNotification(user, operationDownload, "path", "target", "", ProtocolSSH, 123, nil)
assert.Equal(t, "s3bucket", a.Bucket)
assert.Equal(t, "endpoint", a.Endpoint)
assert.Equal(t, 1, a.Status)
user.FsConfig.Provider = 2
user.FsConfig.Provider = dataprovider.GCSFilesystemProvider
a = newActionNotification(user, operationDownload, "path", "target", "", ProtocolSCP, 123, ErrQuotaExceeded)
assert.Equal(t, "gcsbucket", a.Bucket)
assert.Equal(t, 0, len(a.Endpoint))

View file

@@ -792,7 +792,7 @@ func validateFolderQuotaLimits(folder vfs.VirtualFolder) error {
}
func validateUserVirtualFolders(user *User) error {
if len(user.VirtualFolders) == 0 || user.FsConfig.Provider != 0 {
if len(user.VirtualFolders) == 0 || user.FsConfig.Provider != LocalFilesystemProvider {
user.VirtualFolders = []vfs.VirtualFolder{}
return nil
}
@@ -968,7 +968,7 @@ func validateFilters(user *User) error {
}
func saveGCSCredentials(user *User) error {
if user.FsConfig.Provider != 2 {
if user.FsConfig.Provider != GCSFilesystemProvider {
return nil
}
if len(user.FsConfig.GCSConfig.Credentials) == 0 {
@@ -987,7 +987,7 @@ func saveGCSCredentials(user *User) error {
}
func validateFilesystemConfig(user *User) error {
if user.FsConfig.Provider == 1 {
if user.FsConfig.Provider == S3FilesystemProvider {
err := vfs.ValidateS3FsConfig(&user.FsConfig.S3Config)
if err != nil {
return &ValidationError{err: fmt.Sprintf("could not validate s3config: %v", err)}
@@ -1003,14 +1003,14 @@ func validateFilesystemConfig(user *User) error {
}
}
return nil
} else if user.FsConfig.Provider == 2 {
} else if user.FsConfig.Provider == GCSFilesystemProvider {
err := vfs.ValidateGCSFsConfig(&user.FsConfig.GCSConfig, user.getGCSCredentialsFilePath())
if err != nil {
return &ValidationError{err: fmt.Sprintf("could not validate GCS config: %v", err)}
}
return nil
}
user.FsConfig.Provider = 0
user.FsConfig.Provider = LocalFilesystemProvider
user.FsConfig.S3Config = vfs.S3FsConfig{}
user.FsConfig.GCSConfig = vfs.GCSFsConfig{}
return nil
@@ -1241,16 +1241,16 @@ func comparePbkdf2PasswordAndHash(password, hashedPassword string) (bool, error)
// HideUserSensitiveData hides user sensitive data
func HideUserSensitiveData(user *User) User {
user.Password = ""
if user.FsConfig.Provider == 1 {
if user.FsConfig.Provider == S3FilesystemProvider {
user.FsConfig.S3Config.AccessSecret = utils.RemoveDecryptionKey(user.FsConfig.S3Config.AccessSecret)
} else if user.FsConfig.Provider == 2 {
} else if user.FsConfig.Provider == GCSFilesystemProvider {
user.FsConfig.GCSConfig.Credentials = ""
}
return *user
}
func addCredentialsToUser(user *User) error {
if user.FsConfig.Provider != 2 {
if user.FsConfig.Provider != GCSFilesystemProvider {
return nil
}
if user.FsConfig.GCSConfig.AutomaticCredentials > 0 {

View file

@@ -119,12 +119,20 @@ type UserFilters struct {
MaxUploadFileSize int64 `json:"max_upload_file_size,omitempty"`
}
// FilesystemProvider defines the supported storages
type FilesystemProvider int
const (
LocalFilesystemProvider FilesystemProvider = iota // Local
S3FilesystemProvider // Amazon S3 compatible
GCSFilesystemProvider // Google Cloud Storage
)
// Filesystem defines cloud storage filesystem details
type Filesystem struct {
// 0 local filesystem, 1 Amazon S3 compatible, 2 Google Cloud Storage
Provider int `json:"provider"`
S3Config vfs.S3FsConfig `json:"s3config,omitempty"`
GCSConfig vfs.GCSFsConfig `json:"gcsconfig,omitempty"`
Provider FilesystemProvider `json:"provider"`
S3Config vfs.S3FsConfig `json:"s3config,omitempty"`
GCSConfig vfs.GCSFsConfig `json:"gcsconfig,omitempty"`
}
// User defines a SFTPGo user
@@ -181,9 +189,9 @@ type User struct {
// GetFilesystem returns the filesystem for this user
func (u *User) GetFilesystem(connectionID string) (vfs.Fs, error) {
if u.FsConfig.Provider == 1 {
if u.FsConfig.Provider == S3FilesystemProvider {
return vfs.NewS3Fs(connectionID, u.GetHomeDir(), u.FsConfig.S3Config)
} else if u.FsConfig.Provider == 2 {
} else if u.FsConfig.Provider == GCSFilesystemProvider {
config := u.FsConfig.GCSConfig
config.CredentialFile = u.getGCSCredentialsFilePath()
return vfs.NewGCSFs(connectionID, u.GetHomeDir(), config)
@@ -221,7 +229,7 @@ func (u *User) GetPermissionsForPath(p string) []string {
// If the path is not inside a virtual folder an error is returned
func (u *User) GetVirtualFolderForPath(sftpPath string) (vfs.VirtualFolder, error) {
var folder vfs.VirtualFolder
if len(u.VirtualFolders) == 0 || u.FsConfig.Provider != 0 {
if len(u.VirtualFolders) == 0 || u.FsConfig.Provider != LocalFilesystemProvider {
return folder, errNoMatchingVirtualFolder
}
dirsForPath := utils.GetDirsForSFTPPath(sftpPath)
@@ -613,9 +621,9 @@ func (u *User) GetInfoString() string {
t := utils.GetTimeFromMsecSinceEpoch(u.LastLogin)
result += fmt.Sprintf("Last login: %v ", t.Format("2006-01-02 15:04:05")) // YYYY-MM-DD HH:MM:SS
}
if u.FsConfig.Provider == 1 {
if u.FsConfig.Provider == S3FilesystemProvider {
result += "Storage: S3 "
} else if u.FsConfig.Provider == 2 {
} else if u.FsConfig.Provider == GCSFilesystemProvider {
result += "Storage: GCS "
}
if len(u.PublicKeys) > 0 {

View file

@@ -858,7 +858,7 @@ func TestLoginWithIPilters(t *testing.T) {
func TestLoginInvalidFs(t *testing.T) {
u := getTestUser()
u.FsConfig.Provider = 2
u.FsConfig.Provider = dataprovider.GCSFilesystemProvider
u.FsConfig.GCSConfig.Bucket = "test"
u.FsConfig.GCSConfig.Credentials = base64.StdEncoding.EncodeToString([]byte("invalid JSON for credentials"))
user, _, err := httpd.AddUser(u, http.StatusOK)

View file

@@ -118,7 +118,7 @@ func updateUser(w http.ResponseWriter, r *http.Request) {
}
currentPermissions := user.Permissions
currentS3AccessSecret := ""
if user.FsConfig.Provider == 1 {
if user.FsConfig.Provider == dataprovider.S3FilesystemProvider {
currentS3AccessSecret = user.FsConfig.S3Config.AccessSecret
}
user.Permissions = make(map[string][]string)
@@ -132,7 +132,7 @@ func updateUser(w http.ResponseWriter, r *http.Request) {
user.Permissions = currentPermissions
}
// we use the new access secret if different from the old one and not empty
if user.FsConfig.Provider == 1 {
if user.FsConfig.Provider == dataprovider.S3FilesystemProvider {
if utils.RemoveDecryptionKey(currentS3AccessSecret) == user.FsConfig.S3Config.AccessSecret ||
(len(user.FsConfig.S3Config.AccessSecret) == 0 && len(user.FsConfig.S3Config.AccessKey) > 0) {
user.FsConfig.S3Config.AccessSecret = currentS3AccessSecret

View file

@@ -385,7 +385,7 @@ func TestAddUserInvalidFilters(t *testing.T) {
func TestAddUserInvalidFsConfig(t *testing.T) {
u := getTestUser()
u.FsConfig.Provider = 1
u.FsConfig.Provider = dataprovider.S3FilesystemProvider
u.FsConfig.S3Config.Bucket = ""
_, _, err := httpd.AddUser(u, http.StatusBadRequest)
assert.NoError(t, err)
@@ -411,7 +411,7 @@ func TestAddUserInvalidFsConfig(t *testing.T) {
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
assert.NoError(t, err)
u = getTestUser()
u.FsConfig.Provider = 2
u.FsConfig.Provider = dataprovider.GCSFilesystemProvider
u.FsConfig.GCSConfig.Bucket = ""
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
assert.NoError(t, err)
@@ -921,7 +921,7 @@ func TestUserFolderMapping(t *testing.T) {
func TestUserS3Config(t *testing.T) {
user, _, err := httpd.AddUser(getTestUser(), http.StatusOK)
assert.NoError(t, err)
user.FsConfig.Provider = 1
user.FsConfig.Provider = dataprovider.S3FilesystemProvider
user.FsConfig.S3Config.Bucket = "test" //nolint:goconst
user.FsConfig.S3Config.Region = "us-east-1" //nolint:goconst
user.FsConfig.S3Config.AccessKey = "Server-Access-Key"
@@ -938,7 +938,7 @@ func TestUserS3Config(t *testing.T) {
user.FsConfig.S3Config.AccessSecret = secret
user, _, err = httpd.AddUser(user, http.StatusOK)
assert.NoError(t, err)
user.FsConfig.Provider = 1
user.FsConfig.Provider = dataprovider.S3FilesystemProvider
user.FsConfig.S3Config.Bucket = "test-bucket"
user.FsConfig.S3Config.Region = "us-east-1" //nolint:goconst
user.FsConfig.S3Config.AccessKey = "Server-Access-Key1"
@@ -947,7 +947,7 @@ func TestUserS3Config(t *testing.T) {
user.FsConfig.S3Config.UploadConcurrency = 5
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
user.FsConfig.Provider = 0
user.FsConfig.Provider = dataprovider.LocalFilesystemProvider
user.FsConfig.S3Config.Bucket = ""
user.FsConfig.S3Config.Region = ""
user.FsConfig.S3Config.AccessKey = ""
@@ -959,7 +959,7 @@ func TestUserS3Config(t *testing.T) {
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
// test user without access key and access secret (shared config state)
user.FsConfig.Provider = 1
user.FsConfig.Provider = dataprovider.S3FilesystemProvider
user.FsConfig.S3Config.Bucket = "testbucket"
user.FsConfig.S3Config.Region = "us-east-1"
user.FsConfig.S3Config.AccessKey = ""
@@ -981,7 +981,7 @@ func TestUserGCSConfig(t *testing.T) {
assert.NoError(t, err)
err = os.MkdirAll(credentialsPath, 0700)
assert.NoError(t, err)
user.FsConfig.Provider = 2
user.FsConfig.Provider = dataprovider.GCSFilesystemProvider
user.FsConfig.GCSConfig.Bucket = "test"
user.FsConfig.GCSConfig.Credentials = base64.StdEncoding.EncodeToString([]byte("fake credentials"))
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
@@ -1001,7 +1001,7 @@ func TestUserGCSConfig(t *testing.T) {
user.FsConfig.GCSConfig.AutomaticCredentials = 1
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
user.FsConfig.Provider = 1
user.FsConfig.Provider = dataprovider.S3FilesystemProvider
user.FsConfig.S3Config.Bucket = "test1"
user.FsConfig.S3Config.Region = "us-east-1"
user.FsConfig.S3Config.AccessKey = "Server-Access-Key1"
@@ -1010,7 +1010,7 @@ func TestUserGCSConfig(t *testing.T) {
user.FsConfig.S3Config.KeyPrefix = "somedir/subdir"
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
user.FsConfig.Provider = 2
user.FsConfig.Provider = dataprovider.GCSFilesystemProvider
user.FsConfig.GCSConfig.Bucket = "test1"
user.FsConfig.GCSConfig.Credentials = base64.StdEncoding.EncodeToString([]byte("fake credentials"))
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
@@ -2558,7 +2558,7 @@ func TestWebUserS3Mock(t *testing.T) {
checkResponseCode(t, http.StatusOK, rr.Code)
err := render.DecodeJSON(rr.Body, &user)
assert.NoError(t, err)
user.FsConfig.Provider = 1
user.FsConfig.Provider = dataprovider.S3FilesystemProvider
user.FsConfig.S3Config.Bucket = "test"
user.FsConfig.S3Config.Region = "eu-west-1"
user.FsConfig.S3Config.AccessKey = "access-key"
@@ -2655,7 +2655,7 @@ func TestWebUserGCSMock(t *testing.T) {
credentialsFilePath := filepath.Join(os.TempDir(), "gcs.json")
err = createTestFile(credentialsFilePath, 0)
assert.NoError(t, err)
user.FsConfig.Provider = 2
user.FsConfig.Provider = dataprovider.GCSFilesystemProvider
user.FsConfig.GCSConfig.Bucket = "test"
user.FsConfig.GCSConfig.KeyPrefix = "somedir/subdir/"
user.FsConfig.GCSConfig.StorageClass = "standard"

View file

@@ -116,10 +116,10 @@ func TestCheckUser(t *testing.T) {
assert.Error(t, err)
expected.Permissions = make(map[string][]string)
actual.Permissions = make(map[string][]string)
actual.FsConfig.Provider = 1
actual.FsConfig.Provider = dataprovider.S3FilesystemProvider
err = checkUser(expected, actual)
assert.Error(t, err)
actual.FsConfig.Provider = 0
actual.FsConfig.Provider = dataprovider.LocalFilesystemProvider
expected.VirtualFolders = append(expected.VirtualFolders, vfs.VirtualFolder{
BaseVirtualFolder: vfs.BaseVirtualFolder{
MappedPath: os.TempDir(),
@@ -277,10 +277,10 @@ func TestCompareUserFields(t *testing.T) {
func TestCompareUserFsConfig(t *testing.T) {
expected := &dataprovider.User{}
actual := &dataprovider.User{}
expected.FsConfig.Provider = 1
expected.FsConfig.Provider = dataprovider.S3FilesystemProvider
err := compareUserFsConfig(expected, actual)
assert.Error(t, err)
expected.FsConfig.Provider = 0
expected.FsConfig.Provider = dataprovider.LocalFilesystemProvider
expected.FsConfig.S3Config.Bucket = "bucket"
err = compareUserFsConfig(expected, actual)
assert.Error(t, err)
@@ -545,7 +545,7 @@ func TestQuotaScanInvalidFs(t *testing.T) {
Username: "test",
HomeDir: os.TempDir(),
FsConfig: dataprovider.Filesystem{
Provider: 1,
Provider: dataprovider.S3FilesystemProvider,
},
}
common.QuotaScans.AddUserQuotaScan(user.Username)

View file

@@ -386,10 +386,10 @@ func getFsConfigFromUserPostFields(r *http.Request) (dataprovider.Filesystem, er
var fs dataprovider.Filesystem
provider, err := strconv.Atoi(r.Form.Get("fs_provider"))
if err != nil {
provider = 0
provider = int(dataprovider.LocalFilesystemProvider)
}
fs.Provider = provider
if fs.Provider == 1 {
fs.Provider = dataprovider.FilesystemProvider(provider)
if fs.Provider == dataprovider.S3FilesystemProvider {
fs.S3Config.Bucket = r.Form.Get("s3_bucket")
fs.S3Config.Region = r.Form.Get("s3_region")
fs.S3Config.AccessKey = r.Form.Get("s3_access_key")
@@ -405,7 +405,7 @@ func getFsConfigFromUserPostFields(r *http.Request) (dataprovider.Filesystem, er
if err != nil {
return fs, err
}
} else if fs.Provider == 2 {
} else if fs.Provider == dataprovider.GCSFilesystemProvider {
fs.GCSConfig.Bucket = r.Form.Get("gcs_bucket")
fs.GCSConfig.StorageClass = r.Form.Get("gcs_storage_class")
fs.GCSConfig.KeyPrefix = r.Form.Get("gcs_key_prefix")

View file

@@ -219,9 +219,9 @@ func (s *Service) advertiseServices(advertiseService, advertiseCredentials bool)
func (s *Service) getPortableDirToServe() string {
var dirToServe string
if s.PortableUser.FsConfig.Provider == 1 {
if s.PortableUser.FsConfig.Provider == dataprovider.S3FilesystemProvider {
dirToServe = s.PortableUser.FsConfig.S3Config.KeyPrefix
} else if s.PortableUser.FsConfig.Provider == 2 {
} else if s.PortableUser.FsConfig.Provider == dataprovider.GCSFilesystemProvider {
dirToServe = s.PortableUser.FsConfig.GCSConfig.KeyPrefix
} else {
dirToServe = s.PortableUser.HomeDir

View file

@@ -733,7 +733,7 @@ func TestSSHCommandsRemoteFs(t *testing.T) {
}()
user := dataprovider.User{}
user.FsConfig = dataprovider.Filesystem{
Provider: 1,
Provider: dataprovider.S3FilesystemProvider,
S3Config: vfs.S3FsConfig{
Bucket: "s3bucket",
Endpoint: "endpoint",

View file

@@ -1311,7 +1311,7 @@ func TestLoginUserExpiration(t *testing.T) {
func TestLoginInvalidFs(t *testing.T) {
usePubKey := true
u := getTestUser(usePubKey)
u.FsConfig.Provider = 2
u.FsConfig.Provider = dataprovider.GCSFilesystemProvider
u.FsConfig.GCSConfig.Bucket = "test"
u.FsConfig.GCSConfig.Credentials = base64.StdEncoding.EncodeToString([]byte("invalid JSON for credentials"))
user, _, err := httpd.AddUser(u, http.StatusOK)

View file

@@ -791,7 +791,7 @@ func TestClientClose(t *testing.T) {
func TestLoginInvalidFs(t *testing.T) {
u := getTestUser()
u.FsConfig.Provider = 2
u.FsConfig.Provider = dataprovider.GCSFilesystemProvider
u.FsConfig.GCSConfig.Bucket = "test"
u.FsConfig.GCSConfig.Credentials = base64.StdEncoding.EncodeToString([]byte("invalid JSON for credentials"))
user, _, err := httpd.AddUser(u, http.StatusOK)