diff --git a/README.md b/README.md index 40ba9784..5a03751d 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go) Fully featured and highly configurable SFTP server with optional FTP/S and WebDAV support, written in Go. -It can serve local filesystem, S3 or Google Cloud Storage. +It can serve local filesystem, S3 (compatible) Object Storage, Google Cloud Storage and Azure Blob Storage. ## Features @@ -173,6 +173,10 @@ Each user can be mapped to the whole bucket or to a bucket virtual folder. This Each user can be mapped with a Google Cloud Storage bucket or a bucket virtual folder. This way, the mapped bucket/virtual folder is exposed over SFTP/SCP/FTP/WebDAV. More information about Google Cloud Storage integration can be found [here](./docs/google-cloud-storage.md). +### Azure Blob Storage backend + +Each user can be mapped with an Azure Blob Storage container or a container virtual folder. This way, the mapped container/virtual folder is exposed over SFTP/SCP/FTP/WebDAV. More information about Azure Blob Storage integration can be found [here](./docs/azure-blob-storage.md). + ### Other Storage backends Adding new storage backends is quite easy: diff --git a/common/actions.go b/common/actions.go index 014246e0..24f84b95 100644 --- a/common/actions.go +++ b/common/actions.go @@ -84,6 +84,13 @@ func newActionNotification( endpoint = user.FsConfig.S3Config.Endpoint } else if user.FsConfig.Provider == dataprovider.GCSFilesystemProvider { bucket = user.FsConfig.GCSConfig.Bucket + } else if user.FsConfig.Provider == dataprovider.AzureBlobFilesystemProvider { + bucket = user.FsConfig.AzBlobConfig.Container + if user.FsConfig.AzBlobConfig.SASURL != "" { + endpoint = user.FsConfig.AzBlobConfig.SASURL + } else { + endpoint = user.FsConfig.AzBlobConfig.Endpoint + } } if err == ErrQuotaExceeded { diff --git a/common/actions_test.go b/common/actions_test.go index e35d0b5a..63d3b339 100644 --- a/common/actions_test.go +++ b/common/actions_test.go @@ -28,6 +28,11 @@ func TestNewActionNotification(t *testing.T) { user.FsConfig.GCSConfig = vfs.GCSFsConfig{ Bucket: "gcsbucket", } + user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{ + Container: "azcontainer", + SASURL: "azsasurl", + Endpoint: "azendpoint", + } a := newActionNotification(user, operationDownload, "path", "target", "", ProtocolSFTP, 123, errors.New("fake error")) assert.Equal(t, user.Username, a.Username) assert.Equal(t, 0, len(a.Bucket)) @@ -45,6 +50,18 @@ func TestNewActionNotification(t *testing.T) { assert.Equal(t, "gcsbucket", a.Bucket) assert.Equal(t, 0, len(a.Endpoint)) assert.Equal(t, 2, a.Status) + + user.FsConfig.Provider = dataprovider.AzureBlobFilesystemProvider + a = newActionNotification(user, operationDownload, "path", "target", "", ProtocolSCP, 123, nil) + assert.Equal(t, "azcontainer", a.Bucket) + assert.Equal(t, "azsasurl", a.Endpoint) + assert.Equal(t, 1, a.Status) + + user.FsConfig.AzBlobConfig.SASURL = "" + a = newActionNotification(user, operationDownload, "path", "target", "", ProtocolSCP, 123, nil) + assert.Equal(t, "azcontainer", a.Bucket) + assert.Equal(t, "azendpoint", a.Endpoint) + assert.Equal(t, 1, a.Status) } func TestActionHTTP(t *testing.T) { diff --git a/dataprovider/dataprovider.go b/dataprovider/dataprovider.go index bea725ba..0e0318f5 100644 --- a/dataprovider/dataprovider.go +++ b/dataprovider/dataprovider.go @@ -995,7 +995,7 @@ func validateFilesystemConfig(user *User) error { if err != nil { return 
&ValidationError{err: fmt.Sprintf("could not validate s3config: %v", err)} } - if len(user.FsConfig.S3Config.AccessSecret) > 0 { + if user.FsConfig.S3Config.AccessSecret != "" { vals := strings.Split(user.FsConfig.S3Config.AccessSecret, "$") if !strings.HasPrefix(user.FsConfig.S3Config.AccessSecret, "$aes$") || len(vals) != 4 { accessSecret, err := utils.EncryptData(user.FsConfig.S3Config.AccessSecret) @@ -1012,10 +1012,27 @@ func validateFilesystemConfig(user *User) error { return &ValidationError{err: fmt.Sprintf("could not validate GCS config: %v", err)} } return nil + } else if user.FsConfig.Provider == AzureBlobFilesystemProvider { + err := vfs.ValidateAzBlobFsConfig(&user.FsConfig.AzBlobConfig) + if err != nil { + return &ValidationError{err: fmt.Sprintf("could not validate Azure Blob config: %v", err)} + } + if user.FsConfig.AzBlobConfig.AccountKey != "" { + vals := strings.Split(user.FsConfig.AzBlobConfig.AccountKey, "$") + if !strings.HasPrefix(user.FsConfig.AzBlobConfig.AccountKey, "$aes$") || len(vals) != 4 { + accountKey, err := utils.EncryptData(user.FsConfig.AzBlobConfig.AccountKey) + if err != nil { + return &ValidationError{err: fmt.Sprintf("could not encrypt Azure blob account key: %v", err)} + } + user.FsConfig.AzBlobConfig.AccountKey = accountKey + } + } + return nil } user.FsConfig.Provider = LocalFilesystemProvider user.FsConfig.S3Config = vfs.S3FsConfig{} user.FsConfig.GCSConfig = vfs.GCSFsConfig{} + user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{} return nil } @@ -1248,6 +1265,8 @@ func HideUserSensitiveData(user *User) User { user.FsConfig.S3Config.AccessSecret = utils.RemoveDecryptionKey(user.FsConfig.S3Config.AccessSecret) } else if user.FsConfig.Provider == GCSFilesystemProvider { user.FsConfig.GCSConfig.Credentials = nil + } else if user.FsConfig.Provider == AzureBlobFilesystemProvider { + user.FsConfig.AzBlobConfig.AccountKey = utils.RemoveDecryptionKey(user.FsConfig.AzBlobConfig.AccountKey) } return *user } diff --git a/dataprovider/user.go b/dataprovider/user.go index ee349168..b3001818 100644 --- a/dataprovider/user.go +++ b/dataprovider/user.go @@ -124,16 +124,18 @@ type FilesystemProvider int // supported values for FilesystemProvider const ( - LocalFilesystemProvider FilesystemProvider = iota // Local - S3FilesystemProvider // Amazon S3 compatible - GCSFilesystemProvider // Google Cloud Storage + LocalFilesystemProvider FilesystemProvider = iota // Local + S3FilesystemProvider // Amazon S3 compatible + GCSFilesystemProvider // Google Cloud Storage + AzureBlobFilesystemProvider // Azure Blob Storage ) // Filesystem defines cloud storage filesystem details type Filesystem struct { - Provider FilesystemProvider `json:"provider"` - S3Config vfs.S3FsConfig `json:"s3config,omitempty"` - GCSConfig vfs.GCSFsConfig `json:"gcsconfig,omitempty"` + Provider FilesystemProvider `json:"provider"` + S3Config vfs.S3FsConfig `json:"s3config,omitempty"` + GCSConfig vfs.GCSFsConfig `json:"gcsconfig,omitempty"` + AzBlobConfig vfs.AzBlobFsConfig `json:"azblobconfig,omitempty"` } // User defines a SFTPGo user @@ -196,6 +198,8 @@ func (u *User) GetFilesystem(connectionID string) (vfs.Fs, error) { config := u.FsConfig.GCSConfig config.CredentialFile = u.getGCSCredentialsFilePath() return vfs.NewGCSFs(connectionID, u.GetHomeDir(), config) + } else if u.FsConfig.Provider == AzureBlobFilesystemProvider { + return vfs.NewAzBlobFs(connectionID, u.GetHomeDir(), u.FsConfig.AzBlobConfig) } return vfs.NewOsFs(connectionID, u.GetHomeDir(), u.VirtualFolders), nil } @@ -626,6 +630,8 @@ func 
(u *User) GetInfoString() string { result += "Storage: S3 " } else if u.FsConfig.Provider == GCSFilesystemProvider { result += "Storage: GCS " + } else if u.FsConfig.Provider == AzureBlobFilesystemProvider { + result += "Storage: Azure " } if len(u.PublicKeys) > 0 { result += fmt.Sprintf("Public keys: %v ", len(u.PublicKeys)) } @@ -725,6 +731,17 @@ func (u *User) getACopy() User { StorageClass: u.FsConfig.GCSConfig.StorageClass, KeyPrefix: u.FsConfig.GCSConfig.KeyPrefix, }, + AzBlobConfig: vfs.AzBlobFsConfig{ + Container: u.FsConfig.AzBlobConfig.Container, + AccountName: u.FsConfig.AzBlobConfig.AccountName, + AccountKey: u.FsConfig.AzBlobConfig.AccountKey, + Endpoint: u.FsConfig.AzBlobConfig.Endpoint, + SASURL: u.FsConfig.AzBlobConfig.SASURL, + KeyPrefix: u.FsConfig.AzBlobConfig.KeyPrefix, + UploadPartSize: u.FsConfig.AzBlobConfig.UploadPartSize, + UploadConcurrency: u.FsConfig.AzBlobConfig.UploadConcurrency, + UseEmulator: u.FsConfig.AzBlobConfig.UseEmulator, + }, } return User{ diff --git a/docs/account.md b/docs/account.md index 5ffc6a77..a25ed3eb 100644 --- a/docs/account.md +++ b/docs/account.md @@ -45,7 +45,7 @@ For each account, the following properties can be configured: - `allowed_extensions`, list of, case insensitive, allowed files extension. Shell like expansion is not supported so you have to specify `.jpg` and not `*.jpg`. Any file that does not end with this suffix will be denied - `denied_extensions`, list of, case insensitive, denied files extension. Denied file extensions are evaluated before the allowed ones - `path`, SFTP/SCP path, if no other specific filter is defined, the filter apply for sub directories too. For example if filters are defined for the paths `/` and `/sub` then the filters for `/` are applied for any file outside the `/sub` directory -- `fs_provider`, filesystem to serve via SFTP. Local filesystem and S3 Compatible Object Storage are supported +- `fs_provider`, filesystem to serve via SFTP. Local filesystem, S3 Compatible Object Storage, Google Cloud Storage and Azure Blob Storage are supported - `s3_bucket`, required for S3 filesystem - `s3_region`, required for S3 filesystem. Must match the region for your bucket. You can find here the list of available [AWS regions](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). For example if your bucket is at `Frankfurt` you have to set the region to `eu-central-1` - `s3_access_key` @@ -60,6 +60,15 @@ For each account, the following properties can be configured: - `gcs_automatic_credentials`, integer. Set to 1 to use Application Default Credentials strategy or set to 0 to use explicit credentials via `gcs_credentials` - `gcs_storage_class` - `gcs_key_prefix`, allows to restrict access to the folder identified by this prefix and its contents +- `az_container`, Azure Blob Storage container +- `az_account_name`, Azure account name. Leave blank to use SAS URL +- `az_account_key`, Azure account key. Leave blank to use SAS URL. If provided, it is stored encrypted (AES-256-GCM) +- `az_sas_url`, Azure shared access signature URL +- `az_endpoint`, optional endpoint. Default is "blob.core.windows.net". If you use the emulator, the endpoint must include the protocol, for example "http://127.0.0.1:10000" +- `az_upload_part_size`, the buffer size for multipart uploads (MB). Zero means the default (4 MB) +- `az_upload_concurrency`, how many parts are uploaded in parallel. Zero means the default (2)
+- `az_key_prefix`, allows to restrict access to the folder identified by this prefix and its contents +- `az_use_emulator`, boolean These properties are stored inside the data provider. diff --git a/docs/azure-blob-storage.md b/docs/azure-blob-storage.md new file mode 100644 index 00000000..e3e7aabc --- /dev/null +++ b/docs/azure-blob-storage.md @@ -0,0 +1,20 @@ +# Azure Blob Storage backend + +To connect SFTPGo to Azure Blob Storage, you need to specify the access credentials. Azure Blob Storage has different options for credentials; we support: + +1. Providing an account name and account key. +2. Providing a shared access signature (SAS). + +If you authenticate using an account name and key, you also need to specify a container. The endpoint can generally be left blank, the default is `blob.core.windows.net`. + +If you provide a SAS URL, the container is optional; if given, it must match the one inside the shared access signature. + +If you want to connect to an emulator such as [Azurite](https://github.com/Azure/Azurite), you need to provide the account name/key pair and an endpoint prefixed with the protocol, for example `http://127.0.0.1:10000`. + +Specifying a different `key_prefix`, you can assign different "folders" of the same container to different users. This is similar to a chroot directory for local filesystem. Each SFTPGo user can only access the assigned folder and its contents. The folder identified by `key_prefix` does not need to be pre-created. + +For multipart uploads you can customize the parts size and the upload concurrency. Please note that if the upload bandwidth between the client and SFTPGo is greater than the upload bandwidth between SFTPGo and the Azure Blob service then the client will have to wait for the last parts to be uploaded to Azure after it finishes uploading the file to SFTPGo, and it may time out. Keep this in mind if you customize these parameters. + +The configured container must exist. + +This backend is very similar to the [S3](./s3.md) backend, and it has the same limitations.
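For reference, here is a minimal sketch of the two credential modes described above, expressed with the `dataprovider.Filesystem` and `vfs.AzBlobFsConfig` types this PR adds. The import paths are assumed to follow the upstream module layout, and every account, key, container and URL value is a placeholder for illustration, not part of the change:

```go
package main

import (
	"fmt"

	"github.com/drakkan/sftpgo/dataprovider" // assumed import path
	"github.com/drakkan/sftpgo/vfs"          // assumed import path
)

func main() {
	// Option 1: account name/key plus a container. The account key is
	// encrypted (AES-256-GCM) by the data provider before it is stored.
	withKey := dataprovider.Filesystem{
		Provider: dataprovider.AzureBlobFilesystemProvider,
		AzBlobConfig: vfs.AzBlobFsConfig{
			Container:         "backups",           // placeholder container
			AccountName:       "myaccount",         // placeholder account
			AccountKey:        "my-secret-key",     // plain text here, stored encrypted
			KeyPrefix:         "folder/subfolder/", // optional chroot-like prefix
			UploadPartSize:    4,                   // MB, zero means the default (4 MB)
			UploadConcurrency: 2,                   // zero means the default (2)
			// Endpoint left empty: "blob.core.windows.net" is the default.
		},
	}

	// Option 2: a shared access signature URL; the account key and,
	// optionally, the container can be omitted.
	withSAS := dataprovider.Filesystem{
		Provider: dataprovider.AzureBlobFilesystemProvider,
		AzBlobConfig: vfs.AzBlobFsConfig{
			SASURL: "https://myaccount.blob.core.windows.net/backups?sv=...", // placeholder SAS URL
		},
	}

	fmt.Println(withKey.AzBlobConfig.Container, withSAS.AzBlobConfig.SASURL)
}
```

Either configuration would then be handed to `vfs.NewAzBlobFs`, exactly as `User.GetFilesystem` does elsewhere in this PR.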
diff --git a/docs/build-from-source.md b/docs/build-from-source.md index de8ebb83..ddbde318 100644 --- a/docs/build-from-source.md +++ b/docs/build-from-source.md @@ -14,6 +14,7 @@ The following build tags are available: - `nogcs`, disable Google Cloud Storage backend, default enabled - `nos3`, disable S3 Compabible Object Storage backends, default enabled +- `noazblob`, disable Azure Blob Storage backend, default enabled - `nobolt`, disable Bolt data provider, default enabled - `nomysql`, disable MySQL data provider, default enabled - `nopgsql`, disable PostgreSQL data provider, default enabled diff --git a/docs/custom-actions.md b/docs/custom-actions.md index 981db685..c8ea7a0a 100644 --- a/docs/custom-actions.md +++ b/docs/custom-actions.md @@ -23,9 +23,9 @@ The external program can also read the following environment variables: - `SFTPGO_ACTION_TARGET`, non-empty for `rename` `SFTPGO_ACTION` - `SFTPGO_ACTION_SSH_CMD`, non-empty for `ssh_cmd` `SFTPGO_ACTION` - `SFTPGO_ACTION_FILE_SIZE`, non-empty for `upload`, `download` and `delete` `SFTPGO_ACTION` -- `SFTPGO_ACTION_FS_PROVIDER`, `0` for local filesystem, `1` for S3 backend, `2` for Google Cloud Storage (GCS) backend -- `SFTPGO_ACTION_BUCKET`, non-empty for S3 and GCS backends -- `SFTPGO_ACTION_ENDPOINT`, non-empty for S3 backend if configured +- `SFTPGO_ACTION_FS_PROVIDER`, `0` for local filesystem, `1` for S3 backend, `2` for Google Cloud Storage (GCS) backend, `3` for Azure Blob Storage backend +- `SFTPGO_ACTION_BUCKET`, non-empty for S3, GCS and Azure backends +- `SFTPGO_ACTION_ENDPOINT`, non-empty for S3 and Azure backends if configured. For Azure this is the SAS URL if configured, otherwise the endpoint - `SFTPGO_ACTION_STATUS`, integer. 0 means a generic error occurred. 1 means no error, 2 means quota exceeded error - `SFTPGO_ACTION_PROTOCOL`, string. Possible values are `SSH`, `SFTP`, `SCP`, `FTP`, `DAV` @@ -40,9 +40,9 @@ If the `hook` defines an HTTP URL then this URL will be invoked as HTTP POST. Th - `target_path`, not null for `rename` action - `ssh_cmd`, not null for `ssh_cmd` action - `file_size`, not null for `upload`, `download`, `delete` actions -- `fs_provider`, `0` for local filesystem, `1` for S3 backend, `2` for Google Cloud Storage (GCS) backend -- `bucket`, not null for S3 and GCS backends -- `endpoint`, not null for S3 backend if configured +- `fs_provider`, `0` for local filesystem, `1` for S3 backend, `2` for Google Cloud Storage (GCS) backend, `3` for Azure Blob Storage backend +- `bucket`, not null for S3, GCS and Azure backends +- `endpoint`, not null for S3 and Azure backends if configured. For Azure this is the SAS URL if configured, otherwise the endpoint - `status`, integer. 0 means a generic error occurred. 1 means no error, 2 means quota exceeded error - `protocol`, string. Possible values are `SSH`, `FTP`, `DAV` diff --git a/docs/google-cloud-storage.md b/docs/google-cloud-storage.md index 772f1d23..681bee7c 100644 --- a/docs/google-cloud-storage.md +++ b/docs/google-cloud-storage.md @@ -8,6 +8,4 @@ You can optionally specify a [storage class](https://cloud.google.com/storage/do The configured bucket must exist. -Google Cloud Storage is exposed over HTTPS so if you are running SFTPGo as docker image please be sure to uncomment the line that installs `ca-certificates`, inside your `Dockerfile`, to be able to properly verify certificate authorities. - This backend is very similar to the [S3](./s3.md) backend, and it has the same limitations.
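As a companion to the `docs/custom-actions.md` hunk above, here is a minimal sketch of an external hook command that consumes the documented environment variables, including the new `3` value for the Azure Blob backend. The env var names come from the doc; the binary behaviour and log format are purely illustrative, not part of this PR:

```go
package main

import (
	"log"
	"os"
)

func main() {
	action := os.Getenv("SFTPGO_ACTION")
	provider := os.Getenv("SFTPGO_ACTION_FS_PROVIDER") // "0" local, "1" S3, "2" GCS, "3" Azure Blob
	bucket := os.Getenv("SFTPGO_ACTION_BUCKET")        // bucket or container name
	endpoint := os.Getenv("SFTPGO_ACTION_ENDPOINT")    // for Azure: the SAS URL if configured, otherwise the endpoint
	status := os.Getenv("SFTPGO_ACTION_STATUS")        // "0" generic error, "1" no error, "2" quota exceeded

	if provider == "3" {
		log.Printf("action %q on Azure container %q (endpoint/SAS %q), status %s", action, bucket, endpoint, status)
		return
	}
	log.Printf("action %q on provider %s, status %s", action, provider, status)
}
```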
diff --git a/docs/s3.md b/docs/s3.md index e7932a1d..992ce37c 100644 --- a/docs/s3.md +++ b/docs/s3.md @@ -10,13 +10,11 @@ AWS SDK has different options for credentials. [More Detail](https://docs.aws.am So, you need to provide access keys to activate option 1, or leave them blank to use the other ways to specify credentials. -Most S3 backends require HTTPS connections so if you are running SFTPGo as docker image please be sure to uncomment the line that installs `ca-certificates`, inside your `Dockerfile`, to be able to properly verify certificate authorities. - Specifying a different `key_prefix`, you can assign different "folders" of the same bucket to different users. This is similar to a chroot directory for local filesystem. Each SFTP/SCP user can only access the assigned folder and its contents. The folder identified by `key_prefix` does not need to be pre-created. SFTPGo uses multipart uploads and parallel downloads for storing and retrieving files from S3. -For multipart uploads you can customize the parts size and the upload concurrency. Please note that if the upload bandwidth between the SFTP client and SFTPGo is greater than the upload bandwidth between SFTPGo and S3 then the SFTP client have to wait for the upload of the last parts to S3 after it ends the file upload to SFTPGo, and it may time out. Keep this in mind if you customize these parameters. +For multipart uploads you can customize the parts size and the upload concurrency. Please note that if the upload bandwidth between the client and SFTPGo is greater than the upload bandwidth between SFTPGo and S3 then the client will have to wait for the last parts to be uploaded to S3 after it finishes uploading the file to SFTPGo, and it may time out. Keep this in mind if you customize these parameters. The configured bucket must exist. @@ -32,7 +30,7 @@ Some SFTP commands don't work over S3: Other notes: - `rename` is a two step operation: server-side copy and then deletion. So, it is not atomic as for local filesystem. -- We don't support renaming non empty directories since we should rename all the contents too and this could take a long time: think about directories with thousands of files; for each file we should do an AWS API call. +- We don't support renaming non empty directories since we would need to rename all the contents too and this could take a long time: think about directories with thousands of files; for each file we would have to do an AWS API call. - For server side encryption, you have to configure the mapped bucket to automatically encrypt objects. - A local home directory is still required to store temporary files. - Clients that require advanced filesystem-like features such as `sshfs` are not supported.
diff --git a/examples/rest-api-cli/sftpgo_api_cli b/examples/rest-api-cli/sftpgo_api_cli index 673acad7..56136bc3 100755 --- a/examples/rest-api-cli/sftpgo_api_cli +++ b/examples/rest-api-cli/sftpgo_api_cli @@ -82,7 +82,9 @@ class SFTPGoApiRequests: s3_key_prefix='', gcs_bucket='', gcs_key_prefix='', gcs_storage_class='', gcs_credentials_file='', gcs_automatic_credentials='automatic', denied_login_methods=[], virtual_folders=[], denied_extensions=[], allowed_extensions=[], s3_upload_part_size=0, s3_upload_concurrency=0, - max_upload_file_size=0, denied_protocols=[]): + max_upload_file_size=0, denied_protocols=[], az_container="", az_account_name="", az_account_key="", + az_sas_url="", az_endpoint="", az_upload_part_size=0, az_upload_concurrency=0, az_key_prefix="", + az_use_emulator=False): user = {'id':user_id, 'username':username, 'uid':uid, 'gid':gid, 'max_sessions':max_sessions, 'quota_size':quota_size, 'quota_files':quota_files, 'upload_bandwidth':upload_bandwidth, 'download_bandwidth':download_bandwidth, @@ -106,7 +108,10 @@ class SFTPGoApiRequests: user.update({'filesystem':self.buildFsConfig(fs_provider, s3_bucket, s3_region, s3_access_key, s3_access_secret, s3_endpoint, s3_storage_class, s3_key_prefix, gcs_bucket, gcs_key_prefix, gcs_storage_class, gcs_credentials_file, - gcs_automatic_credentials, s3_upload_part_size, s3_upload_concurrency)}) + gcs_automatic_credentials, s3_upload_part_size, s3_upload_concurrency, + az_container, az_account_name, az_account_key, az_sas_url, + az_endpoint, az_upload_part_size, az_upload_concurrency, az_key_prefix, + az_use_emulator)}) return user def buildVirtualFolders(self, vfolders): @@ -228,7 +233,9 @@ class SFTPGoApiRequests: def buildFsConfig(self, fs_provider, s3_bucket, s3_region, s3_access_key, s3_access_secret, s3_endpoint, s3_storage_class, s3_key_prefix, gcs_bucket, gcs_key_prefix, gcs_storage_class, - gcs_credentials_file, gcs_automatic_credentials, s3_upload_part_size, s3_upload_concurrency): + gcs_credentials_file, gcs_automatic_credentials, s3_upload_part_size, s3_upload_concurrency, + az_container, az_account_name, az_account_key, az_sas_url, az_endpoint, az_upload_part_size, + az_upload_concurrency, az_key_prefix, az_use_emulator): fs_config = {'provider':0} if fs_provider == 'S3': s3config = {'bucket':s3_bucket, 'region':s3_region, 'access_key':s3_access_key, 'access_secret': @@ -246,6 +253,12 @@ class SFTPGoApiRequests: gcsconfig.update({'credentials':base64.b64encode(creds.read().encode('UTF-8')).decode('UTF-8'), 'automatic_credentials':0}) fs_config.update({'provider':2, 'gcsconfig':gcsconfig}) + elif fs_provider == "AzureBlob": + azureconfig = {"container":az_container, "account_name":az_account_name, "account_key":az_account_key, + "sas_url":az_sas_url, "endpoint":az_endpoint, "upload_part_size":az_upload_part_size, + "upload_concurrency":az_upload_concurrency, "key_prefix":az_key_prefix, "use_emulator": + az_use_emulator} + fs_config.update({'provider':3, 'azblobconfig':azureconfig}) return fs_config def getUsers(self, limit=100, offset=0, order='ASC', username=''): @@ -263,13 +276,17 @@ class SFTPGoApiRequests: s3_access_key='', s3_access_secret='', s3_endpoint='', s3_storage_class='', s3_key_prefix='', gcs_bucket='', gcs_key_prefix='', gcs_storage_class='', gcs_credentials_file='', gcs_automatic_credentials='automatic', denied_login_methods=[], virtual_folders=[], denied_extensions=[], allowed_extensions=[], - s3_upload_part_size=0, s3_upload_concurrency=0, max_upload_file_size=0, denied_protocols=[]): + 
s3_upload_part_size=0, s3_upload_concurrency=0, max_upload_file_size=0, denied_protocols=[], az_container="", + az_account_name="", az_account_key="", az_sas_url="", az_endpoint="", az_upload_part_size=0, + az_upload_concurrency=0, az_key_prefix="", az_use_emulator=False): u = self.buildUserObject(0, username, password, public_keys, home_dir, uid, gid, max_sessions, quota_size, quota_files, self.buildPermissions(perms, subdirs_permissions), upload_bandwidth, download_bandwidth, status, expiration_date, allowed_ip, denied_ip, fs_provider, s3_bucket, s3_region, s3_access_key, s3_access_secret, s3_endpoint, s3_storage_class, s3_key_prefix, gcs_bucket, gcs_key_prefix, gcs_storage_class, gcs_credentials_file, gcs_automatic_credentials, denied_login_methods, virtual_folders, denied_extensions, - allowed_extensions, s3_upload_part_size, s3_upload_concurrency, max_upload_file_size, denied_protocols) + allowed_extensions, s3_upload_part_size, s3_upload_concurrency, max_upload_file_size, denied_protocols, + az_container, az_account_name, az_account_key, az_sas_url, az_endpoint, az_upload_part_size, + az_upload_concurrency, az_key_prefix, az_use_emulator) r = requests.post(self.userPath, json=u, auth=self.auth, verify=self.verify) self.printResponse(r) @@ -280,13 +297,17 @@ class SFTPGoApiRequests: s3_key_prefix='', gcs_bucket='', gcs_key_prefix='', gcs_storage_class='', gcs_credentials_file='', gcs_automatic_credentials='automatic', denied_login_methods=[], virtual_folders=[], denied_extensions=[], allowed_extensions=[], s3_upload_part_size=0, s3_upload_concurrency=0, max_upload_file_size=0, - denied_protocols=[], disconnect=0): + denied_protocols=[], disconnect=0, az_container="", + az_account_name="", az_account_key="", az_sas_url="", az_endpoint="", az_upload_part_size=0, + az_upload_concurrency=0, az_key_prefix="", az_use_emulator=False): u = self.buildUserObject(user_id, username, password, public_keys, home_dir, uid, gid, max_sessions, quota_size, quota_files, self.buildPermissions(perms, subdirs_permissions), upload_bandwidth, download_bandwidth, status, expiration_date, allowed_ip, denied_ip, fs_provider, s3_bucket, s3_region, s3_access_key, s3_access_secret, s3_endpoint, s3_storage_class, s3_key_prefix, gcs_bucket, gcs_key_prefix, gcs_storage_class, gcs_credentials_file, gcs_automatic_credentials, denied_login_methods, virtual_folders, denied_extensions, - allowed_extensions, s3_upload_part_size, s3_upload_concurrency, max_upload_file_size, denied_protocols) + allowed_extensions, s3_upload_part_size, s3_upload_concurrency, max_upload_file_size, denied_protocols, + az_container, az_account_name, az_account_key, az_sas_url, az_endpoint, az_upload_part_size, + az_upload_concurrency, az_key_prefix, az_use_emulator) r = requests.put(urlparse.urljoin(self.userPath, 'user/' + str(user_id)), params={'disconnect':disconnect}, json=u, auth=self.auth, verify=self.verify) self.printResponse(r) @@ -593,7 +614,7 @@ def addCommonUserArguments(parser): parser.add_argument('--allowed-extensions', type=str, nargs='*', default=[], help='Allowed file extensions case insensitive. ' +'The format is /dir::ext1,ext2. For example: "/somedir::.jpg,.png" "/otherdir/subdir::.zip,.rar". ' + 'Default: %(default)s') - parser.add_argument('--fs', type=str, default='local', choices=['local', 'S3', 'GCS'], + parser.add_argument('--fs', type=str, default='local', choices=['local', 'S3', 'GCS', "AzureBlob"], help='Filesystem provider. 
Default: %(default)s') parser.add_argument('--s3-bucket', type=str, default='', help='Default: %(default)s') parser.add_argument('--s3-key-prefix', type=str, default='', help='Virtual root directory. If non empty only this ' + @@ -616,6 +637,19 @@ def addCommonUserArguments(parser): parser.add_argument('--gcs-credentials-file', type=str, default='', help='Default: %(default)s') parser.add_argument('--gcs-automatic-credentials', type=str, default='automatic', choices=['explicit', 'automatic'], help='If you provide a credentials file this argument will be setted to "explicit". Default: %(default)s') + parser.add_argument('--az-container', type=str, default='', help='Default: %(default)s') + parser.add_argument('--az-account-name', type=str, default='', help='Default: %(default)s') + parser.add_argument('--az-account-key', type=str, default='', help='Default: %(default)s') + parser.add_argument('--az-sas-url', type=str, default='', help='Shared access signature URL. Default: %(default)s') + parser.add_argument('--az-endpoint', type=str, default='', help='Default: %(default)s') + parser.add_argument('--az-upload-part-size', type=int, default=0, help='The buffer size for multipart uploads (MB). ' + + 'Zero means the default (4 MB). Default: %(default)s') + parser.add_argument('--az-upload-concurrency', type=int, default=0, help='How many parts are uploaded in parallel. ' + + 'Zero means the default (2). Default: %(default)s') + parser.add_argument('--az-key-prefix', type=str, default='', help='Virtual root directory. If non empty only this ' + + 'directory and its contents will be available. Cannot start with "/". For example "folder/subfolder/".' + + ' Default: %(default)s') + parser.add_argument('--az-use-emulator', type=bool, default=False, help='Default: %(default)s') if __name__ == '__main__': @@ -769,7 +803,9 @@ if __name__ == '__main__': args.s3_endpoint, args.s3_storage_class, args.s3_key_prefix, args.gcs_bucket, args.gcs_key_prefix, args.gcs_storage_class, args.gcs_credentials_file, args.gcs_automatic_credentials, args.denied_login_methods, args.virtual_folders, args.denied_extensions, args.allowed_extensions, - args.s3_upload_part_size, args.s3_upload_concurrency, args.max_upload_file_size, args.denied_protocols) + args.s3_upload_part_size, args.s3_upload_concurrency, args.max_upload_file_size, args.denied_protocols, + args.az_container, args.az_account_name, args.az_account_key, args.az_sas_url, args.az_endpoint, + args.az_upload_part_size, args.az_upload_concurrency, args.az_key_prefix, args.az_use_emulator) elif args.command == 'update-user': api.updateUser(args.id, args.username, args.password, args.public_keys, args.home_dir, args.uid, args.gid, args.max_sessions, args.quota_size, args.quota_files, args.permissions, args.upload_bandwidth, @@ -779,7 +815,9 @@ if __name__ == '__main__': args.s3_key_prefix, args.gcs_bucket, args.gcs_key_prefix, args.gcs_storage_class, args.gcs_credentials_file, args.gcs_automatic_credentials, args.denied_login_methods, args.virtual_folders, args.denied_extensions, args.allowed_extensions, args.s3_upload_part_size, - args.s3_upload_concurrency, args.max_upload_file_size, args.denied_protocols, args.disconnect) + args.s3_upload_concurrency, args.max_upload_file_size, args.denied_protocols, args.disconnect, + args.az_container, args.az_account_name, args.az_account_key, args.az_sas_url, args.az_endpoint, + args.az_upload_part_size, args.az_upload_concurrency, args.az_key_prefix, args.az_use_emulator) elif args.command == 'delete-user':
api.deleteUser(args.id) elif args.command == 'get-users': diff --git a/ftpd/transfer.go b/ftpd/transfer.go index 54b7acfd..833d3367 100644 --- a/ftpd/transfer.go +++ b/ftpd/transfer.go @@ -45,38 +45,34 @@ func newTransfer(baseTransfer *common.BaseTransfer, pipeWriter *vfs.PipeWriter, // Read reads the contents to downloads. func (t *transfer) Read(p []byte) (n int, err error) { t.Connection.UpdateLastActivity() - var readed int - var e error - readed, e = t.reader.Read(p) - atomic.AddInt64(&t.BytesSent, int64(readed)) + n, err = t.reader.Read(p) + atomic.AddInt64(&t.BytesSent, int64(n)) - if e != nil && e != io.EOF { - t.TransferError(e) - return readed, e + if err != nil && err != io.EOF { + t.TransferError(err) + return } t.HandleThrottle() - return readed, e + return } // Write writes the uploaded contents. func (t *transfer) Write(p []byte) (n int, err error) { t.Connection.UpdateLastActivity() - var written int - var e error - written, e = t.writer.Write(p) - atomic.AddInt64(&t.BytesReceived, int64(written)) + n, err = t.writer.Write(p) + atomic.AddInt64(&t.BytesReceived, int64(n)) - if t.MaxWriteSize > 0 && e == nil && atomic.LoadInt64(&t.BytesReceived) > t.MaxWriteSize { - e = common.ErrQuotaExceeded + if t.MaxWriteSize > 0 && err == nil && atomic.LoadInt64(&t.BytesReceived) > t.MaxWriteSize { + err = common.ErrQuotaExceeded } - if e != nil { - t.TransferError(e) - return written, e + if err != nil { + t.TransferError(err) + return } t.HandleThrottle() - return written, e + return } // Seek sets the offset to resume an upload or a download diff --git a/go.mod b/go.mod index 613d3d3a..e6e962e8 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,7 @@ go 1.14 require ( cloud.google.com/go v0.69.1 // indirect cloud.google.com/go/storage v1.12.0 + github.com/Azure/azure-storage-blob-go v0.10.0 github.com/GehirnInc/crypt v0.0.0-20200316065508-bb7000b8a962 github.com/alexedwards/argon2id v0.0.0-20200802152012-2464efd3196b github.com/aws/aws-sdk-go v1.35.9 diff --git a/go.sum b/go.sum index 85e6e44d..d295a1a5 100644 --- a/go.sum +++ b/go.sum @@ -37,6 +37,20 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 cloud.google.com/go/storage v1.12.0 h1:4y3gHptW1EHVtcPAVE0eBBlFuGqEejTTG3KdIE0lUX4= cloud.google.com/go/storage v1.12.0/go.mod h1:fFLk2dp2oAhDz8QFKwqrjdJvxSp/W2g7nillojlL5Ho= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY= +github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= +github.com/Azure/azure-storage-blob-go v0.10.0 h1:evCwGreYo3XLeBV4vSxLbLiYb6e0SzsJiXQVRGsRXxs= +github.com/Azure/azure-storage-blob-go v0.10.0/go.mod h1:ep1edmW+kNQx4UfWM9heESNmQdijykocJ0YOxmMX8SE= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod 
h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/GehirnInc/crypt v0.0.0-20200316065508-bb7000b8a962 h1:KeNholpO2xKjgaaSyd+DyQRrsQjhbSeS7qe4nEw8aQw= @@ -212,6 +226,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= @@ -294,6 +310,8 @@ github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP github.com/magiconair/properties v1.8.4 h1:8KGKTcQQGm0Kv7vEbKFErAoAOFyyacLStRtQSeYtvkY= github.com/magiconair/properties v1.8.4/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw= +github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= diff --git a/httpd/api_user.go b/httpd/api_user.go index 29581bc3..5c257335 100644 --- a/httpd/api_user.go +++ b/httpd/api_user.go @@ -118,9 +118,13 @@ func updateUser(w http.ResponseWriter, r *http.Request) { } currentPermissions := user.Permissions currentS3AccessSecret := "" + currentAzAccountKey := "" if user.FsConfig.Provider == dataprovider.S3FilesystemProvider { currentS3AccessSecret = user.FsConfig.S3Config.AccessSecret } + if user.FsConfig.Provider == dataprovider.AzureBlobFilesystemProvider { + currentAzAccountKey = user.FsConfig.AzBlobConfig.AccountKey + } user.Permissions = make(map[string][]string) err = render.DecodeJSON(r.Body, &user) if err != nil { @@ -131,13 +135,8 @@ func updateUser(w http.ResponseWriter, r *http.Request) { if len(user.Permissions) == 0 { user.Permissions = currentPermissions } - // we use the new access secret if different from the old one and not empty - if user.FsConfig.Provider == dataprovider.S3FilesystemProvider { - if utils.RemoveDecryptionKey(currentS3AccessSecret) == user.FsConfig.S3Config.AccessSecret || - (len(user.FsConfig.S3Config.AccessSecret) == 0 && len(user.FsConfig.S3Config.AccessKey) > 0) { - 
user.FsConfig.S3Config.AccessSecret = currentS3AccessSecret - } - } + updateEncryptedSecrets(&user, currentS3AccessSecret, currentAzAccountKey) + if user.ID != userID { sendAPIResponse(w, r, err, "user ID in request body does not match user ID in path parameter", http.StatusBadRequest) return @@ -181,3 +180,19 @@ func disconnectUser(username string) { } } } + +func updateEncryptedSecrets(user *dataprovider.User, currentS3AccessSecret, currentAzAccountKey string) { + // we use the new access secret if different from the old one and not empty + if user.FsConfig.Provider == dataprovider.S3FilesystemProvider { + if utils.RemoveDecryptionKey(currentS3AccessSecret) == user.FsConfig.S3Config.AccessSecret || + (user.FsConfig.S3Config.AccessSecret == "" && user.FsConfig.S3Config.AccessKey != "") { + user.FsConfig.S3Config.AccessSecret = currentS3AccessSecret + } + } + if user.FsConfig.Provider == dataprovider.AzureBlobFilesystemProvider { + if utils.RemoveDecryptionKey(currentAzAccountKey) == user.FsConfig.AzBlobConfig.AccountKey || + (user.FsConfig.AzBlobConfig.AccountKey == "" && user.FsConfig.AzBlobConfig.AccountName != "") { + user.FsConfig.AzBlobConfig.AccountKey = currentAzAccountKey + } + } +} diff --git a/httpd/api_utils.go b/httpd/api_utils.go index 2777df24..91d2c98d 100644 --- a/httpd/api_utils.go +++ b/httpd/api_utils.go @@ -620,6 +620,9 @@ func compareUserFsConfig(expected *dataprovider.User, actual *dataprovider.User) if err := compareGCSConfig(expected, actual); err != nil { return err } + if err := compareAzBlobConfig(expected, actual); err != nil { + return err + } return nil } @@ -633,8 +636,8 @@ func compareS3Config(expected *dataprovider.User, actual *dataprovider.User) err if expected.FsConfig.S3Config.AccessKey != actual.FsConfig.S3Config.AccessKey { return errors.New("S3 access key mismatch") } - if err := checkS3AccessSecret(expected.FsConfig.S3Config.AccessSecret, actual.FsConfig.S3Config.AccessSecret); err != nil { - return err + if err := checkEncryptedSecret(expected.FsConfig.S3Config.AccessSecret, actual.FsConfig.S3Config.AccessSecret); err != nil { + return fmt.Errorf("S3 access secret mismatch: %v", err) } if expected.FsConfig.S3Config.Endpoint != actual.FsConfig.S3Config.Endpoint { return errors.New("S3 endpoint mismatch") @@ -672,29 +675,61 @@ func compareGCSConfig(expected *dataprovider.User, actual *dataprovider.User) er return nil } -func checkS3AccessSecret(expectedAccessSecret, actualAccessSecret string) error { +func compareAzBlobConfig(expected *dataprovider.User, actual *dataprovider.User) error { + if expected.FsConfig.AzBlobConfig.Container != actual.FsConfig.AzBlobConfig.Container { + return errors.New("Azure Blob container mismatch") + } + if expected.FsConfig.AzBlobConfig.AccountName != actual.FsConfig.AzBlobConfig.AccountName { + return errors.New("Azure Blob account name mismatch") + } + if err := checkEncryptedSecret(expected.FsConfig.AzBlobConfig.AccountKey, actual.FsConfig.AzBlobConfig.AccountKey); err != nil { + return fmt.Errorf("Azure Blob account key mismatch: %v", err) + } + if expected.FsConfig.AzBlobConfig.Endpoint != actual.FsConfig.AzBlobConfig.Endpoint { + return errors.New("Azure Blob endpoint mismatch") + } + if expected.FsConfig.AzBlobConfig.SASURL != actual.FsConfig.AzBlobConfig.SASURL { + return errors.New("Azure Blob SAS URL mismatch") + } + if expected.FsConfig.AzBlobConfig.UploadPartSize != actual.FsConfig.AzBlobConfig.UploadPartSize { + return errors.New("Azure Blob upload part size mismatch") + } + if
expected.FsConfig.AzBlobConfig.UploadConcurrency != actual.FsConfig.AzBlobConfig.UploadConcurrency { + return errors.New("Azure Blob upload concurrency mismatch") + } + if expected.FsConfig.AzBlobConfig.KeyPrefix != actual.FsConfig.AzBlobConfig.KeyPrefix && + expected.FsConfig.AzBlobConfig.KeyPrefix+"/" != actual.FsConfig.AzBlobConfig.KeyPrefix { + return errors.New("Azure Blob key prefix mismatch") + } + if expected.FsConfig.AzBlobConfig.UseEmulator != actual.FsConfig.AzBlobConfig.UseEmulator { + return errors.New("Azure Blob use emulator mismatch") + } + return nil +} + +func checkEncryptedSecret(expectedAccessSecret, actualAccessSecret string) error { if len(expectedAccessSecret) > 0 { vals := strings.Split(expectedAccessSecret, "$") if strings.HasPrefix(expectedAccessSecret, "$aes$") && len(vals) == 4 { expectedAccessSecret = utils.RemoveDecryptionKey(expectedAccessSecret) if expectedAccessSecret != actualAccessSecret { - return fmt.Errorf("S3 access secret mismatch, expected: %v", expectedAccessSecret) + return fmt.Errorf("secret mismatch, expected: %v", expectedAccessSecret) } } else { // here we check that actualAccessSecret is aes encrypted without the nonce parts := strings.Split(actualAccessSecret, "$") if !strings.HasPrefix(actualAccessSecret, "$aes$") || len(parts) != 3 { - return errors.New("Invalid S3 access secret") + return errors.New("invalid secret") } if len(parts) == len(vals) { if expectedAccessSecret != actualAccessSecret { - return errors.New("S3 encrypted access secret mismatch") + return errors.New("encrypted secret mismatch") } } } } else { if expectedAccessSecret != actualAccessSecret { - return errors.New("S3 access secret mismatch") + return errors.New("secret mismatch") } } return nil diff --git a/httpd/httpd_test.go b/httpd/httpd_test.go index eff43b42..cf986ded 100644 --- a/httpd/httpd_test.go +++ b/httpd/httpd_test.go @@ -433,6 +433,23 @@ func TestAddUserInvalidFsConfig(t *testing.T) { u.FsConfig.GCSConfig.Credentials = invalidBase64{} _, _, err = httpd.AddUser(u, http.StatusBadRequest) assert.NoError(t, err) + + u = getTestUser() + u.FsConfig.Provider = dataprovider.AzureBlobFilesystemProvider + u.FsConfig.AzBlobConfig.SASURL = "http://foo\x7f.com/" + _, _, err = httpd.AddUser(u, http.StatusBadRequest) + assert.NoError(t, err) + u.FsConfig.AzBlobConfig.SASURL = "" + u.FsConfig.AzBlobConfig.AccountName = "name" + _, _, err = httpd.AddUser(u, http.StatusBadRequest) + assert.NoError(t, err) + u.FsConfig.AzBlobConfig.Container = "container" + _, _, err = httpd.AddUser(u, http.StatusBadRequest) + assert.NoError(t, err) + u.FsConfig.AzBlobConfig.AccountKey = "key" + u.FsConfig.AzBlobConfig.KeyPrefix = "/amedir/subdir/" + _, _, err = httpd.AddUser(u, http.StatusBadRequest) + assert.NoError(t, err) } func TestAddUserInvalidVirtualFolders(t *testing.T) { @@ -1024,6 +1041,50 @@ func TestUserGCSConfig(t *testing.T) { assert.NoError(t, err) } +func TestUserAzureBlobConfig(t *testing.T) { + user, _, err := httpd.AddUser(getTestUser(), http.StatusOK) + assert.NoError(t, err) + user.FsConfig.Provider = dataprovider.AzureBlobFilesystemProvider + user.FsConfig.AzBlobConfig.Container = "test" + user.FsConfig.AzBlobConfig.AccountName = "Server-Account-Name" + user.FsConfig.AzBlobConfig.AccountKey = "Server-Account-Key" + user.FsConfig.AzBlobConfig.Endpoint = "http://127.0.0.1:9000" + user.FsConfig.AzBlobConfig.UploadPartSize = 8 + user, _, err = httpd.UpdateUser(user, http.StatusOK, "") + assert.NoError(t, err) + _, err = httpd.RemoveUser(user, http.StatusOK) + 
assert.NoError(t, err) + user.Password = defaultPassword + user.ID = 0 + secret, _ := utils.EncryptData("Server-Account-Key") + user.FsConfig.AzBlobConfig.AccountKey = secret + user, _, err = httpd.AddUser(user, http.StatusOK) + assert.NoError(t, err) + user.FsConfig.Provider = dataprovider.AzureBlobFilesystemProvider + user.FsConfig.AzBlobConfig.Container = "test-container" + user.FsConfig.AzBlobConfig.AccountKey = "Server-Account-Key1" + user.FsConfig.AzBlobConfig.Endpoint = "http://localhost:9001" + user.FsConfig.AzBlobConfig.KeyPrefix = "somedir/subdir" + user.FsConfig.AzBlobConfig.UploadConcurrency = 5 + user, _, err = httpd.UpdateUser(user, http.StatusOK, "") + assert.NoError(t, err) + user.FsConfig.Provider = dataprovider.LocalFilesystemProvider + user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{} + user, _, err = httpd.UpdateUser(user, http.StatusOK, "") + assert.NoError(t, err) + // test user without access key and access secret (sas) + user.FsConfig.Provider = dataprovider.AzureBlobFilesystemProvider + + user.FsConfig.AzBlobConfig.SASURL = "https://myaccount.blob.core.windows.net/pictures/profile.jpg?sv=2012-02-12&st=2009-02-09&se=2009-02-10&sr=c&sp=r&si=YWJjZGVmZw%3d%3d&sig=dD80ihBh5jfNpymO5Hg1IdiJIEvHcJpCMiCMnN%2fRnbI%3d" + user.FsConfig.AzBlobConfig.KeyPrefix = "somedir/subdir" + user.FsConfig.AzBlobConfig.UploadPartSize = 6 + user.FsConfig.AzBlobConfig.UploadConcurrency = 4 + user, _, err = httpd.UpdateUser(user, http.StatusOK, "") + assert.NoError(t, err) + _, err = httpd.RemoveUser(user, http.StatusOK) + assert.NoError(t, err) +} + func TestUpdateUserNoCredentials(t *testing.T) { user, _, err := httpd.AddUser(getTestUser(), http.StatusOK) assert.NoError(t, err) @@ -2736,6 +2797,96 @@ func TestWebUserGCSMock(t *testing.T) { err = os.Remove(credentialsFilePath) assert.NoError(t, err) } +func TestWebUserAzureBlobMock(t *testing.T) { + user := getTestUser() + userAsJSON := getUserAsJSON(t, user) + req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON)) + rr := executeRequest(req) + checkResponseCode(t, http.StatusOK, rr.Code) + err := render.DecodeJSON(rr.Body, &user) + assert.NoError(t, err) + user.FsConfig.Provider = dataprovider.AzureBlobFilesystemProvider + user.FsConfig.AzBlobConfig.Container = "container" + user.FsConfig.AzBlobConfig.AccountName = "aname" + user.FsConfig.AzBlobConfig.AccountKey = "access-skey" + user.FsConfig.AzBlobConfig.Endpoint = "http://127.0.0.1:9000/path?b=c" + user.FsConfig.AzBlobConfig.KeyPrefix = "somedir/subdir/" + user.FsConfig.AzBlobConfig.UploadPartSize = 5 + user.FsConfig.AzBlobConfig.UploadConcurrency = 4 + user.FsConfig.AzBlobConfig.UseEmulator = true + form := make(url.Values) + form.Set("username", user.Username) + form.Set("home_dir", user.HomeDir) + form.Set("uid", "0") + form.Set("gid", strconv.FormatInt(int64(user.GID), 10)) + form.Set("max_sessions", strconv.FormatInt(int64(user.MaxSessions), 10)) + form.Set("quota_size", strconv.FormatInt(user.QuotaSize, 10)) + form.Set("quota_files", strconv.FormatInt(int64(user.QuotaFiles), 10)) + form.Set("upload_bandwidth", "0") + form.Set("download_bandwidth", "0") + form.Set("permissions", "*") + form.Set("sub_dirs_permissions", "") + form.Set("status", strconv.Itoa(user.Status)) + form.Set("expiration_date", "2020-01-01 00:00:00") + form.Set("allowed_ip", "") + form.Set("denied_ip", "") + form.Set("fs_provider", "3") + form.Set("az_container", user.FsConfig.AzBlobConfig.Container) + form.Set("az_account_name", user.FsConfig.AzBlobConfig.AccountName) + 
form.Set("az_account_key", user.FsConfig.AzBlobConfig.AccountKey) + form.Set("az_sas_url", user.FsConfig.AzBlobConfig.SASURL) + form.Set("az_endpoint", user.FsConfig.AzBlobConfig.Endpoint) + form.Set("az_key_prefix", user.FsConfig.AzBlobConfig.KeyPrefix) + form.Set("az_use_emulator", "checked") + form.Set("allowed_extensions", "/dir1::.jpg,.png") + form.Set("denied_extensions", "/dir2::.zip") + form.Set("max_upload_file_size", "0") + // test invalid az_upload_part_size + form.Set("az_upload_part_size", "a") + b, contentType, _ := getMultipartFormData(form, "", "") + req, _ = http.NewRequest(http.MethodPost, webUserPath+"/"+strconv.FormatInt(user.ID, 10), &b) + req.Header.Set("Content-Type", contentType) + rr = executeRequest(req) + checkResponseCode(t, http.StatusOK, rr.Code) + // test invalid az_upload_concurrency + form.Set("az_upload_part_size", strconv.FormatInt(user.FsConfig.AzBlobConfig.UploadPartSize, 10)) + form.Set("az_upload_concurrency", "a") + b, contentType, _ = getMultipartFormData(form, "", "") + req, _ = http.NewRequest(http.MethodPost, webUserPath+"/"+strconv.FormatInt(user.ID, 10), &b) + req.Header.Set("Content-Type", contentType) + rr = executeRequest(req) + checkResponseCode(t, http.StatusOK, rr.Code) + // now add the user + form.Set("az_upload_concurrency", strconv.Itoa(user.FsConfig.AzBlobConfig.UploadConcurrency)) + b, contentType, _ = getMultipartFormData(form, "", "") + req, _ = http.NewRequest(http.MethodPost, webUserPath+"/"+strconv.FormatInt(user.ID, 10), &b) + req.Header.Set("Content-Type", contentType) + rr = executeRequest(req) + checkResponseCode(t, http.StatusSeeOther, rr.Code) + req, _ = http.NewRequest(http.MethodGet, userPath+"?limit=1&offset=0&order=ASC&username="+user.Username, nil) + rr = executeRequest(req) + checkResponseCode(t, http.StatusOK, rr.Code) + var users []dataprovider.User + err = render.DecodeJSON(rr.Body, &users) + assert.NoError(t, err) + assert.Equal(t, 1, len(users)) + updateUser := users[0] + assert.Equal(t, int64(1577836800000), updateUser.ExpirationDate) + assert.Equal(t, updateUser.FsConfig.AzBlobConfig.Container, user.FsConfig.AzBlobConfig.Container) + assert.Equal(t, updateUser.FsConfig.AzBlobConfig.AccountName, user.FsConfig.AzBlobConfig.AccountName) + assert.Equal(t, updateUser.FsConfig.AzBlobConfig.Endpoint, user.FsConfig.AzBlobConfig.Endpoint) + assert.Equal(t, updateUser.FsConfig.AzBlobConfig.SASURL, user.FsConfig.AzBlobConfig.SASURL) + assert.Equal(t, updateUser.FsConfig.AzBlobConfig.KeyPrefix, user.FsConfig.AzBlobConfig.KeyPrefix) + assert.Equal(t, updateUser.FsConfig.AzBlobConfig.UploadPartSize, user.FsConfig.AzBlobConfig.UploadPartSize) + assert.Equal(t, updateUser.FsConfig.AzBlobConfig.UploadConcurrency, user.FsConfig.AzBlobConfig.UploadConcurrency) + assert.Equal(t, 2, len(updateUser.Filters.FileExtensions)) + if !strings.HasPrefix(updateUser.FsConfig.AzBlobConfig.AccountKey, "$aes$") { + t.Error("azure account secret is not encrypted") + } + req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil) + rr = executeRequest(req) + checkResponseCode(t, http.StatusOK, rr.Code) +} func TestAddWebFoldersMock(t *testing.T) { mappedPath := filepath.Clean(os.TempDir()) diff --git a/httpd/internal_test.go b/httpd/internal_test.go index 4ea0273b..9a21daf8 100644 --- a/httpd/internal_test.go +++ b/httpd/internal_test.go @@ -353,6 +353,47 @@ func TestCompareUserGCSConfig(t *testing.T) { expected.FsConfig.GCSConfig.AutomaticCredentials = 0 } +func TestCompareUserAzureConfig(t *testing.T) { + 
expected := &dataprovider.User{} + actual := &dataprovider.User{} + expected.FsConfig.AzBlobConfig.Container = "a" + err := compareUserFsConfig(expected, actual) + assert.Error(t, err) + expected.FsConfig.AzBlobConfig.Container = "" + expected.FsConfig.AzBlobConfig.AccountName = "aname" + err = compareUserFsConfig(expected, actual) + assert.Error(t, err) + expected.FsConfig.AzBlobConfig.AccountName = "" + expected.FsConfig.AzBlobConfig.AccountKey = "akey" + err = compareUserFsConfig(expected, actual) + assert.Error(t, err) + expected.FsConfig.AzBlobConfig.AccountKey = "" + expected.FsConfig.AzBlobConfig.Endpoint = "endpt" + err = compareUserFsConfig(expected, actual) + assert.Error(t, err) + expected.FsConfig.AzBlobConfig.Endpoint = "" + expected.FsConfig.AzBlobConfig.SASURL = "url" + err = compareUserFsConfig(expected, actual) + assert.Error(t, err) + expected.FsConfig.AzBlobConfig.SASURL = "" + expected.FsConfig.AzBlobConfig.UploadPartSize = 1 + err = compareUserFsConfig(expected, actual) + assert.Error(t, err) + expected.FsConfig.AzBlobConfig.UploadPartSize = 0 + expected.FsConfig.AzBlobConfig.UploadConcurrency = 1 + err = compareUserFsConfig(expected, actual) + assert.Error(t, err) + expected.FsConfig.AzBlobConfig.UploadConcurrency = 0 + expected.FsConfig.AzBlobConfig.KeyPrefix = "prefix/" + err = compareUserFsConfig(expected, actual) + assert.Error(t, err) + expected.FsConfig.AzBlobConfig.KeyPrefix = "" + expected.FsConfig.AzBlobConfig.UseEmulator = true + err = compareUserFsConfig(expected, actual) + assert.Error(t, err) + expected.FsConfig.AzBlobConfig.UseEmulator = false +} + func TestGCSWebInvalidFormFile(t *testing.T) { form := make(url.Values) form.Set("username", "test_username") diff --git a/httpd/schema/openapi.yaml b/httpd/schema/openapi.yaml index 2b16cdec..390bdea2 100644 --- a/httpd/schema/openapi.yaml +++ b/httpd/schema/openapi.yaml @@ -925,10 +925,8 @@ components: minLength: 1 access_key: type: string - minLength: 1 access_secret: type: string - minLength: 1 description: the access secret is stored encrypted (AES-256-GCM) endpoint: type: string @@ -980,6 +978,37 @@ components: - bucket nullable: true description: Google Cloud Storage configuration details + AzureBlobFsConfig: + type: object + properties: + container: + type: string + account_name: + type: string + description: Storage Account Name, leave blank to use SAS URL + account_key: + type: string + description: Storage Account Key leave blank to use SAS URL. The access key is stored encrypted (AES-256-GCM) + sas_url: + type: string + description: Shared access signature URL, leave blank if using account/key + endpoint: + type: string + description: optional endpoint. Default is "blob.core.windows.net". If you use the emulator the endpoint must include the protocol, for example "http://127.0.0.1:10000" + upload_part_size: + type: integer + description: the buffer size (in MB) to use for multipart uploads. If this value is set to zero, the default value (4MB) will be used. + upload_concurrency: + type: integer + description: the number of parts to upload in parallel. If this value is set to zero, the default value (2) will be used + key_prefix: + type: string + description: key_prefix is similar to a chroot directory for a local filesystem. If specified the user will only see contents that starts with this prefix and so you can restrict access to a specific virtual folder. The prefix, if not empty, must not start with "/" and must end with "/". 
If empty the whole container contents will be available + example: folder/subfolder/ + use_emulator: + type: boolean + nullable: true + description: Azure Blob Storage configuration details FilesystemConfig: type: object properties: @@ -989,15 +1018,19 @@ components: - 0 - 1 - 2 + - 3 description: > Providers: - * `0` - local filesystem + * `0` - Local filesystem * `1` - S3 Compatible Object Storage * `2` - Google Cloud Storage + * `3` - Azure Blob Storage s3config: $ref: '#/components/schemas/S3Config' gcsconfig: $ref: '#/components/schemas/GCSConfig' + azblobconfig: + $ref: '#/components/schemas/AzureBlobFsConfig' description: Storage filesystem details BaseVirtualFolder: type: object diff --git a/httpd/web.go b/httpd/web.go index 7fc5c44e..a59d5aa9 100644 --- a/httpd/web.go +++ b/httpd/web.go @@ -431,6 +431,22 @@ func getFsConfigFromUserPostFields(r *http.Request) (dataprovider.Filesystem, er } fs.GCSConfig.Credentials = fileBytes fs.GCSConfig.AutomaticCredentials = 0 + } else if fs.Provider == dataprovider.AzureBlobFilesystemProvider { + fs.AzBlobConfig.Container = r.Form.Get("az_container") + fs.AzBlobConfig.AccountName = r.Form.Get("az_account_name") + fs.AzBlobConfig.AccountKey = r.Form.Get("az_account_key") + fs.AzBlobConfig.SASURL = r.Form.Get("az_sas_url") + fs.AzBlobConfig.Endpoint = r.Form.Get("az_endpoint") + fs.AzBlobConfig.KeyPrefix = r.Form.Get("az_key_prefix") + fs.AzBlobConfig.UseEmulator = len(r.Form.Get("az_use_emulator")) > 0 + fs.AzBlobConfig.UploadPartSize, err = strconv.ParseInt(r.Form.Get("az_upload_part_size"), 10, 64) + if err != nil { + return fs, err + } + fs.AzBlobConfig.UploadConcurrency, err = strconv.Atoi(r.Form.Get("az_upload_concurrency")) + if err != nil { + return fs, err + } } return fs, nil } diff --git a/pkgs/build.sh b/pkgs/build.sh index 6717f015..0c6a04e9 100755 --- a/pkgs/build.sh +++ b/pkgs/build.sh @@ -42,8 +42,8 @@ provides: description: | Fully featured and highly configurable SFTP server SFTPGo has optional FTP/S and WebDAV support. - It can serve local filesystem, S3 (Compatible) Object Storages - and Google Cloud Storage + It can serve local filesystem, S3 (Compatible) Object Storage, + Google Cloud Storage and Azure Blob Storage. vendor: "SFTPGo" homepage: "https://github.com/drakkan/sftpgo" license: "GPL-3.0" diff --git a/pkgs/debian/control b/pkgs/debian/control index bbf00661..7df9ddf3 100644 --- a/pkgs/debian/control +++ b/pkgs/debian/control @@ -13,5 +13,5 @@ Depends: ${shlibs:Depends}, ${misc:Depends} Recommends: bash-completion, python3-requests, python3-pygments, mime-support Description: Fully featured and highly configurable SFTP server SFTPGo has optional FTP/S and WebDAV support. - It can serve local filesystem, S3 (Compatible) Object Storages - and Google Cloud Storage + It can serve local filesystem, S3 (Compatible) Object Storage, + Google Cloud Storage and Azure Blob Storage. 
diff --git a/sftpd/transfer.go b/sftpd/transfer.go index 7afc087f..6d200036 100644 --- a/sftpd/transfer.go +++ b/sftpd/transfer.go @@ -82,20 +82,18 @@ func newTransfer(baseTransfer *common.BaseTransfer, pipeWriter *vfs.PipeWriter, // It handles download bandwidth throttling too func (t *transfer) ReadAt(p []byte, off int64) (n int, err error) { t.Connection.UpdateLastActivity() - var readed int - var e error - readed, e = t.readerAt.ReadAt(p, off) - atomic.AddInt64(&t.BytesSent, int64(readed)) + n, err = t.readerAt.ReadAt(p, off) + atomic.AddInt64(&t.BytesSent, int64(n)) - if e != nil && e != io.EOF { + if err != nil && err != io.EOF { if t.GetType() == common.TransferDownload { - t.TransferError(e) + t.TransferError(err) } - return readed, e + return } t.HandleThrottle() - return readed, e + return } // WriteAt writes len(p) bytes to the uploaded file starting at byte offset off and updates the bytes received. @@ -107,21 +105,19 @@ func (t *transfer) WriteAt(p []byte, off int64) (n int, err error) { t.TransferError(err) return 0, err } - var written int - var e error - written, e = t.writerAt.WriteAt(p, off) - atomic.AddInt64(&t.BytesReceived, int64(written)) + n, err = t.writerAt.WriteAt(p, off) + atomic.AddInt64(&t.BytesReceived, int64(n)) - if t.MaxWriteSize > 0 && e == nil && atomic.LoadInt64(&t.BytesReceived) > t.MaxWriteSize { - e = common.ErrQuotaExceeded + if t.MaxWriteSize > 0 && err == nil && atomic.LoadInt64(&t.BytesReceived) > t.MaxWriteSize { + err = common.ErrQuotaExceeded } - if e != nil { - t.TransferError(e) - return written, e + if err != nil { + t.TransferError(err) + return } t.HandleThrottle() - return written, e + return } // Close it is called when the transfer is completed. diff --git a/templates/user.html b/templates/user.html index 812b5c04..b1a6400e 100644 --- a/templates/user.html +++ b/templates/user.html @@ -278,6 +278,7 @@ + @@ -399,6 +400,81 @@ +