WebClient/HTTP API: ensure the home dir is checked, when needed, in multi-node setups

Behind a load balancer with no sticky sessions enabled, checking the home dir
only when the client logs in is not enough

Signed-off-by: Nicola Murino <nicola.murino@gmail.com>
Nicola Murino 2022-09-07 16:23:56 +02:00
parent 3ebdfa9b2d
commit 63e3891808
5 changed files with 9 additions and 2 deletions
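
To make the intent concrete, here is a minimal, self-contained Go sketch of the pattern this commit applies. The `user` type, `checkFsRoot` and `handleUpload` names below are illustrative stand-ins, not SFTPGo's API: the point is only that, with several nodes behind a load balancer and no sticky sessions, the node serving a WebClient/HTTP API request may not be the one that handled the login, so the user's root directory has to be ensured before every filesystem operation.

```go
// Illustrative sketch only: hypothetical types and helpers, not SFTPGo code.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// user is a hypothetical stand-in for the authenticated user.
type user struct {
	HomeDir string
}

// checkFsRoot mimics what a per-request home dir check is assumed to do here:
// create the root directory on the local node if it does not exist yet.
func (u *user) checkFsRoot() error {
	return os.MkdirAll(u.HomeDir, 0o700)
}

// handleUpload sketches a request handler: the home dir check runs before the
// filesystem operation, so it works even if this node never saw the login.
func handleUpload(u *user, name string, data []byte) error {
	if err := u.checkFsRoot(); err != nil {
		// the real handlers deliberately ignore this error (//nolint:errcheck);
		// here we just log it and let the operation itself report any failure
		fmt.Fprintf(os.Stderr, "unable to check fs root: %v\n", err)
	}
	return os.WriteFile(filepath.Join(u.HomeDir, name), data, 0o600)
}

func main() {
	u := &user{HomeDir: filepath.Join(os.TempDir(), "sftpgo-demo-user")}
	if err := handleUpload(u, "hello.txt", []byte("hello")); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```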

@@ -57,7 +57,7 @@ The configuration file contains the following sections:
- **"common"**, configuration parameters shared among all the supported protocols
- `idle_timeout`, integer. Time in minutes after which an idle client will be disconnected. 0 means disabled. Default: 15
- `upload_mode` integer. 0 means standard: the files are uploaded directly to the requested path. 1 means atomic: files are uploaded to a temporary path and renamed to the requested path when the client ends the upload. Atomic mode avoids problems such as a web server that serves partial files when the files are being uploaded. In atomic mode, if there is an upload error, the temporary file is deleted and so the requested upload path will not contain a partial file. 2 means atomic with resume support: same as atomic but if there is an upload error, the temporary file is renamed to the requested path and not deleted. This way, a client can reconnect and resume the upload. Default: 0
- `upload_mode` integer. 0 means standard: the files are uploaded directly to the requested path. 1 means atomic: files are uploaded to a temporary path and renamed to the requested path when the client ends the upload. Atomic mode avoids problems such as a web server that serves partial files when the files are being uploaded. In atomic mode, if there is an upload error, the temporary file is deleted and so the requested upload path will not contain a partial file. 2 means atomic with resume support: same as atomic but if there is an upload error, the temporary file is renamed to the requested path and not deleted. This way, a client can reconnect and resume the upload. Ignored for cloud-based storage backends (uploads are always atomic and resume is not supported for these backends) and for SFTP backend if buffering is enabled. Default: 0
- `actions`, struct. It contains the command to execute and/or the HTTP URL to notify and the trigger conditions. See [Custom Actions](./custom-actions.md) for more details
- `execute_on`, list of strings. Valid values are `pre-download`, `download`, `pre-upload`, `upload`, `pre-delete`, `delete`, `rename`, `mkdir`, `rmdir`, `ssh_cmd`. Leave empty to disable actions.
- `execute_sync`, list of strings. Actions, defined in the `execute_on` list above, to be performed synchronously. The `pre-*` actions are always executed synchronously while the other ones are asynchronous. Executing an action synchronously means that SFTPGo will not return a result code to the client (which is waiting for it) until your hook has completed its execution. Leave empty to execute only the defined `pre-*` hook synchronously
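
As a side note on the `upload_mode` values described above, the following rough sketch shows how the three modes are expected to differ when an upload fails; `saveUpload` and its helpers are hypothetical names used only for illustration, not SFTPGo's implementation.

```go
// Rough sketch of the documented upload_mode semantics; not SFTPGo code.
package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

const (
	uploadModeStandard         = 0 // write directly to the requested path
	uploadModeAtomic           = 1 // write to a temporary path, rename on success
	uploadModeAtomicWithResume = 2 // like atomic, but keep partial data on error
)

// saveUpload stores data according to the configured mode; uploadErr simulates
// an error reported while the client was uploading.
func saveUpload(mode int, requestedPath string, data []byte, uploadErr error) error {
	if mode == uploadModeStandard {
		// standard: a reader may observe a partial file while the upload runs
		return os.WriteFile(requestedPath, data, 0o600)
	}
	tmpPath := requestedPath + ".tmp"
	if err := os.WriteFile(tmpPath, data, 0o600); err != nil {
		return err
	}
	if uploadErr != nil {
		if mode == uploadModeAtomic {
			// atomic: drop the partial file, the requested path stays untouched
			return errors.Join(uploadErr, os.Remove(tmpPath))
		}
		// atomic with resume: keep the partial data so the client can resume
		return errors.Join(uploadErr, os.Rename(tmpPath, requestedPath))
	}
	// successful upload: publish the file with a single rename
	return os.Rename(tmpPath, requestedPath)
}

func main() {
	path := filepath.Join(os.TempDir(), "upload-mode-example.txt")
	fmt.Println(saveUpload(uploadModeAtomic, path, []byte("data"), nil))
}
```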

@@ -462,7 +462,8 @@ type Config struct {
BackupsPath string `json:"backups_path" mapstructure:"backups_path"`
}
// GetShared returns the provider share mode
// GetShared returns the provider share mode.
// This method is called before the provider is initialized
func (c *Config) GetShared() int {
if !util.Contains(sharedProviders, c.Driver) {
return 0
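
For context on the comment change above, a hedged sketch of what a guard like `GetShared` is for: share mode only makes sense for data providers that can actually be shared between nodes, so it is forced to 0 for any other driver. The driver names below are assumptions for the example, not the authoritative `sharedProviders` list.

```go
// Illustrative sketch of a shared-mode guard; driver names are assumptions.
package main

import "fmt"

// contains is a small stand-in for util.Contains.
func contains(list []string, v string) bool {
	for _, item := range list {
		if item == v {
			return true
		}
	}
	return false
}

// sharedDrivers lists drivers assumed, for this example, to support shared mode.
var sharedDrivers = []string{"postgresql", "mysql", "cockroachdb"}

type config struct {
	Driver string
	Shared int
}

// getShared returns the configured share mode, or 0 if the driver cannot be shared.
func (c *config) getShared() int {
	if !contains(sharedDrivers, c.Driver) {
		return 0
	}
	return c.Shared
}

func main() {
	c := &config{Driver: "sqlite", Shared: 1}
	fmt.Println(c.getShared()) // 0: a node-local database cannot be shared across nodes
}
```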

@@ -96,6 +96,7 @@ func createUserDir(w http.ResponseWriter, r *http.Request) {
return
}
}
connection.User.CheckFsRoot(connection.ID) //nolint:errcheck
err = connection.CreateDir(name, true)
if err != nil {
sendAPIResponse(w, r, err, fmt.Sprintf("Unable to create directory %#v", name), getMappedStatusCode(err))
@@ -225,6 +226,7 @@ func uploadUserFile(w http.ResponseWriter, r *http.Request) {
}
func doUploadFile(w http.ResponseWriter, r *http.Request, connection *Connection, filePath string) error {
connection.User.CheckFsRoot(connection.ID) //nolint:errcheck
writer, err := connection.getFileWriter(filePath)
if err != nil {
sendAPIResponse(w, r, err, fmt.Sprintf("Unable to write file %#v", filePath), getMappedStatusCode(err))
@@ -294,6 +296,7 @@ func uploadUserFiles(w http.ResponseWriter, r *http.Request) {
func doUploadFiles(w http.ResponseWriter, r *http.Request, connection *Connection, parentDir string,
files []*multipart.FileHeader,
) int {
connection.User.CheckFsRoot(connection.ID) //nolint:errcheck
uploaded := 0
connection.User.UploadBandwidth = 0
for _, f := range files {

@@ -232,6 +232,7 @@ func getCompressedFileName(username string, files []string) string {
func renderCompressedFiles(w http.ResponseWriter, conn *Connection, baseDir string, files []string,
share *dataprovider.Share,
) {
conn.User.CheckFsRoot(conn.ID) //nolint:errcheck
w.Header().Set("Content-Type", "application/zip")
w.Header().Set("Accept-Ranges", "none")
w.Header().Set("Content-Transfer-Encoding", "binary")
@@ -326,6 +327,7 @@ func checkDownloadFileFromShare(share *dataprovider.Share, info os.FileInfo) err
func downloadFile(w http.ResponseWriter, r *http.Request, connection *Connection, name string,
info os.FileInfo, inline bool, share *dataprovider.Share,
) (int, error) {
connection.User.CheckFsRoot(connection.ID) //nolint:errcheck
err := checkDownloadFileFromShare(share, info)
if err != nil {
return http.StatusBadRequest, err

@@ -954,6 +954,7 @@ func (s *httpdServer) handleClientEditFile(w http.ResponseWriter, r *http.Reques
return
}
connection.User.CheckFsRoot(connection.ID) //nolint:errcheck
reader, err := connection.getFileReader(name, 0, r.Method)
if err != nil {
s.renderClientMessagePage(w, r, fmt.Sprintf("Unable to get a reader for the file %#v", name), "",