Add support for loading users/folders on startup

Fixes #161
Nicola Murino 2020-10-20 18:42:37 +02:00
parent b51d795e04
commit bb5207ad77
GPG key ID: 2F1FB59433D5A8CB
9 changed files with 220 additions and 73 deletions


@@ -54,7 +54,7 @@ SFTPGo is developed and tested on Linux. After each commit, the code is automati
 ## Requirements
-- Go 1.13 or higher as build only dependency.
+- Go 1.14 or higher as build only dependency.
 - A suitable SQL server to use as data provider: PostgreSQL 9.4+ or MySQL 5.6+ or SQLite 3.x.
 - The SQL server is optional: you can choose to use an embedded bolt database as key/value store or an in memory data provider.


@@ -13,45 +13,61 @@ import (
 )

 const (
     configDirFlag = "config-dir"
     configDirKey = "config_dir"
     configFileFlag = "config-file"
     configFileKey = "config_file"
     logFilePathFlag = "log-file-path"
     logFilePathKey = "log_file_path"
     logMaxSizeFlag = "log-max-size"
     logMaxSizeKey = "log_max_size"
     logMaxBackupFlag = "log-max-backups"
     logMaxBackupKey = "log_max_backups"
     logMaxAgeFlag = "log-max-age"
     logMaxAgeKey = "log_max_age"
     logCompressFlag = "log-compress"
     logCompressKey = "log_compress"
     logVerboseFlag = "log-verbose"
     logVerboseKey = "log_verbose"
     profilerFlag = "profiler"
     profilerKey = "profiler"
+    loadDataFromFlag = "loaddata-from"
+    loadDataFromKey = "loaddata_from"
+    loadDataModeFlag = "loaddata-mode"
+    loadDataModeKey = "loaddata_mode"
+    loadDataQuotaScanFlag = "loaddata-scan"
+    loadDataQuotaScanKey = "loaddata_scan"
+    loadDataCleanFlag = "loaddata-clean"
+    loadDataCleanKey = "loaddata_clean"
     defaultConfigDir = "."
     defaultConfigName = config.DefaultConfigName
     defaultLogFile = "sftpgo.log"
     defaultLogMaxSize = 10
     defaultLogMaxBackup = 5
     defaultLogMaxAge = 28
     defaultLogCompress = false
     defaultLogVerbose = true
     defaultProfiler = false
+    defaultLoadDataFrom = ""
+    defaultLoadDataMode = 1
+    defaultLoadDataQuotaScan = 0
+    defaultLoadDataClean = false
 )

 var (
     configDir string
     configFile string
     logFilePath string
     logMaxSize int
     logMaxBackups int
     logMaxAge int
     logCompress bool
     logVerbose bool
     profiler bool
+    loadDataFrom string
+    loadDataMode int
+    loadDataQuotaScan int
+    loadDataClean bool

     rootCmd = &cobra.Command{
         Use: "sftpgo",
@@ -172,4 +188,51 @@ be accessible via HTTP/HTTPS using the base URL
 This flag can be set using SFTPGO_PROFILER env
 var too.`)
     viper.BindPFlag(profilerKey, cmd.Flags().Lookup(profilerFlag)) //nolint:errcheck
+    viper.SetDefault(loadDataFromKey, defaultLoadDataFrom)
+    viper.BindEnv(loadDataFromKey, "SFTPGO_LOADDATA_FROM") //nolint:errcheck
+    cmd.Flags().StringVar(&loadDataFrom, loadDataFromFlag, viper.GetString(loadDataFromKey),
+        `Load users and folders from this file.
+The file must be specified as absolute path
+and it must contain a backup obtained using
+the "dumpdata" REST API or compatible content.
+This flag can be set using SFTPGO_LOADDATA_FROM
+env var too.
+`)
+    viper.BindPFlag(loadDataFromKey, cmd.Flags().Lookup(loadDataFromFlag)) //nolint:errcheck
+    viper.SetDefault(loadDataModeKey, defaultLoadDataMode)
+    viper.BindEnv(loadDataModeKey, "SFTPGO_LOADDATA_MODE") //nolint:errcheck
+    cmd.Flags().IntVar(&loadDataMode, loadDataModeFlag, viper.GetInt(loadDataModeKey),
+        `Restore mode for data to load:
+  0 - new users are added, existing users are
+      updated
+  1 - New users are added, existing users are
+      not modified
+This flag can be set using SFTPGO_LOADDATA_MODE
+env var too.
+`)
+    viper.BindPFlag(loadDataModeKey, cmd.Flags().Lookup(loadDataModeFlag)) //nolint:errcheck
+    viper.SetDefault(loadDataQuotaScanKey, defaultLoadDataQuotaScan)
+    viper.BindEnv(loadDataQuotaScanKey, "SFTPGO_LOADDATA_QUOTA_SCAN") //nolint:errcheck
+    cmd.Flags().IntVar(&loadDataQuotaScan, loadDataQuotaScanFlag, viper.GetInt(loadDataQuotaScanKey),
+        `Quota scan mode after data load:
+  0 - no quota scan
+  1 - scan quota
+  2 - scan quota if the user has quota restrictions
+This flag can be set using SFTPGO_LOADDATA_QUOTA_SCAN
+env var too.
+(default 0)`)
+    viper.BindPFlag(loadDataQuotaScanKey, cmd.Flags().Lookup(loadDataQuotaScanFlag)) //nolint:errcheck
+    viper.SetDefault(loadDataCleanKey, defaultLoadDataClean)
+    viper.BindEnv(loadDataCleanKey, "SFTPGO_LOADDATA_CLEAN") //nolint:errcheck
+    cmd.Flags().BoolVar(&loadDataClean, loadDataCleanFlag, viper.GetBool(loadDataCleanKey),
+        `Determine if the loaddata-from file should
+be removed after a successful load. This flag
+can be set using SFTPGO_LOADDATA_CLEAN env var
+too. (default "false")
+`)
+    viper.BindPFlag(logCompressKey, cmd.Flags().Lookup(logCompressFlag)) //nolint:errcheck
 }
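Every option added above follows the same registration chain: `viper.SetDefault` supplies the built-in default, `viper.BindEnv` lets the corresponding `SFTPGO_*` environment variable override it, and `viper.BindPFlag` gives an explicitly passed command line flag the last word. A minimal, self-contained sketch of that precedence, reusing the `loaddata_mode` key purely for illustration (this program is not part of the commit):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
	"github.com/spf13/viper"
)

func main() {
	const key = "loaddata_mode"

	viper.SetDefault(key, 1)                   // 1. built-in default, lowest precedence
	viper.BindEnv(key, "SFTPGO_LOADDATA_MODE") // 2. the env var overrides the default
	pflag.Int("loaddata-mode", viper.GetInt(key), "Restore mode for data to load")
	viper.BindPFlag(key, pflag.Lookup("loaddata-mode")) // 3. an explicitly set flag wins over both
	pflag.Parse()

	fmt.Println("effective loaddata-mode:", viper.GetInt(key))
}
```

With only `SFTPGO_LOADDATA_MODE=0` exported this prints 0; passing `--loaddata-mode 1` as well makes the flag win and it prints 1.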


@@ -21,16 +21,20 @@ $ sftpgo serve
 Please take a look at the usage below to customize the startup options`,
     Run: func(cmd *cobra.Command, args []string) {
         service := service.Service{
             ConfigDir: utils.CleanDirInput(configDir),
             ConfigFile: configFile,
             LogFilePath: logFilePath,
             LogMaxSize: logMaxSize,
             LogMaxBackups: logMaxBackups,
             LogMaxAge: logMaxAge,
             LogCompress: logCompress,
             LogVerbose: logVerbose,
+            LoadDataFrom: loadDataFrom,
+            LoadDataMode: loadDataMode,
+            LoadDataQuotaScan: loadDataQuotaScan,
+            LoadDataClean: loadDataClean,
             Profiler: profiler,
             Shutdown: make(chan bool),
         }
         if err := service.Start(); err == nil {
             service.Wait()


@@ -23,6 +23,8 @@ docker run --name some-sftpgo -p 127.0.0.1:8080:8080 -p 2022:2022 -d "drakkan/sf
 Now visit [http://localhost:8080/](http://localhost:8080/) and create a new SFTPGo user. The SFTP service is available on port 2022.

+If you prefer GitHub Container Registry to Docker Hub replace `drakkan/sftpgo:tag` with `ghcr.io/drakkan/sftpgo:tag`.
+
 ### Container shell access and viewing SFTPGo logs

 The docker exec command allows you to run commands inside a Docker container. The following command line will give you a bash shell inside your `sftpgo` container:


@@ -26,18 +26,23 @@ The `serve` command supports the following flags:
 - `--config-dir` string. Location of the config dir. This directory should contain the configuration file and is used as the base directory for any files that use a relative path (eg. the private keys for the SFTP server, the SQLite or bblot database if you use SQLite or bbolt as data provider). The default value is "." or the value of `SFTPGO_CONFIG_DIR` environment variable.
 - `--config-file` string. Name of the configuration file. It must be the name of a file stored in `config-dir`, not the absolute path to the configuration file. The specified file name must have no extension because we automatically append JSON, YAML, TOML, HCL and Java extensions when we search for the file. The default value is "sftpgo" (and therefore `sftpgo.json`, `sftpgo.yaml` and so on are searched) or the value of `SFTPGO_CONFIG_FILE` environment variable.
+- `--loaddata-from` string. Load users and folders from this file. The file must be specified as absolute path and it must contain a backup obtained using the `dumpdata` REST API or compatible content. The default value is empty or the value of `SFTPGO_LOADDATA_FROM` environment variable.
+- `--loaddata-clean` boolean. Determine if the loaddata-from file should be removed after a successful load. Default `false` or the value of `SFTPGO_LOADDATA_CLEAN` environment variable (1 or `true`, 0 or `false`).
+- `--loaddata-mode`, integer. Restore mode for data to load. 0 means new users are added, existing users are updated. 1 means new users are added, existing users are not modified. Default 1 or the value of `SFTPGO_LOADDATA_MODE` environment variable.
+- `--loaddata-scan`, integer. Quota scan mode after data load. 0 means no quota scan. 1 means quota scan. 2 means scan quota if the user has quota restrictions. Default 0 or the value of `SFTPGO_LOADDATA_QUOTA_SCAN` environment variable.
 - `--log-compress` boolean. Determine if the rotated log files should be compressed using gzip. Default `false` or the value of `SFTPGO_LOG_COMPRESS` environment variable (1 or `true`, 0 or `false`). It is unused if `log-file-path` is empty.
 - `--log-file-path` string. Location for the log file, default "sftpgo.log" or the value of `SFTPGO_LOG_FILE_PATH` environment variable. Leave empty to write logs to the standard error.
 - `--log-max-age` int. Maximum number of days to retain old log files. Default 28 or the value of `SFTPGO_LOG_MAX_AGE` environment variable. It is unused if `log-file-path` is empty.
 - `--log-max-backups` int. Maximum number of old log files to retain. Default 5 or the value of `SFTPGO_LOG_MAX_BACKUPS` environment variable. It is unused if `log-file-path` is empty.
 - `--log-max-size` int. Maximum size in megabytes of the log file before it gets rotated. Default 10 or the value of `SFTPGO_LOG_MAX_SIZE` environment variable. It is unused if `log-file-path` is empty.
 - `--log-verbose` boolean. Enable verbose logs. Default `true` or the value of `SFTPGO_LOG_VERBOSE` environment variable (1 or `true`, 0 or `false`).
+- `--profiler` boolean. Enable the built-in profiler. The profiler will be accessible via HTTP/HTTPS using the base URL "/debug/pprof/". Default `false` or the value of `SFTPGO_PROFILER` environment variable (1 or `true`, 0 or `false`).

 Log file can be rotated on demand sending a `SIGUSR1` signal on Unix based systems and using the command `sftpgo service rotatelogs` on Windows.

-If you don't configure any private host key, the daemon will use `id_rsa`, `id_ecdsa` and `id_ed25519` in the configuration directory. If these files don't exist, the daemon will attempt to autogenerate them (if the user that executes SFTPGo has write access to the `config-dir`). The server supports any private key format supported by [`crypto/ssh`](https://github.com/golang/crypto/blob/master/ssh/keys.go#L33).
+If you don't configure any private host key, the daemon will use `id_rsa`, `id_ecdsa` and `id_ed25519` in the configuration directory. If these files don't exist, the daemon will attempt to autogenerate them. The server supports any private key format supported by [`crypto/ssh`](https://github.com/golang/crypto/blob/master/ssh/keys.go#L33).

-The `gen` command allows to generate completion scripts for your shell and man pages. Currently the man pages visual representation is wrong, take a look at this upstream [bug](https://github.com/spf13/cobra/issues/1049) for more details.
+The `gen` command allows to generate completion scripts for your shell and man pages.

 ## Configuration file

@@ -182,7 +187,7 @@ If you want the default host keys generation in a directory different from the c
 ]
 ```
-then SFTPGo will try to create `id_rsa`, `id_ecdsa` and `id_ed25519`, if they are missing, inside the existing directory `/etc/sftpgo/keys`.
+then SFTPGo will try to create `id_rsa`, `id_ecdsa` and `id_ed25519`, if they are missing, inside the directory `/etc/sftpgo/keys`.

 The configuration can be read from JSON, TOML, YAML, HCL, envfile and Java properties config files. If your `config-file` flag is set to `sftpgo` (default value), you need to create a configuration file called `sftpgo.json` or `sftpgo.yaml` and so on inside `config-dir`.
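The file passed to `--loaddata-from` is the JSON document produced by the `dumpdata` REST API. As a rough illustration only, a minimal hand-written dump might look like the one generated below; the field names (`users`, `folders`, `username`, `status`, `home_dir`, `permissions`) and the output path are assumptions based on the restore code in this commit and on SFTPGo's user format, not something this diff defines:

```go
package main

import (
	"io/ioutil"
	"log"
)

func main() {
	// Minimal, hypothetical dump: one local user, no virtual folders.
	// A real dump obtained via "dumpdata" also contains password hashes,
	// public keys, quota fields and so on.
	dump := []byte(`{
  "users": [
    {
      "username": "demo",
      "status": 1,
      "home_dir": "/srv/sftpgo/data/demo",
      "permissions": {"/": ["*"]}
    }
  ],
  "folders": []
}`)
	if err := ioutil.WriteFile("/var/lib/sftpgo/initial-data.json", dump, 0600); err != nil {
		log.Fatal(err)
	}
}
```

The resulting file could then be loaded at startup with something like `sftpgo serve --loaddata-from /var/lib/sftpgo/initial-data.json --loaddata-mode 1 --loaddata-scan 0`.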


@@ -144,7 +144,7 @@ Search for the `data_provider` section and change it as follow.
 }
 ```

-This way we set the PostgreSQL connection parameters and a default base directory for new users.
+This way we set the PostgreSQL connection parameters.

 If you want to connect to PostgreSQL over a Unix Domain socket you have to set the value `/var/run/postgresql` for the `host` configuration key instead of `127.0.0.1`.

@@ -212,4 +212,4 @@ Click `Add` and fill the user details, the minimum required parameters are:
 You are done! Now you can connect to you SFTPGo instance using any compatible `sftp` client on port `2022`.

-You can mix S3 users with local users but please be aware that we are running the service as the unprivileged `sftpgo` system user so if you set storage as `local` for an SFTPGo virtual user then the home directory for this user must be owned by the `sftpgo` system user. If you don't specify an home directory the default will be `/var/lib/sftpgo/users/<username>` which should be appropriate.
+You can mix S3 users with local users but please be aware that we are running the service as the unprivileged `sftpgo` system user so if you set storage as `local` for an SFTPGo virtual user then the home directory for this user must be owned by the `sftpgo` system user. If you don't specify an home directory the default will be `/srv/sftpgo/data/<username>` which should be appropriate.


@@ -85,9 +85,9 @@ func loadData(w http.ResponseWriter, r *http.Request) {
         sendAPIResponse(w, r, err, "", getRespStatus(err))
         return
     }
-    if fi.Size() > maxRestoreSize {
+    if fi.Size() > MaxRestoreSize {
         sendAPIResponse(w, r, err, fmt.Sprintf("Unable to restore input file: %#v size too big: %v/%v bytes",
-            inputFile, fi.Size(), maxRestoreSize), http.StatusBadRequest)
+            inputFile, fi.Size(), MaxRestoreSize), http.StatusBadRequest)
         return
     }

@@ -103,12 +103,12 @@ func loadData(w http.ResponseWriter, r *http.Request) {
         return
     }
-    if err = restoreFolders(dump.Folders, inputFile, scanQuota); err != nil {
+    if err = RestoreFolders(dump.Folders, inputFile, scanQuota); err != nil {
         sendAPIResponse(w, r, err, "", getRespStatus(err))
         return
     }
-    if err = restoreUsers(dump.Users, inputFile, mode, scanQuota); err != nil {
+    if err = RestoreUsers(dump.Users, inputFile, mode, scanQuota); err != nil {
         sendAPIResponse(w, r, err, "", getRespStatus(err))
         return
     }

@@ -140,7 +140,8 @@ func getLoaddataOptions(r *http.Request) (string, int, int, error) {
     return inputFile, scanQuota, restoreMode, err
 }

-func restoreFolders(folders []vfs.BaseVirtualFolder, inputFile string, scanQuota int) error {
+// RestoreFolders restores the specified folders
+func RestoreFolders(folders []vfs.BaseVirtualFolder, inputFile string, scanQuota int) error {
     for _, folder := range folders {
         _, err := dataprovider.GetFolderByPath(folder.MappedPath)
         if err == nil {

@@ -163,7 +164,8 @@ func restoreFolders(folders []vfs.BaseVirtualFolder, inputFile string, scanQuota
     return nil
 }

-func restoreUsers(users []dataprovider.User, inputFile string, mode, scanQuota int) error {
+// RestoreUsers restores the specified users
+func RestoreUsers(users []dataprovider.User, inputFile string, mode, scanQuota int) error {
     for _, user := range users {
         u, err := dataprovider.UserExists(user.Username)
         if err == nil {


@@ -43,8 +43,9 @@ const (
     webFoldersPath = "/web/folders"
     webFolderPath = "/web/folder"
     webStaticFilesPath = "/static"
-    maxRestoreSize = 10485760 // 10 MB
-    maxRequestSize = 1048576 // 1MB
+    // MaxRestoreSize defines the max size for the loaddata input file
+    MaxRestoreSize = 10485760 // 10 MB
+    maxRequestSize = 1048576 // 1MB
 )

 var (


@@ -2,6 +2,10 @@
 package service

 import (
+    "encoding/json"
+    "fmt"
+    "io/ioutil"
+    "os"
     "path/filepath"

     "github.com/rs/zerolog"

@@ -9,6 +13,7 @@ import (
     "github.com/drakkan/sftpgo/common"
     "github.com/drakkan/sftpgo/config"
     "github.com/drakkan/sftpgo/dataprovider"
+    "github.com/drakkan/sftpgo/httpd"
     "github.com/drakkan/sftpgo/logger"
     "github.com/drakkan/sftpgo/utils"
     "github.com/drakkan/sftpgo/version"

@@ -24,19 +29,23 @@
 // Service defines the SFTPGo service
 type Service struct {
     ConfigDir string
     ConfigFile string
     LogFilePath string
     LogMaxSize int
     LogMaxBackups int
     LogMaxAge int
     PortableMode int
     PortableUser dataprovider.User
     LogCompress bool
     LogVerbose bool
     Profiler bool
+    LoadDataClean bool
+    LoadDataFrom string
+    LoadDataMode int
+    LoadDataQuotaScan int
     Shutdown chan bool
     Error error
 }

 // Start initializes the service

@@ -86,6 +95,13 @@ func (s *Service) Start() error {
         }
     }

+    err = s.loadInitialData()
+    if err != nil {
+        logger.Error(logSender, "", "unable to load initial data: %v", err)
+        logger.ErrorToConsole("unable to load initial data: %v", err)
+        return err
+    }
+
     httpConfig := config.GetHTTPConfig()
     httpConfig.Initialize(s.ConfigDir)

@@ -165,3 +181,57 @@ func (s *Service) Stop() {
     close(s.Shutdown)
     logger.Debug(logSender, "", "Service stopped")
 }
+
+func (s *Service) loadInitialData() error {
+    if s.LoadDataFrom == "" {
+        return nil
+    }
+    if !filepath.IsAbs(s.LoadDataFrom) {
+        return fmt.Errorf("invalid input_file %#v, it must be an absolute path", s.LoadDataFrom)
+    }
+    if s.LoadDataMode < 0 || s.LoadDataMode > 1 {
+        return fmt.Errorf("Invalid loaddata-mode %v", s.LoadDataMode)
+    }
+    if s.LoadDataQuotaScan < 0 || s.LoadDataQuotaScan > 2 {
+        return fmt.Errorf("Invalid loaddata-scan %v", s.LoadDataQuotaScan)
+    }
+    info, err := os.Stat(s.LoadDataFrom)
+    if err != nil {
+        return err
+    }
+    if info.Size() > httpd.MaxRestoreSize {
+        return fmt.Errorf("unable to restore input file %#v size too big: %v/%v bytes",
+            s.LoadDataFrom, info.Size(), httpd.MaxRestoreSize)
+    }
+    content, err := ioutil.ReadFile(s.LoadDataFrom)
+    if err != nil {
+        return fmt.Errorf("unable to read input file %#v: %v", s.LoadDataFrom, err)
+    }
+    var dump dataprovider.BackupData
+    err = json.Unmarshal(content, &dump)
+    if err != nil {
+        return fmt.Errorf("unable to parse file to restore %#v: %v", s.LoadDataFrom, err)
+    }
+    err = httpd.RestoreFolders(dump.Folders, s.LoadDataFrom, s.LoadDataQuotaScan)
+    if err != nil {
+        return fmt.Errorf("unable to restore folders from file %#v: %v", s.LoadDataFrom, err)
+    }
+    err = httpd.RestoreUsers(dump.Users, s.LoadDataFrom, s.LoadDataMode, s.LoadDataQuotaScan)
+    if err != nil {
+        return fmt.Errorf("unable to restore users from file %#v: %v", s.LoadDataFrom, err)
+    }
+    logger.Info(logSender, "", "data loaded from file %#v", s.LoadDataFrom)
+    logger.InfoToConsole("data loaded from file %#v", s.LoadDataFrom)
+    if s.LoadDataClean {
+        err = os.Remove(s.LoadDataFrom)
+        if err == nil {
+            logger.Info(logSender, "", "file %#v deleted after successful load", s.LoadDataFrom)
+            logger.InfoToConsole("file %#v deleted after successful load", s.LoadDataFrom)
+        } else {
+            logger.Warn(logSender, "", "unable to delete file %#v after successful load: %v", s.LoadDataFrom, err)
+            logger.WarnToConsole("unable to delete file %#v after successful load: %v", s.LoadDataFrom, err)
+        }
+    }
+    return nil
+}
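The guard clauses at the top of `loadInitialData` run before any data provider or HTTP call, so the error paths for a relative path or an out-of-range restore mode can be exercised in isolation. A hypothetical test sketch, assuming it lives in a `_test.go` file inside the `service` package (it is not part of this commit):

```go
package service

import (
	"strings"
	"testing"
)

func TestLoadInitialDataValidation(t *testing.T) {
	// A relative path is rejected before the file is even opened.
	s := Service{LoadDataFrom: "relative/path.json", LoadDataMode: 1}
	if err := s.loadInitialData(); err == nil || !strings.Contains(err.Error(), "absolute path") {
		t.Fatalf("expected an absolute path error, got %v", err)
	}
	// An out-of-range restore mode is rejected before the data provider is touched.
	s = Service{LoadDataFrom: "/tmp/dump.json", LoadDataMode: 5}
	if err := s.loadInitialData(); err == nil || !strings.Contains(err.Error(), "loaddata-mode") {
		t.Fatalf("expected an invalid mode error, got %v", err)
	}
}
```

Anything past those checks (reading, parsing and restoring the dump) needs an initialized data provider, so it is not covered by this sketch.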