data provider: add automatic backups
Automatic backups are enabled by default; a new backup will be saved each day at midnight. The backups_path setting was moved from the httpd section to the data_provider one; please adjust your configuration file and/or your env vars.

Signed-off-by: Nicola Murino <nicola.murino@gmail.com>
This commit is contained in: parent 48ed3dab1f, commit 5582f5c811
10 changed files with 222 additions and 126 deletions
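For orientation, a minimal Go sketch of reading the moved setting through the exported config API after this change (the standalone program and the config dir path are illustrative, not part of this commit; the field names come from the hunks below):

```go
package main

import (
	"fmt"

	"github.com/drakkan/sftpgo/v2/config"
)

func main() {
	// After this change backups_path and the auto_backup settings are read
	// from the data_provider section, no longer from httpd.
	if err := config.LoadConfig("/etc/sftpgo", ""); err != nil { // config dir is illustrative
		fmt.Println("unable to load config:", err)
		return
	}
	providerConf := config.GetProviderConf()
	fmt.Println("backups path:", providerConf.BackupsPath)
	fmt.Println("auto backup enabled:", providerConf.AutoBackup.Enabled)
	fmt.Println("auto backup schedule:", providerConf.AutoBackup.Hour, providerConf.AutoBackup.DayOfWeek)
}
```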
@@ -301,12 +301,17 @@ func Init() {
CreateDefaultAdmin: false,
NamingRules: 0,
IsShared: 0,
BackupsPath: "backups",
AutoBackup: dataprovider.AutoBackup{
Enabled: true,
Hour: "0",
DayOfWeek: "*",
},
},
HTTPDConfig: httpd.Conf{
Bindings: []httpd.Binding{defaultHTTPDBinding},
TemplatesPath: "templates",
StaticFilesPath: "static",
BackupsPath: "backups",
OpenAPIPath: "openapi",
WebRoot: "",
CertificateFile: "",
@@ -1558,9 +1563,12 @@ func setViperDefaults() {
viper.SetDefault("data_provider.create_default_admin", globalConf.ProviderConf.CreateDefaultAdmin)
viper.SetDefault("data_provider.naming_rules", globalConf.ProviderConf.NamingRules)
viper.SetDefault("data_provider.is_shared", globalConf.ProviderConf.IsShared)
viper.SetDefault("data_provider.backups_path", globalConf.ProviderConf.BackupsPath)
viper.SetDefault("data_provider.auto_backup.enabled", globalConf.ProviderConf.AutoBackup.Enabled)
viper.SetDefault("data_provider.auto_backup.hour", globalConf.ProviderConf.AutoBackup.Hour)
viper.SetDefault("data_provider.auto_backup.day_of_week", globalConf.ProviderConf.AutoBackup.DayOfWeek)
viper.SetDefault("httpd.templates_path", globalConf.HTTPDConfig.TemplatesPath)
viper.SetDefault("httpd.static_files_path", globalConf.HTTPDConfig.StaticFilesPath)
viper.SetDefault("httpd.backups_path", globalConf.HTTPDConfig.BackupsPath)
viper.SetDefault("httpd.openapi_path", globalConf.HTTPDConfig.OpenAPIPath)
viper.SetDefault("httpd.web_root", globalConf.HTTPDConfig.WebRoot)
viper.SetDefault("httpd.certificate_file", globalConf.HTTPDConfig.CertificateFile)
@@ -49,7 +49,6 @@ import (
"github.com/drakkan/sftpgo/v2/httpclient"
"github.com/drakkan/sftpgo/v2/kms"
"github.com/drakkan/sftpgo/v2/logger"
"github.com/drakkan/sftpgo/v2/metric"
"github.com/drakkan/sftpgo/v2/mfa"
"github.com/drakkan/sftpgo/v2/plugin"
"github.com/drakkan/sftpgo/v2/util"
@@ -150,12 +149,7 @@ var (
pbkdfPwdB64SaltPrefixes = []string{pbkdf2SHA256B64SaltPrefix}
unixPwdPrefixes = []string{md5cryptPwdPrefix, md5cryptApr1PwdPrefix, sha512cryptPwdPrefix}
sharedProviders = []string{PGSQLDataProviderName, MySQLDataProviderName, CockroachDataProviderName}
logSender = "dataProvider"
availabilityTicker *time.Ticker
availabilityTickerDone chan bool
updateCachesTicker *time.Ticker
updateCachesTickerDone chan bool
lastCachesUpdate int64
logSender = "dataprovider"
credentialsDirPath string
sqlTableUsers = "users"
sqlTableFolders = "folders"
@@ -231,6 +225,24 @@ type ProviderStatus struct {
Error string `json:"error"`
}

// AutoBackup defines the settings for automatic provider backups.
// Example: hour "0" and day_of_week "*" means a backup every day at midnight.
// The backup file name is in the format backup_<day_of_week>_<hour>.json
// files with the same name will be overwritten
type AutoBackup struct {
Enabled bool `json:"enabled" mapstructure:"enabled"`
// hour as standard cron expression. Allowed values: 0-23.
// Allowed special characters: asterisk (*), slash (/), comma (,), hyphen (-).
// More info about special characters here:
// https://pkg.go.dev/github.com/robfig/cron#hdr-Special_Characters
Hour string `json:"hour" mapstructure:"hour"`
// Day of the week as cron expression. Allowed values: 0-6 (Sunday to Saturday).
// Allowed special characters: asterisk (*), slash (/), comma (,), hyphen (-), question mark (?).
// More info about special characters here:
// https://pkg.go.dev/github.com/robfig/cron#hdr-Special_Characters
DayOfWeek string `json:"day_of_week" mapstructure:"day_of_week"`
}

// Config provider configuration
type Config struct {
// Driver name, must be one of the SupportedProviders
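The json/mapstructure tags above are what tie this struct to the `auto_backup` object in sftpgo.json (see the config file hunk at the end of this diff). A small hedged sketch to print the resulting keys from the defaults:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/drakkan/sftpgo/v2/dataprovider"
)

func main() {
	// Marshal the defaults to see the JSON keys: enabled, hour, day_of_week.
	b, err := json.MarshalIndent(dataprovider.AutoBackup{
		Enabled:   true,
		Hour:      "0",
		DayOfWeek: "*",
	}, "", "  ")
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}
	fmt.Println(string(b))
}
```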
@@ -386,6 +398,10 @@ type Config struct {
// For shared data providers, active transfers are persisted in the database and thus
// quota checks between ongoing transfers will work across multiple instances
IsShared int `json:"is_shared" mapstructure:"is_shared"`
// Path to the backup directory. This can be an absolute path or a path relative to the config dir
BackupsPath string `json:"backups_path" mapstructure:"backups_path"`
// Settings for automatic backups
AutoBackup AutoBackup `json:"auto_backup" mapstructure:"auto_backup"`
}

// GetShared returns the provider share mode
@@ -431,6 +447,33 @@ func (c *Config) requireCustomTLSForMySQL() bool {
return false
}

func (c *Config) doBackup() {
now := time.Now()
outputFile := filepath.Join(c.BackupsPath, fmt.Sprintf("backup_%v_%v.json", now.Weekday(), now.Hour()))
providerLog(logger.LevelDebug, "starting auto backup to file %#v", outputFile)
err := os.MkdirAll(filepath.Dir(outputFile), 0700)
if err != nil {
providerLog(logger.LevelError, "unable to create backup dir %#v: %v", outputFile, err)
return
}
backup, err := DumpData()
if err != nil {
providerLog(logger.LevelError, "unable to execute backup: %v", err)
return
}
dump, err := json.Marshal(backup)
if err != nil {
providerLog(logger.LevelError, "unable to marshal backup as JSON: %v", err)
return
}
err = os.WriteFile(outputFile, dump, 0600)
if err != nil {
providerLog(logger.LevelError, "unable to save backup: %v", err)
return
}
providerLog(logger.LevelDebug, "auto backup saved to %#v", outputFile)
}

// ConvertName converts the given name based on the configured rules
func ConvertName(name string) string {
return config.convertName(name)
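Worth noting from doBackup above: the output name only depends on the weekday and hour, so there are at most 7×24 distinct files and a later run in the same slot overwrites the earlier one. A minimal sketch of the resulting name (the backups directory is illustrative):

```go
package main

import (
	"fmt"
	"path/filepath"
	"time"
)

func main() {
	backupsPath := "/srv/sftpgo/backups" // illustrative; normally taken from data_provider.backups_path
	now := time.Now()
	// Same format string used by doBackup: backup_<day_of_week>_<hour>.json
	outputFile := filepath.Join(backupsPath, fmt.Sprintf("backup_%v_%v.json", now.Weekday(), now.Hour()))
	fmt.Println(outputFile) // e.g. /srv/sftpgo/backups/backup_Monday_0.json
}
```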
@@ -650,11 +693,11 @@ func Initialize(cnf Config, basePath string, checkAdmins bool) error {
var err error
config = cnf

if filepath.IsAbs(config.CredentialsPath) {
credentialsDirPath = config.CredentialsPath
} else {
credentialsDirPath = filepath.Join(basePath, config.CredentialsPath)
cnf.BackupsPath = getConfigPath(cnf.BackupsPath, basePath)
if cnf.BackupsPath == "" {
return fmt.Errorf("required directory is invalid, backup path %#v", cnf.BackupsPath)
}
credentialsDirPath = getConfigPath(config.CredentialsPath, basePath)
vfs.SetCredentialsDirPath(credentialsDirPath)

if err = initializeHashingAlgo(&cnf); err != nil {
@@ -698,10 +741,8 @@ func Initialize(cnf Config, basePath string, checkAdmins bool) error {
return err
}
atomic.StoreInt32(&isAdminCreated, int32(len(admins)))
startAvailabilityTimer()
startUpdateCachesTimer()
delayedQuotaUpdater.start()
return nil
return startScheduler()
}

func validateHooks() error {
@@ -733,6 +774,11 @@ func validateHooks() error {
return nil
}

// GetBackupsPath returns the normalized backups path
func GetBackupsPath() string {
return config.BackupsPath
}

func initializeHashingAlgo(cnf *Config) error {
argon2Params = &argon2id.Params{
Memory: cnf.PasswordHashing.Argon2Options.Memory,
@@ -1541,7 +1587,7 @@ func GetFolders(limit, offset int, order string) ([]vfs.BaseVirtualFolder, error
return provider.getFolders(limit, offset, order)
}

// DumpData returns all users and folders
// DumpData returns all users, folders, admins, api keys, shares
func DumpData() (BackupData, error) {
var data BackupData
users, err := provider.dumpUsers()
@@ -1604,16 +1650,7 @@ func GetProviderStatus() ProviderStatus {
// This method is used in test cases.
// Closing an uninitialized provider is not supported
func Close() error {
if availabilityTicker != nil {
availabilityTicker.Stop()
availabilityTickerDone <- true
availabilityTicker = nil
}
if updateCachesTicker != nil {
updateCachesTicker.Stop()
updateCachesTickerDone <- true
updateCachesTicker = nil
}
stopScheduler()
return provider.close()
}

@@ -2550,73 +2587,6 @@ func getSSLMode() string {
return ""
}

func checkCacheUpdates() {
providerLog(logger.LevelDebug, "start caches check, update time %v", util.GetTimeFromMsecSinceEpoch(lastCachesUpdate))
checkTime := util.GetTimeAsMsSinceEpoch(time.Now())
users, err := provider.getRecentlyUpdatedUsers(lastCachesUpdate)
if err != nil {
providerLog(logger.LevelError, "unable to get recently updated users: %v", err)
return
}
for _, user := range users {
providerLog(logger.LevelDebug, "invalidate caches for user %#v", user.Username)
webDAVUsersCache.swap(&user)
cachedPasswords.Remove(user.Username)
}

lastCachesUpdate = checkTime
providerLog(logger.LevelDebug, "end caches check, new update time %v", util.GetTimeFromMsecSinceEpoch(lastCachesUpdate))
}

func startUpdateCachesTimer() {
if config.IsShared == 0 {
return
}
if !util.IsStringInSlice(config.Driver, sharedProviders) {
providerLog(logger.LevelError, "update caches not supported for provider %v", config.Driver)
return
}
lastCachesUpdate = util.GetTimeAsMsSinceEpoch(time.Now())
providerLog(logger.LevelDebug, "update caches check started for provider %v", config.Driver)
updateCachesTicker = time.NewTicker(10 * time.Minute)
updateCachesTickerDone = make(chan bool)

go func() {
for {
select {
case <-updateCachesTickerDone:
return
case <-updateCachesTicker.C:
checkCacheUpdates()
}
}
}()
}

func startAvailabilityTimer() {
availabilityTicker = time.NewTicker(30 * time.Second)
availabilityTickerDone = make(chan bool)
checkDataprovider()
go func() {
for {
select {
case <-availabilityTickerDone:
return
case <-availabilityTicker.C:
checkDataprovider()
}
}
}()
}

func checkDataprovider() {
err := provider.checkAvailability()
if err != nil {
providerLog(logger.LevelError, "check availability error: %v", err)
}
metric.UpdateDataProviderAvailability(err)
}

func terminateInteractiveAuthProgram(cmd *exec.Cmd, isFinished bool) {
if isFinished {
return
@@ -3417,6 +3387,16 @@ func isLastActivityRecent(lastActivity int64, minDelay time.Duration) bool {
return diff < minDelay
}

func getConfigPath(name, configDir string) string {
if !util.IsFileInputValid(name) {
return ""
}
if name != "" && !filepath.IsAbs(name) {
return filepath.Join(configDir, name)
}
return name
}

func providerLog(level logger.LogLevel, format string, v ...interface{}) {
logger.Log(level, logSender, "", format, v...)
}
dataprovider/scheduler.go | 91 (new file)
@@ -0,0 +1,91 @@
package dataprovider

import (
"fmt"
"time"

"github.com/robfig/cron/v3"

"github.com/drakkan/sftpgo/v2/logger"
"github.com/drakkan/sftpgo/v2/metric"
"github.com/drakkan/sftpgo/v2/util"
)

var (
scheduler *cron.Cron
lastCachesUpdate int64
)

func stopScheduler() {
if scheduler != nil {
scheduler.Stop()
scheduler = nil
}
}

func startScheduler() error {
stopScheduler()

scheduler = cron.New()
_, err := scheduler.AddFunc("@every 30s", checkDataprovider)
if err != nil {
return fmt.Errorf("unable to schedule dataprovider availability check: %w", err)
}

if config.AutoBackup.Enabled {
spec := fmt.Sprintf("0 %v * * %v", config.AutoBackup.Hour, config.AutoBackup.DayOfWeek)
_, err = scheduler.AddFunc(spec, config.doBackup)
if err != nil {
return fmt.Errorf("unable to schedule auto backup: %w", err)
}
}

err = addScheduledCacheUpdates()
if err != nil {
return err
}
scheduler.Start()
return nil
}

func addScheduledCacheUpdates() error {
if config.IsShared == 0 {
return nil
}
if !util.IsStringInSlice(config.Driver, sharedProviders) {
providerLog(logger.LevelError, "update caches not supported for provider %v", config.Driver)
return nil
}
lastCachesUpdate = util.GetTimeAsMsSinceEpoch(time.Now())
_, err := scheduler.AddFunc("@every 10m", checkCacheUpdates)
if err != nil {
return fmt.Errorf("unable to schedule cache updates: %w", err)
}
return nil
}

func checkDataprovider() {
err := provider.checkAvailability()
if err != nil {
providerLog(logger.LevelError, "check availability error: %v", err)
}
metric.UpdateDataProviderAvailability(err)
}

func checkCacheUpdates() {
providerLog(logger.LevelDebug, "start caches check, update time %v", util.GetTimeFromMsecSinceEpoch(lastCachesUpdate))
checkTime := util.GetTimeAsMsSinceEpoch(time.Now())
users, err := provider.getRecentlyUpdatedUsers(lastCachesUpdate)
if err != nil {
providerLog(logger.LevelError, "unable to get recently updated users: %v", err)
return
}
for _, user := range users {
providerLog(logger.LevelDebug, "invalidate caches for user %#v", user.Username)
webDAVUsersCache.swap(&user)
cachedPasswords.Remove(user.Username)
}

lastCachesUpdate = checkTime
providerLog(logger.LevelDebug, "end caches check, new update time %v", util.GetTimeFromMsecSinceEpoch(lastCachesUpdate))
}
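startScheduler above builds a standard five-field cron spec from the two configured values ("0 <hour> * * <day_of_week>"). A hedged sketch for checking a custom schedule against the same robfig/cron parser before deploying it (the hour and day_of_week values here are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/robfig/cron/v3"
)

func main() {
	hour, dayOfWeek := "3", "1-5" // hypothetical: 03:00, Monday to Friday
	// Same spec layout used by startScheduler for the auto backup job.
	spec := fmt.Sprintf("0 %v * * %v", hour, dayOfWeek)
	if _, err := cron.ParseStandard(spec); err != nil {
		fmt.Printf("invalid auto backup schedule %q: %v\n", spec, err)
		return
	}
	fmt.Printf("auto backup schedule %q is valid\n", spec)
}
```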
@@ -196,7 +196,7 @@ The configuration file contains the following sections:
- `external_auth_hook`, string. Absolute path to an external program or an HTTP URL to invoke for users authentication. See [External Authentication](./external-auth.md) for more details. Leave empty to disable.
- `external_auth_scope`, integer. 0 means all supported authentication scopes (passwords, public keys and keyboard interactive). 1 means passwords only. 2 means public keys only. 4 means keyboard interactive only. 8 means TLS certificate. The flags can be combined, for example 6 means public keys and keyboard interactive
- `credentials_path`, string. It defines the directory for storing user provided credential files such as Google Cloud Storage credentials. This can be an absolute path or a path relative to the config dir
- `prefer_database_credentials`, boolean. When true, users' Google Cloud Storage credentials will be written to the data provider instead of disk, though pre-existing credentials on disk will be used as a fallback. When false, they will be written to the directory specified by `credentials_path`.
- `prefer_database_credentials`, boolean. When `true`, users' Google Cloud Storage credentials will be written to the data provider instead of disk, though pre-existing credentials on disk will be used as a fallback. When `false`, they will be written to the directory specified by `credentials_path`.
- `pre_login_hook`, string. Absolute path to an external program or an HTTP URL to invoke to modify user details just before the login. See [Dynamic user modification](./dynamic-user-mod.md) for more details. Leave empty to disable.
- `post_login_hook`, string. Absolute path to an external program or an HTTP URL to invoke to notify a successful or failed login. See [Post-login hook](./post-login-hook.md) for more details. Leave empty to disable.
- `post_login_scope`, defines the scope for the post-login hook. 0 means notify both failed and successful logins. 1 means notify failed logins. 2 means notify successful logins.
@@ -220,6 +220,11 @@ The configuration file contains the following sections:
- `create_default_admin`, boolean. Before you can use SFTPGo you need to create an admin account. If you open the admin web UI, a setup screen will guide you in creating the first admin account. You can automatically create the first admin account by enabling this setting and setting the environment variables `SFTPGO_DEFAULT_ADMIN_USERNAME` and `SFTPGO_DEFAULT_ADMIN_PASSWORD`. You can also create the first admin by loading initial data. This setting has no effect if an admin account is already found within the data provider. Default `false`.
- `naming_rules`, integer. Naming rules for usernames and folder names. `0` means no rules. `1` means you can use any UTF-8 character. The names are used in URIs for REST API and Web admin. If not set only unreserved URI characters are allowed: ALPHA / DIGIT / "-" / "." / "_" / "~". `2` means names are converted to lowercase before saving/matching and so case insensitive matching is possible. `4` means trimming trailing and leading white spaces before saving/matching. Rules can be combined, for example `3` means both converting to lowercase and allowing any UTF-8 character. Enabling these options for existing installations could be backward incompatible; some users could be unable to login, for example existing users with mixed cases in their usernames. You have to ensure that all existing users respect the defined rules. Default: `0`.
- `is_shared`, integer. If the data provider is shared across multiple SFTPGo instances, set this parameter to `1`. `MySQL`, `PostgreSQL` and `CockroachDB` can be shared, this setting is ignored for other data providers. For shared data providers, SFTPGo periodically reloads the latest updated users, based on the `updated_at` field, and updates its internal caches if users are updated from a different instance. This check, if enabled, is executed every 10 minutes. For shared data providers, active transfers are persisted in the database and thus quota checks between ongoing transfers will work across multiple instances. Default: `0`.
- `backups_path`, string. Path to the backup directory. This can be an absolute path or a path relative to the config dir. We don't allow backups in arbitrary paths for security reasons.
- `auto_backup`, struct. Defines the configuration for automatic data provider backups. Example: hour `0` and day_of_week `*` means a backup every day at midnight. The backup file name is in the format `backup_<day_of_week>_<hour>.json`; files with the same name will be overwritten. Note: this process only backs up provider data (users, folders, shares, admins, API keys); it does not back up the configuration file or users' files.
  - `enabled`, boolean. Set to `true` to enable automatic backups. Default: `true`.
  - `hour`, string. Hour as standard cron expression. Allowed values: 0-23. Allowed special characters: asterisk (`*`), slash (`/`), comma (`,`), hyphen (`-`). More info about special characters [here](https://pkg.go.dev/github.com/robfig/cron#hdr-Special_Characters). Default: `0`.
  - `day_of_week`, string. Day of week as standard cron expression. Allowed values: 0-6 (Sunday to Saturday). Allowed special characters: asterisk (`*`), slash (`/`), comma (`,`), hyphen (`-`), question mark (`?`). More info about special characters [here](https://pkg.go.dev/github.com/robfig/cron#hdr-Special_Characters). Default: `*`.
- **"httpd"**, the configuration for the HTTP server used to serve REST API and to expose the built-in web interface
- `bindings`, list of structs. Each struct has the following fields:
  - `port`, integer. The port used for serving HTTP requests. Default: 8080.
@@ -259,7 +264,6 @@ The configuration file contains the following sections:
- `expect_ct_header`, string. Allows to set the `Expect-CT` header value. Default: blank.
- `templates_path`, string. Path to the HTML web templates. This can be an absolute path or a path relative to the config dir
- `static_files_path`, string. Path to the static files for the web interface. This can be an absolute path or a path relative to the config dir. If both `templates_path` and `static_files_path` are empty the built-in web interface will be disabled
- `backups_path`, string. Path to the backup directory. This can be an absolute path or a path relative to the config dir. We don't allow backups in arbitrary paths for security reasons
- `openapi_path`, string. Path to the directory that contains the OpenAPI schema and the default renderer. This can be an absolute path or a path relative to the config dir. If empty the OpenAPI schema and the renderer will not be served regardless of the `render_openapi` directive
- `web_root`, string. Defines a base URL for the web admin and client interfaces. If empty web admin and client resources will be available at the root ("/") URI. If defined it must be an absolute URI or it will be ignored
- `certificate_file`, string. Certificate for HTTPS. This can be an absolute path or a path relative to the config dir.
go.mod | 5
@@ -8,7 +8,7 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0
github.com/GehirnInc/crypt v0.0.0-20200316065508-bb7000b8a962
github.com/alexedwards/argon2id v0.0.0-20211130144151-3585854a6387
github.com/aws/aws-sdk-go v1.43.16
github.com/aws/aws-sdk-go v1.43.17
github.com/cockroachdb/cockroach-go/v2 v2.2.8
github.com/coreos/go-oidc/v3 v3.1.0
github.com/eikenb/pipeat v0.0.0-20210730190139-06b3e6902001
@@ -38,6 +38,7 @@ require (
github.com/pkg/sftp v1.13.5-0.20220303113417-dcfc1d5e4162
github.com/pquerna/otp v1.3.0
github.com/prometheus/client_golang v1.12.1
github.com/robfig/cron/v3 v3.0.1
github.com/rs/cors v1.8.2
github.com/rs/xid v1.3.0
github.com/rs/zerolog v1.26.2-0.20220227173336-263b0bde3672
@@ -103,7 +104,7 @@ require (
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/miekg/dns v1.1.46 // indirect
github.com/miekg/dns v1.1.47 // indirect
github.com/minio/sha256-simd v1.0.0 // indirect
github.com/mitchellh/go-testing-interface v1.14.1 // indirect
github.com/mitchellh/mapstructure v1.4.3 // indirect
go.sum | 10
@@ -144,8 +144,8 @@ github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgI
github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.40.34/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/aws/aws-sdk-go v1.43.16 h1:Y7wBby44f+tINqJjw5fLH3vA+gFq4uMITIKqditwM14=
github.com/aws/aws-sdk-go v1.43.16/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go v1.43.17 h1:jDPBz1UuTxmyRo0eLgaRiro0fiI1zL7lkscqYxoEDLM=
github.com/aws/aws-sdk-go v1.43.17/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
github.com/aws/aws-sdk-go-v2/config v1.7.0/go.mod h1:w9+nMZ7soXCe5nT46Ri354SNhXDQ6v+V5wqDjnZE+GY=
github.com/aws/aws-sdk-go-v2/credentials v1.4.0/go.mod h1:dgGR+Qq7Wjcd4AOAW5Rf5Tnv3+x7ed6kETXyS9WCuAY=
@@ -594,8 +594,8 @@ github.com/mhale/smtpd v0.8.0/go.mod h1:MQl+y2hwIEQCXtNhe5+55n0GZOjSmeqORDIXbqUL
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.27/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.46 h1:uzwpxRtSVxtcIZmz/4Uz6/Rn7G11DvsaslXoy5LxQio=
github.com/miekg/dns v1.1.46/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/miekg/dns v1.1.47 h1:J9bWiXbqMbnZPcY8Qi2E3EWIBsIm6MZzzJB9VRg5gL8=
github.com/miekg/dns v1.1.47/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/minio/sio v0.3.0 h1:syEFBewzOMOYVzSTFpp1MqpSZk8rUNbz8VIIc+PNzus=
@@ -678,6 +678,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U=
@@ -30,7 +30,7 @@ func validateBackupFile(outputFile string) (string, error) {
if strings.Contains(outputFile, "..") {
return "", fmt.Errorf("invalid output-file %#v", outputFile)
}
outputFile = filepath.Join(backupsPath, outputFile)
outputFile = filepath.Join(dataprovider.GetBackupsPath(), outputFile)
return outputFile, nil
}

@@ -57,7 +57,7 @@ func dumpData(w http.ResponseWriter, r *http.Request) {

err = os.MkdirAll(filepath.Dir(outputFile), 0700)
if err != nil {
logger.Warn(logSender, "", "dumping data error: %v, output file: %#v", err, outputFile)
logger.Error(logSender, "", "dumping data error: %v, output file: %#v", err, outputFile)
sendAPIResponse(w, r, err, "", getRespStatus(err))
return
}
@@ -66,7 +66,7 @@ func dumpData(w http.ResponseWriter, r *http.Request) {

backup, err := dataprovider.DumpData()
if err != nil {
logger.Warn(logSender, "", "dumping data error: %v, output file: %#v", err, outputFile)
logger.Error(logSender, "", "dumping data error: %v, output file: %#v", err, outputFile)
sendAPIResponse(w, r, err, "", getRespStatus(err))
return
}
@@ -159,7 +159,6 @@ const (
)

var (
backupsPath string
certMgr *common.CertManager
cleanupTicker *time.Ticker
cleanupDone chan bool
@@ -461,7 +460,7 @@ type Conf struct {
// If both TemplatesPath and StaticFilesPath are empty the built-in web interface will be disabled
StaticFilesPath string `json:"static_files_path" mapstructure:"static_files_path"`
// Path to the backup directory. This can be an absolute path or a path relative to the config dir
BackupsPath string `json:"backups_path" mapstructure:"backups_path"`
//BackupsPath string `json:"backups_path" mapstructure:"backups_path"`
// Path to the directory that contains the OpenAPI schema and the default renderer.
// This can be an absolute path or a path relative to the config dir
OpenAPIPath string `json:"openapi_path" mapstructure:"openapi_path"`
@@ -559,13 +558,9 @@ func (c *Conf) getRedacted() Conf {
// Initialize configures and starts the HTTP server
func (c *Conf) Initialize(configDir string) error {
logger.Info(logSender, "", "initializing HTTP server with config %+v", c.getRedacted())
backupsPath = getConfigPath(c.BackupsPath, configDir)
staticFilesPath := getConfigPath(c.StaticFilesPath, configDir)
templatesPath := getConfigPath(c.TemplatesPath, configDir)
openAPIPath := getConfigPath(c.OpenAPIPath, configDir)
if backupsPath == "" {
return fmt.Errorf("required directory is invalid, backup path %#v", backupsPath)
}
if err := c.checkRequiredDirs(staticFilesPath, templatesPath); err != nil {
return err
}
@@ -304,6 +304,14 @@ func TestMain(m *testing.M) {
os.Exit(1)
}

backupsPath = filepath.Join(os.TempDir(), "test_backups")
providerConf.BackupsPath = backupsPath
err = os.MkdirAll(backupsPath, os.ModePerm)
if err != nil {
logger.ErrorToConsole("error creating backups path: %v", err)
os.Exit(1)
}

err = dataprovider.Initialize(providerConf, configDir, true)
if err != nil {
logger.WarnToConsole("error initializing data provider: %v", err)
@@ -348,14 +356,6 @@ func TestMain(m *testing.M) {
},
}
httpdtest.SetBaseURL(httpBaseURL)
backupsPath = filepath.Join(os.TempDir(), "test_backups")
httpdConf.BackupsPath = backupsPath
err = os.MkdirAll(backupsPath, os.ModePerm)
if err != nil {
logger.ErrorToConsole("error creating backups path: %v", err)
os.Exit(1)
}

// required to test sftpfs
sftpdConf := config.GetSFTPDConfig()
sftpdConf.Bindings = []sftpd.Binding{
@@ -437,7 +437,6 @@ func TestInitialization(t *testing.T) {
httpdConf := config.GetHTTPDConfig()
defaultTemplatesPath := httpdConf.TemplatesPath
defaultStaticPath := httpdConf.StaticFilesPath
httpdConf.BackupsPath = backupsPath
httpdConf.CertificateFile = invalidFile
httpdConf.CertificateKeyFile = invalidFile
err = httpdConf.Initialize(configDir)
@@ -449,10 +448,6 @@ func TestInitialization(t *testing.T) {
assert.Error(t, err)
httpdConf = config.GetHTTPDConfig()
httpdConf.TemplatesPath = defaultTemplatesPath
httpdConf.BackupsPath = ".."
err = httpdConf.Initialize(configDir)
assert.Error(t, err)
httpdConf.BackupsPath = backupsPath
httpdConf.CertificateFile = invalidFile
httpdConf.CertificateKeyFile = invalidFile
httpdConf.StaticFilesPath = ""
@@ -1409,6 +1404,7 @@ func TestPasswordValidations(t *testing.T) {
err = config.LoadConfig(configDir, "")
assert.NoError(t, err)
providerConf = config.GetProviderConf()
providerConf.BackupsPath = backupsPath
providerConf.CredentialsPath = credentialsPath
err = os.RemoveAll(credentialsPath)
assert.NoError(t, err)
@@ -1462,6 +1458,7 @@ func TestAdminPasswordHashing(t *testing.T) {
err = config.LoadConfig(configDir, "")
assert.NoError(t, err)
providerConf = config.GetProviderConf()
providerConf.BackupsPath = backupsPath
providerConf.CredentialsPath = credentialsPath
err = os.RemoveAll(credentialsPath)
assert.NoError(t, err)
@@ -3333,6 +3330,7 @@ func TestUserHiddenFields(t *testing.T) {
err = config.LoadConfig(configDir, "")
assert.NoError(t, err)
providerConf = config.GetProviderConf()
providerConf.BackupsPath = backupsPath
providerConf.CredentialsPath = credentialsPath
err = os.RemoveAll(credentialsPath)
assert.NoError(t, err)
@@ -3981,6 +3979,7 @@ func TestNamingRules(t *testing.T) {
err = config.LoadConfig(configDir, "")
assert.NoError(t, err)
providerConf = config.GetProviderConf()
providerConf.BackupsPath = backupsPath
providerConf.CredentialsPath = credentialsPath
err = os.RemoveAll(credentialsPath)
assert.NoError(t, err)
@@ -4193,6 +4192,7 @@ func TestSaveErrors(t *testing.T) {
err = config.LoadConfig(configDir, "")
assert.NoError(t, err)
providerConf = config.GetProviderConf()
providerConf.BackupsPath = backupsPath
providerConf.CredentialsPath = credentialsPath
err = os.RemoveAll(credentialsPath)
assert.NoError(t, err)
@@ -4280,6 +4280,7 @@ func TestUserBaseDir(t *testing.T) {
err = config.LoadConfig(configDir, "")
assert.NoError(t, err)
providerConf = config.GetProviderConf()
providerConf.BackupsPath = backupsPath
providerConf.CredentialsPath = credentialsPath
err = os.RemoveAll(credentialsPath)
assert.NoError(t, err)
@@ -4326,6 +4327,7 @@ func TestQuotaTrackingDisabled(t *testing.T) {
err = config.LoadConfig(configDir, "")
assert.NoError(t, err)
providerConf = config.GetProviderConf()
providerConf.BackupsPath = backupsPath
providerConf.CredentialsPath = credentialsPath
err = os.RemoveAll(credentialsPath)
assert.NoError(t, err)
@@ -4484,6 +4486,7 @@ func TestProviderErrors(t *testing.T) {
err = config.LoadConfig(configDir, "")
assert.NoError(t, err)
providerConf := config.GetProviderConf()
providerConf.BackupsPath = backupsPath
providerConf.CredentialsPath = credentialsPath
err = os.RemoveAll(credentialsPath)
assert.NoError(t, err)
@@ -4597,6 +4600,7 @@ func TestDumpdata(t *testing.T) {
err = config.LoadConfig(configDir, "")
assert.NoError(t, err)
providerConf := config.GetProviderConf()
providerConf.BackupsPath = backupsPath
err = dataprovider.Initialize(providerConf, configDir, true)
assert.NoError(t, err)
_, rawResp, err := httpdtest.Dumpdata("", "", "", http.StatusBadRequest)
@@ -4642,6 +4646,7 @@ func TestDumpdata(t *testing.T) {
assert.NoError(t, err)
providerConf = config.GetProviderConf()
providerConf.CredentialsPath = credentialsPath
providerConf.BackupsPath = backupsPath
err = os.RemoveAll(credentialsPath)
assert.NoError(t, err)
err = dataprovider.Initialize(providerConf, configDir, true)
@@ -4817,6 +4822,7 @@ func TestDefenderAPIErrors(t *testing.T) {
assert.NoError(t, err)
providerConf := config.GetProviderConf()
providerConf.CredentialsPath = credentialsPath
providerConf.BackupsPath = backupsPath
err = os.RemoveAll(credentialsPath)
assert.NoError(t, err)
err = dataprovider.Initialize(providerConf, configDir, true)
@@ -12545,6 +12551,7 @@ func TestWebAdminSetupMock(t *testing.T) {
err = config.LoadConfig(configDir, "")
assert.NoError(t, err)
providerConf = config.GetProviderConf()
providerConf.BackupsPath = backupsPath
providerConf.CredentialsPath = credentialsPath
err = os.RemoveAll(credentialsPath)
assert.NoError(t, err)
@@ -15022,6 +15029,7 @@ func TestUserSaveFromTemplateMock(t *testing.T) {
err = config.LoadConfig(configDir, "")
assert.NoError(t, err)
providerConf := config.GetProviderConf()
providerConf.BackupsPath = backupsPath
providerConf.CredentialsPath = credentialsPath
err = os.RemoveAll(credentialsPath)
assert.NoError(t, err)
@@ -15220,6 +15228,7 @@ func TestFolderSaveFromTemplateMock(t *testing.T) {
err = config.LoadConfig(configDir, "")
assert.NoError(t, err)
providerConf := config.GetProviderConf()
providerConf.BackupsPath = backupsPath
providerConf.CredentialsPath = credentialsPath
err = os.RemoveAll(credentialsPath)
assert.NoError(t, err)
@@ -17062,6 +17071,7 @@ func TestProviderClosedMock(t *testing.T) {
err = config.LoadConfig(configDir, "")
assert.NoError(t, err)
providerConf := config.GetProviderConf()
providerConf.BackupsPath = backupsPath
providerConf.CredentialsPath = credentialsPath
err = os.RemoveAll(credentialsPath)
assert.NoError(t, err)
@@ -203,7 +203,13 @@
"update_mode": 0,
"create_default_admin": false,
"naming_rules": 0,
"is_shared": 0
"is_shared": 0,
"backups_path": "backups",
"auto_backup": {
"enabled": true,
"hour": "0",
"day_of_week": "*"
}
},
"httpd": {
"bindings": [
@@ -248,7 +254,6 @@
"templates_path": "templates",
"static_files_path": "static",
"openapi_path": "openapi",
"backups_path": "backups",
"web_root": "",
"certificate_file": "",
"certificate_key_file": "",