Mirror of https://github.com/drakkan/sftpgo.git
squash database migrations and remove the credentials_path setting
Signed-off-by: Nicola Murino <nicola.murino@gmail.com>
Parent: 6f4475ff72
Commit: 93ce593ed0
17 changed files with 151 additions and 979 deletions
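In short: the separate schema migrations for database versions 15 through 18 are squashed into a single version 19 baseline, the `credentials_path` data provider setting is removed, and the code that imported on-disk Google Cloud Storage credential files into the stored filesystem configuration goes away together with the old upgrade path. The sketch below is illustrative only (the identifiers are hypothetical, not SFTPGo's); it shows the version gate the squashed providers are left with: schemas older than 19 now hit the "too old" error and the upgrading docs instead of an in-place migration.

```go
package main

import (
	"errors"
	"fmt"
)

// Hypothetical names for illustration: version 19 is the new baseline after
// the squash, anything older is rejected rather than migrated in place.
const supportedDBVersion = 19

var errNoInitRequired = errors.New("schema is already up to date")

func checkSchemaVersion(version int) error {
	switch {
	case version == supportedDBVersion:
		return errNoInitRequired
	case version < supportedDBVersion:
		return fmt.Errorf("database version %v is too old, please see the upgrading docs", version)
	default:
		return fmt.Errorf("database version %v is newer than the supported one: %v", version, supportedDBVersion)
	}
}

func main() {
	fmt.Println(checkSchemaVersion(15)) // previously migratable, now rejected
	fmt.Println(checkSchemaVersion(19)) // up to date
}
```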
@@ -30,10 +30,10 @@ Several storage backends are supported: local filesystem, encrypted local filesy
 - Per-user authentication methods.
 - [Two-factor authentication](./docs/howto/two-factor-authentication.md) based on time-based one time passwords (RFC 6238) which works with Authy, Google Authenticator and other compatible apps.
 - Simplified user administrations using [groups](./docs/groups.md).
-- Custom authentication via external programs/HTTP API.
+- Custom authentication via [external programs/HTTP API](./docs/external-auth.md).
 - Web Client and Web Admin user interfaces support [OpenID Connect](https://openid.net/connect/) authentication and so they can be integrated with identity providers such as [Keycloak](https://www.keycloak.org/). You can find more details [here](./docs/oidc.md).
 - [Data At Rest Encryption](./docs/dare.md).
-- Dynamic user modification before login via external programs/HTTP API.
+- Dynamic user modification before login via [external programs/HTTP API](./docs/dynamic-user-mod.md).
 - Quota support: accounts can have individual disk quota expressed as max total size and/or max number of files.
 - Bandwidth throttling, with separate settings for upload and download and overrides based on the client's IP address.
 - Data transfer bandwidth limits, with total limit or separate settings for uploads and downloads and overrides based on the client's IP address. Limits can be reset using the REST API.
@@ -310,7 +310,6 @@ func Init() {
 },
 ExternalAuthHook: "",
 ExternalAuthScope: 0,
-CredentialsPath: "credentials",
 PreLoginHook: "",
 PostLoginHook: "",
 PostLoginScope: 0,
@@ -691,12 +690,6 @@ func resetInvalidConfigs() {
 logger.Warn(logSender, "", "Non-fatal configuration error: %v", warn)
 logger.WarnToConsole("Non-fatal configuration error: %v", warn)
 }
-if globalConf.ProviderConf.CredentialsPath == "" {
-warn := "invalid credentials path, reset to \"credentials\""
-globalConf.ProviderConf.CredentialsPath = "credentials"
-logger.Warn(logSender, "", "Non-fatal configuration error: %v", warn)
-logger.WarnToConsole("Non-fatal configuration error: %v", warn)
-}
 if globalConf.Common.DefenderConfig.Enabled && globalConf.Common.DefenderConfig.Driver == common.DefenderDriverProvider {
 if !globalConf.ProviderConf.IsDefenderSupported() {
 warn := fmt.Sprintf("provider based defender is not supported with data provider %#v, "+
@@ -1853,7 +1846,6 @@ func setViperDefaults() {
 viper.SetDefault("data_provider.actions.hook", globalConf.ProviderConf.Actions.Hook)
 viper.SetDefault("data_provider.external_auth_hook", globalConf.ProviderConf.ExternalAuthHook)
 viper.SetDefault("data_provider.external_auth_scope", globalConf.ProviderConf.ExternalAuthScope)
-viper.SetDefault("data_provider.credentials_path", globalConf.ProviderConf.CredentialsPath)
 viper.SetDefault("data_provider.pre_login_hook", globalConf.ProviderConf.PreLoginHook)
 viper.SetDefault("data_provider.post_login_hook", globalConf.ProviderConf.PostLoginHook)
 viper.SetDefault("data_provider.post_login_scope", globalConf.ProviderConf.PostLoginScope)
@@ -184,29 +184,6 @@ func TestInvalidExternalAuthScope(t *testing.T) {
 assert.NoError(t, err)
 }
 
-func TestInvalidCredentialsPath(t *testing.T) {
-reset()
-
-configDir := ".."
-confName := tempConfigName + ".json"
-configFilePath := filepath.Join(configDir, confName)
-err := config.LoadConfig(configDir, "")
-assert.NoError(t, err)
-providerConf := config.GetProviderConf()
-providerConf.CredentialsPath = ""
-c := make(map[string]dataprovider.Config)
-c["data_provider"] = providerConf
-jsonConf, err := json.Marshal(c)
-assert.NoError(t, err)
-err = os.WriteFile(configFilePath, jsonConf, os.ModePerm)
-assert.NoError(t, err)
-err = config.LoadConfig(configDir, confName)
-assert.NoError(t, err)
-assert.Equal(t, "credentials", config.GetProviderConf().CredentialsPath)
-err = os.Remove(configFilePath)
-assert.NoError(t, err)
-}
-
 func TestInvalidProxyProtocol(t *testing.T) {
 reset()
 
@@ -1954,18 +1954,11 @@ func (p *BoltProvider) migrateDatabase() error {
 case version == boltDatabaseVersion:
 providerLog(logger.LevelDebug, "bolt database is up to date, current version: %v", version)
 return ErrNoInitRequired
-case version < 15:
+case version < 19:
 err = fmt.Errorf("database version %v is too old, please see the upgrading docs", version)
 providerLog(logger.LevelError, "%v", err)
 logger.ErrorToConsole("%v", err)
 return err
-case version == 15, version == 16, version == 17, version == 18:
-logger.InfoToConsole(fmt.Sprintf("updating database version: %v -> 19", version))
-providerLog(logger.LevelInfo, "updating database version: %v -> 19", version)
-if err = importGCSCredentials(); err != nil {
-return err
-}
-return updateBoltDatabaseVersion(p.dbHandle, 19)
 default:
 if version > boltDatabaseVersion {
 providerLog(logger.LevelError, "database version %v is newer than the supported one: %v", version,
@@ -1987,8 +1980,6 @@ func (p *BoltProvider) revertDatabase(targetVersion int) error {
 return errors.New("current version match target version, nothing to do")
 }
 switch dbVersion.Version {
-case 16, 17, 18, 19:
-return updateBoltDatabaseVersion(p.dbHandle, 15)
 default:
 return fmt.Errorf("database version not handled: %v", dbVersion.Version)
 }
@@ -2405,7 +2396,7 @@ func getBoltDatabaseVersion(dbHandle *bolt.DB) (schemaVersion, error) {
 v := bucket.Get(dbVersionKey)
 if v == nil {
 dbVersion = schemaVersion{
-Version: 15,
+Version: 19,
 }
 return nil
 }
@@ -2414,7 +2405,7 @@ func getBoltDatabaseVersion(dbHandle *bolt.DB) (schemaVersion, error) {
 return dbVersion, err
 }
 
-func updateBoltDatabaseVersion(dbHandle *bolt.DB, version int) error {
+/*func updateBoltDatabaseVersion(dbHandle *bolt.DB, version int) error {
 err := dbHandle.Update(func(tx *bolt.Tx) error {
 bucket := tx.Bucket(dbVersionBucket)
 if bucket == nil {
@@ -2430,4 +2421,4 @@ func updateBoltDatabaseVersion(dbHandle *bolt.DB, version int) error {
 return bucket.Put(dbVersionKey, buf)
 })
 return err
-}
+}*/
@@ -155,7 +155,6 @@ var (
 unixPwdPrefixes = []string{md5cryptPwdPrefix, md5cryptApr1PwdPrefix, sha512cryptPwdPrefix}
 sharedProviders = []string{PGSQLDataProviderName, MySQLDataProviderName, CockroachDataProviderName}
 logSender = "dataprovider"
-credentialsDirPath string
 sqlTableUsers = "users"
 sqlTableFolders = "folders"
 sqlTableFoldersMapping = "folders_mapping"
@@ -322,10 +321,6 @@ type Config struct {
 // you can combine the scopes, for example 3 means password and public key, 5 password and keyboard
 // interactive and so on
 ExternalAuthScope int `json:"external_auth_scope" mapstructure:"external_auth_scope"`
-// CredentialsPath defines the directory for storing user provided credential files such as
-// Google Cloud Storage credentials. It can be a path relative to the config dir or an
-// absolute path
-CredentialsPath string `json:"credentials_path" mapstructure:"credentials_path"`
 // Absolute path to an external program or an HTTP URL to invoke just before the user login.
 // This program/URL allows to modify or create the user trying to login.
 // It is useful if you have users with dynamic fields to update just before the login.
@@ -719,8 +714,6 @@ func Initialize(cnf Config, basePath string, checkAdmins bool) error {
 if cnf.BackupsPath == "" {
 return fmt.Errorf("required directory is invalid, backup path %#v", cnf.BackupsPath)
 }
-credentialsDirPath = getConfigPath(config.CredentialsPath, basePath)
-vfs.SetCredentialsDirPath(credentialsDirPath)
 
 if err = initializeHashingAlgo(&cnf); err != nil {
 return err
@@ -867,13 +860,6 @@ func checkDefaultAdmin() error {
 func InitializeDatabase(cnf Config, basePath string) error {
 config = cnf
 
-if filepath.IsAbs(config.CredentialsPath) {
-credentialsDirPath = config.CredentialsPath
-} else {
-credentialsDirPath = filepath.Join(basePath, config.CredentialsPath)
-}
-vfs.SetCredentialsDirPath(credentialsDirPath)
-
 if err := initializeHashingAlgo(&cnf); err != nil {
 return err
 }
@@ -893,12 +879,6 @@ func InitializeDatabase(cnf Config, basePath string) error {
 func RevertDatabase(cnf Config, basePath string, targetVersion int) error {
 config = cnf
 
-if filepath.IsAbs(config.CredentialsPath) {
-credentialsDirPath = config.CredentialsPath
-} else {
-credentialsDirPath = filepath.Join(basePath, config.CredentialsPath)
-}
-
 err := createProvider(basePath)
 if err != nil {
 return err
@@ -914,12 +894,6 @@ func RevertDatabase(cnf Config, basePath string, targetVersion int) error {
 func ResetDatabase(cnf Config, basePath string) error {
 config = cnf
 
-if filepath.IsAbs(config.CredentialsPath) {
-credentialsDirPath = config.CredentialsPath
-} else {
-credentialsDirPath = filepath.Join(basePath, config.CredentialsPath)
-}
-
 if err := createProvider(basePath); err != nil {
 return err
 }
@@ -3650,89 +3624,6 @@ func isLastActivityRecent(lastActivity int64, minDelay time.Duration) bool {
 return diff < minDelay
 }
 
-func addGCSCredentialsToFolder(folder *vfs.BaseVirtualFolder) (bool, error) {
-if folder.FsConfig.Provider != sdk.GCSFilesystemProvider {
-return false, nil
-}
-if folder.FsConfig.GCSConfig.AutomaticCredentials > 0 {
-return false, nil
-}
-if folder.FsConfig.GCSConfig.Credentials.IsValid() {
-return false, nil
-}
-cred, err := os.ReadFile(folder.GetGCSCredentialsFilePath())
-if err != nil {
-return false, err
-}
-err = json.Unmarshal(cred, &folder.FsConfig.GCSConfig.Credentials)
-if err != nil {
-return false, err
-}
-return true, nil
-}
-
-func addGCSCredentialsToUser(user *User) (bool, error) {
-if user.FsConfig.Provider != sdk.GCSFilesystemProvider {
-return false, nil
-}
-if user.FsConfig.GCSConfig.AutomaticCredentials > 0 {
-return false, nil
-}
-if user.FsConfig.GCSConfig.Credentials.IsValid() {
-return false, nil
-}
-cred, err := os.ReadFile(user.GetGCSCredentialsFilePath())
-if err != nil {
-return false, err
-}
-err = json.Unmarshal(cred, &user.FsConfig.GCSConfig.Credentials)
-if err != nil {
-return false, err
-}
-return true, nil
-}
-
-func importGCSCredentials() error {
-folders, err := provider.dumpFolders()
-if err != nil {
-return fmt.Errorf("unable to get folders: %w", err)
-}
-for idx := range folders {
-folder := &folders[idx]
-added, err := addGCSCredentialsToFolder(folder)
-if err != nil {
-return fmt.Errorf("unable to add GCS credentials to folder %#v: %w", folder.Name, err)
-}
-if added {
-logger.InfoToConsole("importing GCS credentials for folder %#v", folder.Name)
-providerLog(logger.LevelInfo, "importing GCS credentials for folder %#v", folder.Name)
-if err = provider.updateFolder(folder); err != nil {
-return fmt.Errorf("unable to update folder %#v: %w", folder.Name, err)
-}
-}
-}
-
-users, err := provider.dumpUsers()
-if err != nil {
-return fmt.Errorf("unable to get users: %w", err)
-}
-for idx := range users {
-user := &users[idx]
-added, err := addGCSCredentialsToUser(user)
-if err != nil {
-return fmt.Errorf("unable to add GCS credentials to user %#v: %w", user.Username, err)
-}
-if added {
-logger.InfoToConsole("importing GCS credentials for user %#v", user.Username)
-providerLog(logger.LevelInfo, "importing GCS credentials for user %#v", user.Username)
-if err = provider.updateUser(user); err != nil {
-return fmt.Errorf("unable to update user %#v: %w", user.Username, err)
-}
-}
-}
-return nil
-}
-
 func getConfigPath(name, configDir string) string {
 if !util.IsFileInputValid(name) {
 return ""
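For readers tracing the removed upgrade path: the deleted helpers above read each GCS-backed user's or folder's credentials file from the old credentials directory and embedded its JSON into the record before saving it back. A schematic of that read-then-embed step, using hypothetical stand-in types rather than SFTPGo's own, might look like this:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Schematic only: gcsConfig and account are hypothetical stand-ins for the
// provider types; the removed code unmarshalled the file contents into
// FsConfig.GCSConfig.Credentials before calling updateUser/updateFolder.
type gcsConfig struct {
	Credentials json.RawMessage `json:"credentials,omitempty"`
}

type account struct {
	Name string
	GCS  gcsConfig
}

// embedCredentials loads a per-account credentials file and stores its JSON
// inside the account itself, so no file under credentials_path is needed.
func embedCredentials(a *account, credFile string) (bool, error) {
	if len(a.GCS.Credentials) > 0 {
		return false, nil // already embedded, nothing to import
	}
	data, err := os.ReadFile(credFile)
	if err != nil {
		return false, err
	}
	if !json.Valid(data) {
		return false, fmt.Errorf("invalid credentials file for %q", a.Name)
	}
	a.GCS.Credentials = data
	return true, nil
}

func main() {
	a := account{Name: "user1"}
	added, err := embedCredentials(&a, "/tmp/user1.json") // hypothetical path
	fmt.Println(added, err)
}
```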
@@ -42,6 +42,11 @@ const (
 "`description` varchar(512) NULL, `password` varchar(255) NOT NULL, `email` varchar(255) NULL, `status` integer NOT NULL, " +
 "`permissions` longtext NOT NULL, `filters` longtext NULL, `additional_info` longtext NULL, `last_login` bigint NOT NULL, " +
 "`created_at` bigint NOT NULL, `updated_at` bigint NOT NULL);" +
+"CREATE TABLE `{{active_transfers}}` (`id` bigint AUTO_INCREMENT NOT NULL PRIMARY KEY, " +
+"`connection_id` varchar(100) NOT NULL, `transfer_id` bigint NOT NULL, `transfer_type` integer NOT NULL, " +
+"`username` varchar(255) NOT NULL, `folder_name` varchar(255) NULL, `ip` varchar(50) NOT NULL, " +
+"`truncated_size` bigint NOT NULL, `current_ul_size` bigint NOT NULL, `current_dl_size` bigint NOT NULL, " +
+"`created_at` bigint NOT NULL, `updated_at` bigint NOT NULL);" +
 "CREATE TABLE `{{defender_hosts}}` (`id` bigint AUTO_INCREMENT NOT NULL PRIMARY KEY, " +
 "`ip` varchar(50) NOT NULL UNIQUE, `ban_time` bigint NOT NULL, `updated_at` bigint NOT NULL);" +
 "CREATE TABLE `{{defender_events}}` (`id` bigint AUTO_INCREMENT NOT NULL PRIMARY KEY, " +
@@ -51,6 +56,11 @@ const (
 "CREATE TABLE `{{folders}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `name` varchar(255) NOT NULL UNIQUE, " +
 "`description` varchar(512) NULL, `path` longtext NULL, `used_quota_size` bigint NOT NULL, " +
 "`used_quota_files` integer NOT NULL, `last_quota_update` bigint NOT NULL, `filesystem` longtext NULL);" +
+"CREATE TABLE `{{groups}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, " +
+"`name` varchar(255) NOT NULL UNIQUE, `description` varchar(512) NULL, `created_at` bigint NOT NULL, " +
+"`updated_at` bigint NOT NULL, `user_settings` longtext NULL);" +
+"CREATE TABLE `{{shared_sessions}}` (`key` varchar(128) NOT NULL PRIMARY KEY, " +
+"`data` longtext NOT NULL, `type` integer NOT NULL, `timestamp` bigint NOT NULL);" +
 "CREATE TABLE `{{users}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `username` varchar(255) NOT NULL UNIQUE, " +
 "`status` integer NOT NULL, `expiration_date` bigint NOT NULL, `description` varchar(512) NULL, `password` longtext NULL, " +
 "`public_keys` longtext NULL, `home_dir` longtext NOT NULL, `uid` bigint NOT NULL, `gid` bigint NOT NULL, " +
@@ -58,12 +68,33 @@ const (
 "`permissions` longtext NOT NULL, `used_quota_size` bigint NOT NULL, `used_quota_files` integer NOT NULL, " +
 "`last_quota_update` bigint NOT NULL, `upload_bandwidth` integer NOT NULL, `download_bandwidth` integer NOT NULL, " +
 "`last_login` bigint NOT NULL, `filters` longtext NULL, `filesystem` longtext NULL, `additional_info` longtext NULL, " +
-"`created_at` bigint NOT NULL, `updated_at` bigint NOT NULL, `email` varchar(255) NULL);" +
-"CREATE TABLE `{{folders_mapping}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `virtual_path` longtext NOT NULL, " +
+"`created_at` bigint NOT NULL, `updated_at` bigint NOT NULL, `email` varchar(255) NULL, " +
+"`upload_data_transfer` integer NOT NULL, `download_data_transfer` integer NOT NULL, " +
+"`total_data_transfer` integer NOT NULL, `used_upload_data_transfer` integer NOT NULL, " +
+"`used_download_data_transfer` integer NOT NULL);" +
+"CREATE TABLE `{{groups_folders_mapping}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, " +
+"`group_id` integer NOT NULL, `folder_id` integer NOT NULL, " +
+"`virtual_path` longtext NOT NULL, `quota_size` bigint NOT NULL, `quota_files` integer NOT NULL);" +
+"CREATE TABLE `{{users_groups_mapping}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, " +
+"`user_id` integer NOT NULL, `group_id` integer NOT NULL, `group_type` integer NOT NULL);" +
+"CREATE TABLE `{{users_folders_mapping}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `virtual_path` longtext NOT NULL, " +
 "`quota_size` bigint NOT NULL, `quota_files` integer NOT NULL, `folder_id` integer NOT NULL, `user_id` integer NOT NULL);" +
-"ALTER TABLE `{{folders_mapping}}` ADD CONSTRAINT `{{prefix}}unique_mapping` UNIQUE (`user_id`, `folder_id`);" +
-"ALTER TABLE `{{folders_mapping}}` ADD CONSTRAINT `{{prefix}}folders_mapping_folder_id_fk_folders_id` FOREIGN KEY (`folder_id`) REFERENCES `{{folders}}` (`id`) ON DELETE CASCADE;" +
-"ALTER TABLE `{{folders_mapping}}` ADD CONSTRAINT `{{prefix}}folders_mapping_user_id_fk_users_id` FOREIGN KEY (`user_id`) REFERENCES `{{users}}` (`id`) ON DELETE CASCADE;" +
+"ALTER TABLE `{{users_folders_mapping}}` ADD CONSTRAINT `{{prefix}}unique_user_folder_mapping` " +
+"UNIQUE (`user_id`, `folder_id`);" +
+"ALTER TABLE `{{users_folders_mapping}}` ADD CONSTRAINT `{{prefix}}users_folders_mapping_user_id_fk_users_id` " +
+"FOREIGN KEY (`user_id`) REFERENCES `{{users}}` (`id`) ON DELETE CASCADE;" +
+"ALTER TABLE `{{users_folders_mapping}}` ADD CONSTRAINT `{{prefix}}users_folders_mapping_folder_id_fk_folders_id` " +
+"FOREIGN KEY (`folder_id`) REFERENCES `{{folders}}` (`id`) ON DELETE CASCADE;" +
+"ALTER TABLE `{{users_groups_mapping}}` ADD CONSTRAINT `{{prefix}}unique_user_group_mapping` UNIQUE (`user_id`, `group_id`);" +
+"ALTER TABLE `{{groups_folders_mapping}}` ADD CONSTRAINT `{{prefix}}unique_group_folder_mapping` UNIQUE (`group_id`, `folder_id`);" +
+"ALTER TABLE `{{users_groups_mapping}}` ADD CONSTRAINT `{{prefix}}users_groups_mapping_group_id_fk_groups_id` " +
+"FOREIGN KEY (`group_id`) REFERENCES `{{groups}}` (`id`) ON DELETE NO ACTION;" +
+"ALTER TABLE `{{users_groups_mapping}}` ADD CONSTRAINT `{{prefix}}users_groups_mapping_user_id_fk_users_id` " +
+"FOREIGN KEY (`user_id`) REFERENCES `{{users}}` (`id`) ON DELETE CASCADE;" +
+"ALTER TABLE `{{groups_folders_mapping}}` ADD CONSTRAINT `{{prefix}}groups_folders_mapping_folder_id_fk_folders_id` " +
+"FOREIGN KEY (`folder_id`) REFERENCES `{{folders}}` (`id`) ON DELETE CASCADE;" +
+"ALTER TABLE `{{groups_folders_mapping}}` ADD CONSTRAINT `{{prefix}}groups_folders_mapping_group_id_fk_groups_id` " +
+"FOREIGN KEY (`group_id`) REFERENCES `{{groups}}` (`id`) ON DELETE CASCADE;" +
 "CREATE TABLE `{{shares}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, " +
 "`share_id` varchar(60) NOT NULL UNIQUE, `name` varchar(255) NOT NULL, `description` varchar(512) NULL, " +
 "`scope` integer NOT NULL, `paths` longtext NOT NULL, `created_at` bigint NOT NULL, " +
@@ -81,83 +112,13 @@ const (
 "CREATE INDEX `{{prefix}}defender_hosts_updated_at_idx` ON `{{defender_hosts}}` (`updated_at`);" +
 "CREATE INDEX `{{prefix}}defender_hosts_ban_time_idx` ON `{{defender_hosts}}` (`ban_time`);" +
 "CREATE INDEX `{{prefix}}defender_events_date_time_idx` ON `{{defender_events}}` (`date_time`);" +
-"INSERT INTO {{schema_version}} (version) VALUES (15);"
-mysqlV16SQL = "ALTER TABLE `{{users}}` ADD COLUMN `download_data_transfer` integer DEFAULT 0 NOT NULL;" +
-"ALTER TABLE `{{users}}` ALTER COLUMN `download_data_transfer` DROP DEFAULT;" +
-"ALTER TABLE `{{users}}` ADD COLUMN `total_data_transfer` integer DEFAULT 0 NOT NULL;" +
-"ALTER TABLE `{{users}}` ALTER COLUMN `total_data_transfer` DROP DEFAULT;" +
-"ALTER TABLE `{{users}}` ADD COLUMN `upload_data_transfer` integer DEFAULT 0 NOT NULL;" +
-"ALTER TABLE `{{users}}` ALTER COLUMN `upload_data_transfer` DROP DEFAULT;" +
-"ALTER TABLE `{{users}}` ADD COLUMN `used_download_data_transfer` integer DEFAULT 0 NOT NULL;" +
-"ALTER TABLE `{{users}}` ALTER COLUMN `used_download_data_transfer` DROP DEFAULT;" +
-"ALTER TABLE `{{users}}` ADD COLUMN `used_upload_data_transfer` integer DEFAULT 0 NOT NULL;" +
-"ALTER TABLE `{{users}}` ALTER COLUMN `used_upload_data_transfer` DROP DEFAULT;" +
-"CREATE TABLE `{{active_transfers}}` (`id` bigint AUTO_INCREMENT NOT NULL PRIMARY KEY, " +
-"`connection_id` varchar(100) NOT NULL, `transfer_id` bigint NOT NULL, `transfer_type` integer NOT NULL, " +
-"`username` varchar(255) NOT NULL, `folder_name` varchar(255) NULL, `ip` varchar(50) NOT NULL, " +
-"`truncated_size` bigint NOT NULL, `current_ul_size` bigint NOT NULL, `current_dl_size` bigint NOT NULL, " +
-"`created_at` bigint NOT NULL, `updated_at` bigint NOT NULL);" +
 "CREATE INDEX `{{prefix}}active_transfers_connection_id_idx` ON `{{active_transfers}}` (`connection_id`);" +
 "CREATE INDEX `{{prefix}}active_transfers_transfer_id_idx` ON `{{active_transfers}}` (`transfer_id`);" +
-"CREATE INDEX `{{prefix}}active_transfers_updated_at_idx` ON `{{active_transfers}}` (`updated_at`);"
-mysqlV16DownSQL = "ALTER TABLE `{{users}}` DROP COLUMN `used_upload_data_transfer`;" +
-"ALTER TABLE `{{users}}` DROP COLUMN `used_download_data_transfer`;" +
-"ALTER TABLE `{{users}}` DROP COLUMN `upload_data_transfer`;" +
-"ALTER TABLE `{{users}}` DROP COLUMN `total_data_transfer`;" +
-"ALTER TABLE `{{users}}` DROP COLUMN `download_data_transfer`;" +
-"DROP TABLE `{{active_transfers}}` CASCADE;"
-mysqlV17SQL = "CREATE TABLE `{{groups}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, " +
-"`name` varchar(255) NOT NULL UNIQUE, `description` varchar(512) NULL, `created_at` bigint NOT NULL, " +
-"`updated_at` bigint NOT NULL, `user_settings` longtext NULL);" +
-"CREATE TABLE `{{groups_folders_mapping}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, " +
-"`group_id` integer NOT NULL, `folder_id` integer NOT NULL, " +
-"`virtual_path` longtext NOT NULL, `quota_size` bigint NOT NULL, `quota_files` integer NOT NULL);" +
-"CREATE TABLE `{{users_groups_mapping}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, " +
-"`user_id` integer NOT NULL, `group_id` integer NOT NULL, `group_type` integer NOT NULL);" +
-"ALTER TABLE `{{folders_mapping}}` DROP FOREIGN KEY `{{prefix}}folders_mapping_folder_id_fk_folders_id`;" +
-"ALTER TABLE `{{folders_mapping}}` DROP FOREIGN KEY `{{prefix}}folders_mapping_user_id_fk_users_id`;" +
-"ALTER TABLE `{{folders_mapping}}` DROP INDEX `{{prefix}}unique_mapping`;" +
-"RENAME TABLE `{{folders_mapping}}` TO `{{users_folders_mapping}}`;" +
-"ALTER TABLE `{{users_folders_mapping}}` ADD CONSTRAINT `{{prefix}}unique_user_folder_mapping` " +
-"UNIQUE (`user_id`, `folder_id`);" +
-"ALTER TABLE `{{users_folders_mapping}}` ADD CONSTRAINT `{{prefix}}users_folders_mapping_user_id_fk_users_id` " +
-"FOREIGN KEY (`user_id`) REFERENCES `{{users}}` (`id`) ON DELETE CASCADE;" +
-"ALTER TABLE `{{users_folders_mapping}}` ADD CONSTRAINT `{{prefix}}users_folders_mapping_folder_id_fk_folders_id` " +
-"FOREIGN KEY (`folder_id`) REFERENCES `{{folders}}` (`id`) ON DELETE CASCADE;" +
-"ALTER TABLE `{{users_groups_mapping}}` ADD CONSTRAINT `{{prefix}}unique_user_group_mapping` UNIQUE (`user_id`, `group_id`);" +
-"ALTER TABLE `{{groups_folders_mapping}}` ADD CONSTRAINT `{{prefix}}unique_group_folder_mapping` UNIQUE (`group_id`, `folder_id`);" +
-"ALTER TABLE `{{users_groups_mapping}}` ADD CONSTRAINT `{{prefix}}users_groups_mapping_group_id_fk_groups_id` " +
-"FOREIGN KEY (`group_id`) REFERENCES `{{groups}}` (`id`) ON DELETE NO ACTION;" +
-"ALTER TABLE `{{users_groups_mapping}}` ADD CONSTRAINT `{{prefix}}users_groups_mapping_user_id_fk_users_id` " +
-"FOREIGN KEY (`user_id`) REFERENCES `{{users}}` (`id`) ON DELETE CASCADE;" +
-"ALTER TABLE `{{groups_folders_mapping}}` ADD CONSTRAINT `{{prefix}}groups_folders_mapping_folder_id_fk_folders_id` " +
-"FOREIGN KEY (`folder_id`) REFERENCES `{{folders}}` (`id`) ON DELETE CASCADE;" +
-"ALTER TABLE `{{groups_folders_mapping}}` ADD CONSTRAINT `{{prefix}}groups_folders_mapping_group_id_fk_groups_id` " +
-"FOREIGN KEY (`group_id`) REFERENCES `{{groups}}` (`id`) ON DELETE CASCADE;" +
-"CREATE INDEX `{{prefix}}groups_updated_at_idx` ON `{{groups}}` (`updated_at`);"
-mysqlV17DownSQL = "ALTER TABLE `{{groups_folders_mapping}}` DROP FOREIGN KEY `{{prefix}}groups_folders_mapping_group_id_fk_groups_id`;" +
-"ALTER TABLE `{{groups_folders_mapping}}` DROP FOREIGN KEY `{{prefix}}groups_folders_mapping_folder_id_fk_folders_id`;" +
-"ALTER TABLE `{{users_groups_mapping}}` DROP FOREIGN KEY `{{prefix}}users_groups_mapping_user_id_fk_users_id`;" +
-"ALTER TABLE `{{users_groups_mapping}}` DROP FOREIGN KEY `{{prefix}}users_groups_mapping_group_id_fk_groups_id`;" +
-"ALTER TABLE `{{groups_folders_mapping}}` DROP INDEX `{{prefix}}unique_group_folder_mapping`;" +
-"ALTER TABLE `{{users_groups_mapping}}` DROP INDEX `{{prefix}}unique_user_group_mapping`;" +
-"DROP TABLE `{{users_groups_mapping}}` CASCADE;" +
-"DROP TABLE `{{groups_folders_mapping}}` CASCADE;" +
-"DROP TABLE `{{groups}}` CASCADE;" +
-"ALTER TABLE `{{users_folders_mapping}}` DROP FOREIGN KEY `{{prefix}}users_folders_mapping_folder_id_fk_folders_id`;" +
-"ALTER TABLE `{{users_folders_mapping}}` DROP FOREIGN KEY `{{prefix}}users_folders_mapping_user_id_fk_users_id`;" +
-"ALTER TABLE `{{users_folders_mapping}}` DROP INDEX `{{prefix}}unique_user_folder_mapping`;" +
-"RENAME TABLE `{{users_folders_mapping}}` TO `{{folders_mapping}}`;" +
-"ALTER TABLE `{{folders_mapping}}` ADD CONSTRAINT `{{prefix}}unique_mapping` UNIQUE (`user_id`, `folder_id`);" +
-"ALTER TABLE `{{folders_mapping}}` ADD CONSTRAINT `{{prefix}}folders_mapping_user_id_fk_users_id` " +
-"FOREIGN KEY (`user_id`) REFERENCES `{{users}}` (`id`) ON DELETE CASCADE;" +
-"ALTER TABLE `{{folders_mapping}}` ADD CONSTRAINT `{{prefix}}folders_mapping_folder_id_fk_folders_id` " +
-"FOREIGN KEY (`folder_id`) REFERENCES `{{folders}}` (`id`) ON DELETE CASCADE;"
-mysqlV19SQL = "CREATE TABLE `{{shared_sessions}}` (`key` varchar(128) NOT NULL PRIMARY KEY, " +
-"`data` longtext NOT NULL, `type` integer NOT NULL, `timestamp` bigint NOT NULL);" +
+"CREATE INDEX `{{prefix}}active_transfers_updated_at_idx` ON `{{active_transfers}}` (`updated_at`);" +
+"CREATE INDEX `{{prefix}}groups_updated_at_idx` ON `{{groups}}` (`updated_at`);" +
 "CREATE INDEX `{{prefix}}shared_sessions_type_idx` ON `{{shared_sessions}}` (`type`);" +
-"CREATE INDEX `{{prefix}}shared_sessions_timestamp_idx` ON `{{shared_sessions}}` (`timestamp`);"
-mysqlV19DownSQL = "DROP TABLE `{{shared_sessions}}` CASCADE;"
+"CREATE INDEX `{{prefix}}shared_sessions_timestamp_idx` ON `{{shared_sessions}}` (`timestamp`);" +
+"INSERT INTO {{schema_version}} (version) VALUES (19);"
 )
 
 // MySQLProvider defines the auth provider for MySQL/MariaDB database
@@ -559,20 +520,11 @@ func (p *MySQLProvider) initializeDatabase() error {
 if errors.Is(err, sql.ErrNoRows) {
 return errSchemaVersionEmpty
 }
-logger.InfoToConsole("creating initial database schema, version 15")
-providerLog(logger.LevelInfo, "creating initial database schema, version 15")
-initialSQL := strings.ReplaceAll(mysqlInitialSQL, "{{schema_version}}", sqlTableSchemaVersion)
-initialSQL = strings.ReplaceAll(initialSQL, "{{admins}}", sqlTableAdmins)
-initialSQL = strings.ReplaceAll(initialSQL, "{{folders}}", sqlTableFolders)
-initialSQL = strings.ReplaceAll(initialSQL, "{{users}}", sqlTableUsers)
-initialSQL = strings.ReplaceAll(initialSQL, "{{folders_mapping}}", sqlTableFoldersMapping)
-initialSQL = strings.ReplaceAll(initialSQL, "{{api_keys}}", sqlTableAPIKeys)
-initialSQL = strings.ReplaceAll(initialSQL, "{{shares}}", sqlTableShares)
-initialSQL = strings.ReplaceAll(initialSQL, "{{defender_events}}", sqlTableDefenderEvents)
-initialSQL = strings.ReplaceAll(initialSQL, "{{defender_hosts}}", sqlTableDefenderHosts)
-initialSQL = strings.ReplaceAll(initialSQL, "{{prefix}}", config.SQLTablesPrefix)
+logger.InfoToConsole("creating initial database schema, version 19")
+providerLog(logger.LevelInfo, "creating initial database schema, version 19")
+initialSQL := sqlReplaceAll(mysqlInitialSQL)
 
-return sqlCommonExecSQLAndUpdateDBVersion(p.dbHandle, strings.Split(initialSQL, ";"), 15, true)
+return sqlCommonExecSQLAndUpdateDBVersion(p.dbHandle, strings.Split(initialSQL, ";"), 19, true)
 }
 
 func (p *MySQLProvider) migrateDatabase() error { //nolint:dupl
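After this change the MySQL initializer builds the schema with a single sqlReplaceAll(mysqlInitialSQL) call instead of the long chain of strings.ReplaceAll calls. The helper's body is not part of this diff; a plausible sketch of such a placeholder-substitution helper (an assumption, not the actual implementation) could look like this:

```go
package main

import (
	"fmt"
	"strings"
)

// Hypothetical sketch in the spirit of sqlReplaceAll; the real implementation
// is not shown in this diff, and the table names/prefix below are assumptions.
func sqlReplaceAll(sql string) string {
	r := strings.NewReplacer(
		"{{schema_version}}", "schema_version",
		"{{users}}", "users",
		"{{folders}}", "folders",
		"{{users_folders_mapping}}", "users_folders_mapping",
		"{{prefix}}", "", // SQL tables prefix from the provider config
	)
	return r.Replace(sql)
}

func main() {
	fmt.Println(sqlReplaceAll("CREATE INDEX `{{prefix}}users_updated_at_idx` ON `{{users}}` (`updated_at`);"))
}
```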
@@ -585,19 +537,11 @@ func (p *MySQLProvider) migrateDatabase() error { //nolint:dupl
 case version == sqlDatabaseVersion:
 providerLog(logger.LevelDebug, "sql database is up to date, current version: %v", version)
 return ErrNoInitRequired
-case version < 15:
+case version < 19:
 err = fmt.Errorf("database version %v is too old, please see the upgrading docs", version)
 providerLog(logger.LevelError, "%v", err)
 logger.ErrorToConsole("%v", err)
 return err
-case version == 15:
-return updateMySQLDatabaseFromV15(p.dbHandle)
-case version == 16:
-return updateMySQLDatabaseFromV16(p.dbHandle)
-case version == 17:
-return updateMySQLDatabaseFromV17(p.dbHandle)
-case version == 18:
-return updateMySQLDatabaseFromV18(p.dbHandle)
 default:
 if version > sqlDatabaseVersion {
 providerLog(logger.LevelError, "database version %v is newer than the supported one: %v", version,
@@ -620,14 +564,6 @@ func (p *MySQLProvider) revertDatabase(targetVersion int) error {
 }
 
 switch dbVersion.Version {
-case 16:
-return downgradeMySQLDatabaseFromV16(p.dbHandle)
-case 17:
-return downgradeMySQLDatabaseFromV17(p.dbHandle)
-case 18:
-return downgradeMySQLDatabaseFromV18(p.dbHandle)
-case 19:
-return downgradeMySQLDatabaseFromV19(p.dbHandle)
 default:
 return fmt.Errorf("database version not handled: %v", dbVersion.Version)
 }
@@ -637,128 +573,3 @@ func (p *MySQLProvider) resetDatabase() error {
 sql := sqlReplaceAll(mysqlResetSQL)
 return sqlCommonExecSQLAndUpdateDBVersion(p.dbHandle, strings.Split(sql, ";"), 0, false)
 }
-
-func updateMySQLDatabaseFromV15(dbHandle *sql.DB) error {
-if err := updateMySQLDatabaseFrom15To16(dbHandle); err != nil {
-return err
-}
-return updateMySQLDatabaseFromV16(dbHandle)
-}
-
-func updateMySQLDatabaseFromV16(dbHandle *sql.DB) error {
-if err := updateMySQLDatabaseFrom16To17(dbHandle); err != nil {
-return err
-}
-return updateMySQLDatabaseFromV17(dbHandle)
-}
-
-func updateMySQLDatabaseFromV17(dbHandle *sql.DB) error {
-if err := updateMySQLDatabaseFrom17To18(dbHandle); err != nil {
-return err
-}
-return updateMySQLDatabaseFromV18(dbHandle)
-}
-
-func updateMySQLDatabaseFromV18(dbHandle *sql.DB) error {
-return updateMySQLDatabaseFrom18To19(dbHandle)
-}
-
-func downgradeMySQLDatabaseFromV16(dbHandle *sql.DB) error {
-return downgradeMySQLDatabaseFrom16To15(dbHandle)
-}
-
-func downgradeMySQLDatabaseFromV17(dbHandle *sql.DB) error {
-if err := downgradeMySQLDatabaseFrom17To16(dbHandle); err != nil {
-return err
-}
-return downgradeMySQLDatabaseFromV16(dbHandle)
-}
-
-func downgradeMySQLDatabaseFromV18(dbHandle *sql.DB) error {
-if err := downgradeMySQLDatabaseFrom18To17(dbHandle); err != nil {
-return err
-}
-return downgradeMySQLDatabaseFromV17(dbHandle)
-}
-
-func downgradeMySQLDatabaseFromV19(dbHandle *sql.DB) error {
-if err := downgradeMySQLDatabaseFrom19To18(dbHandle); err != nil {
-return err
-}
-return downgradeMySQLDatabaseFromV18(dbHandle)
-}
-
-func updateMySQLDatabaseFrom15To16(dbHandle *sql.DB) error {
-logger.InfoToConsole("updating database version: 15 -> 16")
-providerLog(logger.LevelInfo, "updating database version: 15 -> 16")
-sql := strings.ReplaceAll(mysqlV16SQL, "{{users}}", sqlTableUsers)
-sql = strings.ReplaceAll(sql, "{{active_transfers}}", sqlTableActiveTransfers)
-sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
-return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, strings.Split(sql, ";"), 16, true)
-}
-
-func updateMySQLDatabaseFrom16To17(dbHandle *sql.DB) error {
-logger.InfoToConsole("updating database version: 16 -> 17")
-providerLog(logger.LevelInfo, "updating database version: 16 -> 17")
-sql := strings.ReplaceAll(mysqlV17SQL, "{{users}}", sqlTableUsers)
-sql = strings.ReplaceAll(sql, "{{groups}}", sqlTableGroups)
-sql = strings.ReplaceAll(sql, "{{folders}}", sqlTableFolders)
-sql = strings.ReplaceAll(sql, "{{folders_mapping}}", sqlTableFoldersMapping)
-sql = strings.ReplaceAll(sql, "{{users_folders_mapping}}", sqlTableUsersFoldersMapping)
-sql = strings.ReplaceAll(sql, "{{users_groups_mapping}}", sqlTableUsersGroupsMapping)
-sql = strings.ReplaceAll(sql, "{{groups_folders_mapping}}", sqlTableGroupsFoldersMapping)
-sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
-return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, strings.Split(sql, ";"), 17, true)
-}
-
-func updateMySQLDatabaseFrom17To18(dbHandle *sql.DB) error {
-logger.InfoToConsole("updating database version: 17 -> 18")
-providerLog(logger.LevelInfo, "updating database version: 17 -> 18")
-if err := importGCSCredentials(); err != nil {
-return err
-}
-return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, nil, 18, true)
-}
-
-func updateMySQLDatabaseFrom18To19(dbHandle *sql.DB) error {
-logger.InfoToConsole("updating database version: 18 -> 19")
-providerLog(logger.LevelInfo, "updating database version: 18 -> 19")
-sql := strings.ReplaceAll(mysqlV19SQL, "{{shared_sessions}}", sqlTableSharedSessions)
-sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
-return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, strings.Split(sql, ";"), 19, true)
-}
-
-func downgradeMySQLDatabaseFrom16To15(dbHandle *sql.DB) error {
-logger.InfoToConsole("downgrading database version: 16 -> 15")
-providerLog(logger.LevelInfo, "downgrading database version: 16 -> 15")
-sql := strings.ReplaceAll(mysqlV16DownSQL, "{{users}}", sqlTableUsers)
-sql = strings.ReplaceAll(sql, "{{active_transfers}}", sqlTableActiveTransfers)
-return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, strings.Split(sql, ";"), 15, false)
-}
-
-func downgradeMySQLDatabaseFrom17To16(dbHandle *sql.DB) error {
-logger.InfoToConsole("downgrading database version: 17 -> 16")
-providerLog(logger.LevelInfo, "downgrading database version: 17 -> 16")
-sql := strings.ReplaceAll(mysqlV17DownSQL, "{{users}}", sqlTableUsers)
-sql = strings.ReplaceAll(sql, "{{groups}}", sqlTableGroups)
-sql = strings.ReplaceAll(sql, "{{folders}}", sqlTableFolders)
-sql = strings.ReplaceAll(sql, "{{folders_mapping}}", sqlTableFoldersMapping)
-sql = strings.ReplaceAll(sql, "{{users_folders_mapping}}", sqlTableUsersFoldersMapping)
-sql = strings.ReplaceAll(sql, "{{users_groups_mapping}}", sqlTableUsersGroupsMapping)
-sql = strings.ReplaceAll(sql, "{{groups_folders_mapping}}", sqlTableGroupsFoldersMapping)
-sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
-return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, strings.Split(sql, ";"), 16, false)
-}
-
-func downgradeMySQLDatabaseFrom18To17(dbHandle *sql.DB) error {
-logger.InfoToConsole("downgrading database version: 18 -> 17")
-providerLog(logger.LevelInfo, "downgrading database version: 18 -> 17")
-return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, nil, 17, false)
-}
-
-func downgradeMySQLDatabaseFrom19To18(dbHandle *sql.DB) error {
-logger.InfoToConsole("downgrading database version: 19 -> 18")
-providerLog(logger.LevelInfo, "downgrading database version: 19 -> 18")
-sql := strings.ReplaceAll(mysqlV19DownSQL, "{{shared_sessions}}", sqlTableSharedSessions)
-return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 18, false)
-}
@@ -42,6 +42,11 @@ CREATE TABLE "{{admins}}" ("id" serial NOT NULL PRIMARY KEY, "username" varchar(
 "description" varchar(512) NULL, "password" varchar(255) NOT NULL, "email" varchar(255) NULL, "status" integer NOT NULL,
 "permissions" text NOT NULL, "filters" text NULL, "additional_info" text NULL, "last_login" bigint NOT NULL,
 "created_at" bigint NOT NULL, "updated_at" bigint NOT NULL);
+CREATE TABLE "{{active_transfers}}" ("id" bigserial NOT NULL PRIMARY KEY, "connection_id" varchar(100) NOT NULL,
+"transfer_id" bigint NOT NULL, "transfer_type" integer NOT NULL, "username" varchar(255) NOT NULL,
+"folder_name" varchar(255) NULL, "ip" varchar(50) NOT NULL, "truncated_size" bigint NOT NULL,
+"current_ul_size" bigint NOT NULL, "current_dl_size" bigint NOT NULL, "created_at" bigint NOT NULL,
+"updated_at" bigint NOT NULL);
 CREATE TABLE "{{defender_hosts}}" ("id" bigserial NOT NULL PRIMARY KEY, "ip" varchar(50) NOT NULL UNIQUE,
 "ban_time" bigint NOT NULL, "updated_at" bigint NOT NULL);
 CREATE TABLE "{{defender_events}}" ("id" bigserial NOT NULL PRIMARY KEY, "date_time" bigint NOT NULL, "score" integer NOT NULL,
@@ -51,19 +56,29 @@ ALTER TABLE "{{defender_events}}" ADD CONSTRAINT "{{prefix}}defender_events_host
 CREATE TABLE "{{folders}}" ("id" serial NOT NULL PRIMARY KEY, "name" varchar(255) NOT NULL UNIQUE, "description" varchar(512) NULL,
 "path" text NULL, "used_quota_size" bigint NOT NULL, "used_quota_files" integer NOT NULL, "last_quota_update" bigint NOT NULL,
 "filesystem" text NULL);
+CREATE TABLE "{{groups}}" ("id" serial NOT NULL PRIMARY KEY, "name" varchar(255) NOT NULL UNIQUE,
+"description" varchar(512) NULL, "created_at" bigint NOT NULL, "updated_at" bigint NOT NULL, "user_settings" text NULL);
+CREATE TABLE "{{shared_sessions}}" ("key" varchar(128) NOT NULL PRIMARY KEY,
+"data" text NOT NULL, "type" integer NOT NULL, "timestamp" bigint NOT NULL);
 CREATE TABLE "{{users}}" ("id" serial NOT NULL PRIMARY KEY, "username" varchar(255) NOT NULL UNIQUE, "status" integer NOT NULL,
 "expiration_date" bigint NOT NULL, "description" varchar(512) NULL, "password" text NULL, "public_keys" text NULL,
 "home_dir" text NOT NULL, "uid" bigint NOT NULL, "gid" bigint NOT NULL, "max_sessions" integer NOT NULL,
 "quota_size" bigint NOT NULL, "quota_files" integer NOT NULL, "permissions" text NOT NULL, "used_quota_size" bigint NOT NULL,
 "used_quota_files" integer NOT NULL, "last_quota_update" bigint NOT NULL, "upload_bandwidth" integer NOT NULL,
 "download_bandwidth" integer NOT NULL, "last_login" bigint NOT NULL, "filters" text NULL, "filesystem" text NULL,
-"additional_info" text NULL, "created_at" bigint NOT NULL, "updated_at" bigint NOT NULL, "email" varchar(255) NULL);
-CREATE TABLE "{{folders_mapping}}" ("id" serial NOT NULL PRIMARY KEY, "virtual_path" text NOT NULL,
+"additional_info" text NULL, "created_at" bigint NOT NULL, "updated_at" bigint NOT NULL, "email" varchar(255) NULL,
+"upload_data_transfer" integer NOT NULL, "download_data_transfer" integer NOT NULL, "total_data_transfer" integer NOT NULL,
+"used_upload_data_transfer" integer NOT NULL, "used_download_data_transfer" integer NOT NULL);
+CREATE TABLE "{{groups_folders_mapping}}" ("id" serial NOT NULL PRIMARY KEY, "group_id" integer NOT NULL,
+"folder_id" integer NOT NULL, "virtual_path" text NOT NULL, "quota_size" bigint NOT NULL, "quota_files" integer NOT NULL);
+CREATE TABLE "{{users_groups_mapping}}" ("id" serial NOT NULL PRIMARY KEY, "user_id" integer NOT NULL,
+"group_id" integer NOT NULL, "group_type" integer NOT NULL);
+CREATE TABLE "{{users_folders_mapping}}" ("id" serial NOT NULL PRIMARY KEY, "virtual_path" text NOT NULL,
 "quota_size" bigint NOT NULL, "quota_files" integer NOT NULL, "folder_id" integer NOT NULL, "user_id" integer NOT NULL);
-ALTER TABLE "{{folders_mapping}}" ADD CONSTRAINT "{{prefix}}unique_mapping" UNIQUE ("user_id", "folder_id");
-ALTER TABLE "{{folders_mapping}}" ADD CONSTRAINT "{{prefix}}folders_mapping_folder_id_fk_folders_id"
+ALTER TABLE "{{users_folders_mapping}}" ADD CONSTRAINT "{{prefix}}unique_user_folder_mapping" UNIQUE ("user_id", "folder_id");
+ALTER TABLE "{{users_folders_mapping}}" ADD CONSTRAINT "{{prefix}}users_folders_mapping_folder_id_fk_folders_id"
 FOREIGN KEY ("folder_id") REFERENCES "{{folders}}" ("id") MATCH SIMPLE ON UPDATE NO ACTION ON DELETE CASCADE;
-ALTER TABLE "{{folders_mapping}}" ADD CONSTRAINT "{{prefix}}folders_mapping_user_id_fk_users_id"
+ALTER TABLE "{{users_folders_mapping}}" ADD CONSTRAINT "{{prefix}}users_folders_mapping_user_id_fk_users_id"
 FOREIGN KEY ("user_id") REFERENCES "{{users}}" ("id") MATCH SIMPLE ON UPDATE NO ACTION ON DELETE CASCADE;
 CREATE TABLE "{{shares}}" ("id" serial NOT NULL PRIMARY KEY,
 "share_id" varchar(60) NOT NULL UNIQUE, "name" varchar(255) NOT NULL, "description" varchar(512) NULL,
@ -81,57 +96,6 @@ ALTER TABLE "{{api_keys}}" ADD CONSTRAINT "{{prefix}}api_keys_admin_id_fk_admins
REFERENCES "{{admins}}" ("id") MATCH SIMPLE ON UPDATE NO ACTION ON DELETE CASCADE;
ALTER TABLE "{{api_keys}}" ADD CONSTRAINT "{{prefix}}api_keys_user_id_fk_users_id" FOREIGN KEY ("user_id")
REFERENCES "{{users}}" ("id") MATCH SIMPLE ON UPDATE NO ACTION ON DELETE CASCADE;
CREATE INDEX "{{prefix}}folders_mapping_folder_id_idx" ON "{{folders_mapping}}" ("folder_id");
CREATE INDEX "{{prefix}}folders_mapping_user_id_idx" ON "{{folders_mapping}}" ("user_id");
CREATE INDEX "{{prefix}}api_keys_admin_id_idx" ON "{{api_keys}}" ("admin_id");
CREATE INDEX "{{prefix}}api_keys_user_id_idx" ON "{{api_keys}}" ("user_id");
CREATE INDEX "{{prefix}}users_updated_at_idx" ON "{{users}}" ("updated_at");
CREATE INDEX "{{prefix}}shares_user_id_idx" ON "{{shares}}" ("user_id");
CREATE INDEX "{{prefix}}defender_hosts_updated_at_idx" ON "{{defender_hosts}}" ("updated_at");
CREATE INDEX "{{prefix}}defender_hosts_ban_time_idx" ON "{{defender_hosts}}" ("ban_time");
CREATE INDEX "{{prefix}}defender_events_date_time_idx" ON "{{defender_events}}" ("date_time");
CREATE INDEX "{{prefix}}defender_events_host_id_idx" ON "{{defender_events}}" ("host_id");
INSERT INTO {{schema_version}} (version) VALUES (15);
`
pgsqlV16SQL = `ALTER TABLE "{{users}}" ADD COLUMN "download_data_transfer" integer DEFAULT 0 NOT NULL;
ALTER TABLE "{{users}}" ALTER COLUMN "download_data_transfer" DROP DEFAULT;
ALTER TABLE "{{users}}" ADD COLUMN "total_data_transfer" integer DEFAULT 0 NOT NULL;
ALTER TABLE "{{users}}" ALTER COLUMN "total_data_transfer" DROP DEFAULT;
ALTER TABLE "{{users}}" ADD COLUMN "upload_data_transfer" integer DEFAULT 0 NOT NULL;
ALTER TABLE "{{users}}" ALTER COLUMN "upload_data_transfer" DROP DEFAULT;
ALTER TABLE "{{users}}" ADD COLUMN "used_download_data_transfer" integer DEFAULT 0 NOT NULL;
ALTER TABLE "{{users}}" ALTER COLUMN "used_download_data_transfer" DROP DEFAULT;
ALTER TABLE "{{users}}" ADD COLUMN "used_upload_data_transfer" integer DEFAULT 0 NOT NULL;
ALTER TABLE "{{users}}" ALTER COLUMN "used_upload_data_transfer" DROP DEFAULT;
CREATE TABLE "{{active_transfers}}" ("id" bigserial NOT NULL PRIMARY KEY, "connection_id" varchar(100) NOT NULL,
"transfer_id" bigint NOT NULL, "transfer_type" integer NOT NULL, "username" varchar(255) NOT NULL,
"folder_name" varchar(255) NULL, "ip" varchar(50) NOT NULL, "truncated_size" bigint NOT NULL,
"current_ul_size" bigint NOT NULL, "current_dl_size" bigint NOT NULL, "created_at" bigint NOT NULL,
"updated_at" bigint NOT NULL);
CREATE INDEX "{{prefix}}active_transfers_connection_id_idx" ON "{{active_transfers}}" ("connection_id");
CREATE INDEX "{{prefix}}active_transfers_transfer_id_idx" ON "{{active_transfers}}" ("transfer_id");
CREATE INDEX "{{prefix}}active_transfers_updated_at_idx" ON "{{active_transfers}}" ("updated_at");
`
pgsqlV16DownSQL = `ALTER TABLE "{{users}}" DROP COLUMN "used_upload_data_transfer" CASCADE;
ALTER TABLE "{{users}}" DROP COLUMN "used_download_data_transfer" CASCADE;
ALTER TABLE "{{users}}" DROP COLUMN "upload_data_transfer" CASCADE;
ALTER TABLE "{{users}}" DROP COLUMN "total_data_transfer" CASCADE;
ALTER TABLE "{{users}}" DROP COLUMN "download_data_transfer" CASCADE;
DROP TABLE "{{active_transfers}}" CASCADE;
`
pgsqlV17SQL = `CREATE TABLE "{{groups}}" ("id" serial NOT NULL PRIMARY KEY, "name" varchar(255) NOT NULL UNIQUE,
"description" varchar(512) NULL, "created_at" bigint NOT NULL, "updated_at" bigint NOT NULL, "user_settings" text NULL);
CREATE TABLE "{{groups_folders_mapping}}" ("id" serial NOT NULL PRIMARY KEY, "group_id" integer NOT NULL,
"folder_id" integer NOT NULL, "virtual_path" text NOT NULL, "quota_size" bigint NOT NULL, "quota_files" integer NOT NULL);
CREATE TABLE "{{users_groups_mapping}}" ("id" serial NOT NULL PRIMARY KEY, "user_id" integer NOT NULL,
"group_id" integer NOT NULL, "group_type" integer NOT NULL);
DROP INDEX "{{prefix}}folders_mapping_folder_id_idx";
DROP INDEX "{{prefix}}folders_mapping_user_id_idx";
ALTER TABLE "{{folders_mapping}}" DROP CONSTRAINT "{{prefix}}unique_mapping";
ALTER TABLE "{{folders_mapping}}" RENAME TO "{{users_folders_mapping}}";
ALTER TABLE "{{users_folders_mapping}}" ADD CONSTRAINT "{{prefix}}unique_user_folder_mapping" UNIQUE ("user_id", "folder_id");
CREATE INDEX "{{prefix}}users_folders_mapping_folder_id_idx" ON "{{users_folders_mapping}}" ("folder_id");
CREATE INDEX "{{prefix}}users_folders_mapping_user_id_idx" ON "{{users_folders_mapping}}" ("user_id");
ALTER TABLE "{{users_groups_mapping}}" ADD CONSTRAINT "{{prefix}}unique_user_group_mapping" UNIQUE ("user_id", "group_id");
ALTER TABLE "{{groups_folders_mapping}}" ADD CONSTRAINT "{{prefix}}unique_group_folder_mapping" UNIQUE ("group_id", "folder_id");
CREATE INDEX "{{prefix}}users_groups_mapping_group_id_idx" ON "{{users_groups_mapping}}" ("group_id");
@ -147,23 +111,23 @@ CREATE INDEX "{{prefix}}groups_folders_mapping_group_id_idx" ON "{{groups_folder
ALTER TABLE "{{groups_folders_mapping}}" ADD CONSTRAINT "{{prefix}}groups_folders_mapping_group_id_fk_groups_id"
FOREIGN KEY ("group_id") REFERENCES "groups" ("id") MATCH SIMPLE ON UPDATE NO ACTION ON DELETE CASCADE;
CREATE INDEX "{{prefix}}groups_updated_at_idx" ON "{{groups}}" ("updated_at");
`
pgsqlV17DownSQL = `DROP TABLE "{{users_groups_mapping}}" CASCADE;
DROP TABLE "{{groups_folders_mapping}}" CASCADE;
DROP TABLE "{{groups}}" CASCADE;
DROP INDEX "{{prefix}}users_folders_mapping_folder_id_idx";
DROP INDEX "{{prefix}}users_folders_mapping_user_id_idx";
ALTER TABLE "{{users_folders_mapping}}" DROP CONSTRAINT "{{prefix}}unique_user_folder_mapping";
ALTER TABLE "{{users_folders_mapping}}" RENAME TO "{{folders_mapping}}";
ALTER TABLE "{{folders_mapping}}" ADD CONSTRAINT "{{prefix}}unique_mapping" UNIQUE ("user_id", "folder_id");
CREATE INDEX "{{prefix}}folders_mapping_folder_id_idx" ON "{{folders_mapping}}" ("folder_id");
CREATE INDEX "{{prefix}}folders_mapping_user_id_idx" ON "{{folders_mapping}}" ("user_id");
`
pgsqlV19SQL = `CREATE TABLE "{{shared_sessions}}" ("key" varchar(128) NOT NULL PRIMARY KEY,
"data" text NOT NULL, "type" integer NOT NULL, "timestamp" bigint NOT NULL);
CREATE INDEX "{{prefix}}users_folders_mapping_folder_id_idx" ON "{{users_folders_mapping}}" ("folder_id");
CREATE INDEX "{{prefix}}users_folders_mapping_user_id_idx" ON "{{users_folders_mapping}}" ("user_id");
CREATE INDEX "{{prefix}}api_keys_admin_id_idx" ON "{{api_keys}}" ("admin_id");
CREATE INDEX "{{prefix}}api_keys_user_id_idx" ON "{{api_keys}}" ("user_id");
CREATE INDEX "{{prefix}}users_updated_at_idx" ON "{{users}}" ("updated_at");
CREATE INDEX "{{prefix}}shares_user_id_idx" ON "{{shares}}" ("user_id");
CREATE INDEX "{{prefix}}defender_hosts_updated_at_idx" ON "{{defender_hosts}}" ("updated_at");
CREATE INDEX "{{prefix}}defender_hosts_ban_time_idx" ON "{{defender_hosts}}" ("ban_time");
CREATE INDEX "{{prefix}}defender_events_date_time_idx" ON "{{defender_events}}" ("date_time");
CREATE INDEX "{{prefix}}defender_events_host_id_idx" ON "{{defender_events}}" ("host_id");
CREATE INDEX "{{prefix}}active_transfers_connection_id_idx" ON "{{active_transfers}}" ("connection_id");
CREATE INDEX "{{prefix}}active_transfers_transfer_id_idx" ON "{{active_transfers}}" ("transfer_id");
CREATE INDEX "{{prefix}}active_transfers_updated_at_idx" ON "{{active_transfers}}" ("updated_at");
CREATE INDEX "{{prefix}}shared_sessions_type_idx" ON "{{shared_sessions}}" ("type");
CREATE INDEX "{{prefix}}shared_sessions_timestamp_idx" ON "{{shared_sessions}}" ("timestamp");`
pgsqlV19DownSQL = `DROP TABLE "{{shared_sessions}}" CASCADE;`
CREATE INDEX "{{prefix}}shared_sessions_timestamp_idx" ON "{{shared_sessions}}" ("timestamp");
INSERT INTO {{schema_version}} (version) VALUES (19);
`
)

// PGSQLProvider defines the auth provider for PostgreSQL database
@ -528,18 +492,9 @@ func (p *PGSQLProvider) initializeDatabase() error {
if errors.Is(err, sql.ErrNoRows) {
return errSchemaVersionEmpty
}
logger.InfoToConsole("creating initial database schema, version 15")
providerLog(logger.LevelInfo, "creating initial database schema, version 15")
initialSQL := strings.ReplaceAll(pgsqlInitial, "{{schema_version}}", sqlTableSchemaVersion)
initialSQL = strings.ReplaceAll(initialSQL, "{{admins}}", sqlTableAdmins)
initialSQL = strings.ReplaceAll(initialSQL, "{{folders}}", sqlTableFolders)
initialSQL = strings.ReplaceAll(initialSQL, "{{users}}", sqlTableUsers)
initialSQL = strings.ReplaceAll(initialSQL, "{{folders_mapping}}", sqlTableFoldersMapping)
initialSQL = strings.ReplaceAll(initialSQL, "{{api_keys}}", sqlTableAPIKeys)
initialSQL = strings.ReplaceAll(initialSQL, "{{shares}}", sqlTableShares)
initialSQL = strings.ReplaceAll(initialSQL, "{{defender_events}}", sqlTableDefenderEvents)
initialSQL = strings.ReplaceAll(initialSQL, "{{defender_hosts}}", sqlTableDefenderHosts)
initialSQL = strings.ReplaceAll(initialSQL, "{{prefix}}", config.SQLTablesPrefix)
logger.InfoToConsole("creating initial database schema, version 19")
providerLog(logger.LevelInfo, "creating initial database schema, version 19")
initialSQL := sqlReplaceAll(pgsqlInitial)
if config.Driver == CockroachDataProviderName {
// Cockroach does not support deferrable constraint validation, we don't need them,
// we keep these definitions for the PostgreSQL driver to avoid changes for users
@ -547,7 +502,7 @@ func (p *PGSQLProvider) initializeDatabase() error {
initialSQL = strings.ReplaceAll(initialSQL, "DEFERRABLE INITIALLY DEFERRED", "")
}

return sqlCommonExecSQLAndUpdateDBVersion(p.dbHandle, []string{initialSQL}, 15, true)
return sqlCommonExecSQLAndUpdateDBVersion(p.dbHandle, []string{initialSQL}, 19, true)
}

func (p *PGSQLProvider) migrateDatabase() error { //nolint:dupl
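
A note on the initializer above before the migration dispatch that follows: the whole version 19 schema is now built from a single template, and the only backend-specific tweak is stripping the deferrable-constraint clause for CockroachDB before the SQL runs. A self-contained sketch of that substitution step, in which the driver name and the DDL fragment are illustrative stand-ins rather than the provider's real constants:

package main

import (
	"fmt"
	"strings"
)

// adaptInitialSQL strips the deferrable-constraint clause for backends that do
// not support it, mirroring the CockroachDB branch in the initializer above.
func adaptInitialSQL(sql, driver string) string {
	if driver == "cockroachdb" { // illustrative name, not the provider's constant
		return strings.ReplaceAll(sql, "DEFERRABLE INITIALLY DEFERRED", "")
	}
	return sql
}

func main() {
	ddl := `"user_id" integer NULL REFERENCES "users" ("id") ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED);`
	fmt.Println(adaptInitialSQL(ddl, "cockroachdb"))
}

PostgreSQL keeps the deferrable foreign keys unchanged, so existing deployments are unaffected by the squash.
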
@ -560,19 +515,11 @@ func (p *PGSQLProvider) migrateDatabase() error { //nolint:dupl
case version == sqlDatabaseVersion:
providerLog(logger.LevelDebug, "sql database is up to date, current version: %v", version)
return ErrNoInitRequired
case version < 15:
case version < 19:
err = fmt.Errorf("database version %v is too old, please see the upgrading docs", version)
providerLog(logger.LevelError, "%v", err)
logger.ErrorToConsole("%v", err)
return err
case version == 15:
return updatePGSQLDatabaseFromV15(p.dbHandle)
case version == 16:
return updatePGSQLDatabaseFromV16(p.dbHandle)
case version == 17:
return updatePGSQLDatabaseFromV17(p.dbHandle)
case version == 18:
return updatePGSQLDatabaseFromV18(p.dbHandle)
default:
if version > sqlDatabaseVersion {
providerLog(logger.LevelError, "database version %v is newer than the supported one: %v", version,
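
The dispatch above is the behavioural core of the squash: anything below schema version 19 is now rejected with a pointer to the upgrading docs instead of being migrated step by step. A minimal, self-contained sketch of how such a gate is shaped, using only the version numbers and messages visible in this hunk (what the real provider does after logging a newer-than-supported version is outside this diff, so the last branch here is just illustrative):

package main

import (
	"errors"
	"fmt"
)

const supportedDBVersion = 19 // the squashed schema version used in this diff

var errNoInitRequired = errors.New("the data provider is up to date")

// checkSchemaVersion sketches the new migration gate: up to date, too old, or
// newer than the supported schema are all decided before any migration runs.
func checkSchemaVersion(version int) error {
	switch {
	case version == supportedDBVersion:
		return errNoInitRequired
	case version < supportedDBVersion:
		return fmt.Errorf("database version %v is too old, please see the upgrading docs", version)
	default:
		return fmt.Errorf("database version %v is newer than the supported one: %v", version, supportedDBVersion)
	}
}

func main() {
	fmt.Println(checkSchemaVersion(15))
	fmt.Println(checkSchemaVersion(19))
}
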
@ -595,14 +542,6 @@ func (p *PGSQLProvider) revertDatabase(targetVersion int) error {
}

switch dbVersion.Version {
case 16:
return downgradePGSQLDatabaseFromV16(p.dbHandle)
case 17:
return downgradePGSQLDatabaseFromV17(p.dbHandle)
case 18:
return downgradePGSQLDatabaseFromV18(p.dbHandle)
case 19:
return downgradePGSQLDatabaseFromV19(p.dbHandle)
default:
return fmt.Errorf("database version not handled: %v", dbVersion.Version)
}
@ -612,154 +551,3 @@ func (p *PGSQLProvider) resetDatabase() error {
sql := sqlReplaceAll(pgsqlResetSQL)
return sqlCommonExecSQLAndUpdateDBVersion(p.dbHandle, []string{sql}, 0, false)
}

func updatePGSQLDatabaseFromV15(dbHandle *sql.DB) error {
if err := updatePGSQLDatabaseFrom15To16(dbHandle); err != nil {
return err
}
return updatePGSQLDatabaseFromV16(dbHandle)
}

func updatePGSQLDatabaseFromV16(dbHandle *sql.DB) error {
if err := updatePGSQLDatabaseFrom16To17(dbHandle); err != nil {
return err
}
return updatePGSQLDatabaseFromV17(dbHandle)
}

func updatePGSQLDatabaseFromV17(dbHandle *sql.DB) error {
if err := updatePGSQLDatabaseFrom17To18(dbHandle); err != nil {
return err
}
return updatePGSQLDatabaseFromV18(dbHandle)
}

func updatePGSQLDatabaseFromV18(dbHandle *sql.DB) error {
return updatePGSQLDatabaseFrom18To19(dbHandle)
}

func downgradePGSQLDatabaseFromV16(dbHandle *sql.DB) error {
return downgradePGSQLDatabaseFrom16To15(dbHandle)
}

func downgradePGSQLDatabaseFromV17(dbHandle *sql.DB) error {
if err := downgradePGSQLDatabaseFrom17To16(dbHandle); err != nil {
return err
}
return downgradePGSQLDatabaseFromV16(dbHandle)
}

func downgradePGSQLDatabaseFromV18(dbHandle *sql.DB) error {
if err := downgradePGSQLDatabaseFrom18To17(dbHandle); err != nil {
return err
}
return downgradePGSQLDatabaseFromV17(dbHandle)
}

func downgradePGSQLDatabaseFromV19(dbHandle *sql.DB) error {
if err := downgradePGSQLDatabaseFrom19To18(dbHandle); err != nil {
return err
}
return downgradePGSQLDatabaseFromV18(dbHandle)
}

func updatePGSQLDatabaseFrom15To16(dbHandle *sql.DB) error {
logger.InfoToConsole("updating database version: 15 -> 16")
providerLog(logger.LevelInfo, "updating database version: 15 -> 16")
sql := strings.ReplaceAll(pgsqlV16SQL, "{{users}}", sqlTableUsers)
sql = strings.ReplaceAll(sql, "{{active_transfers}}", sqlTableActiveTransfers)
sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
if config.Driver == CockroachDataProviderName {
// Cockroach does not allow to run this schema migration within a transaction
ctx, cancel := context.WithTimeout(context.Background(), longSQLQueryTimeout)
defer cancel()

for _, q := range strings.Split(sql, ";") {
if strings.TrimSpace(q) == "" {
continue
}
_, err := dbHandle.ExecContext(ctx, q)
if err != nil {
return err
}
}
return sqlCommonUpdateDatabaseVersion(ctx, dbHandle, 16)
}
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 16, true)
}

func updatePGSQLDatabaseFrom16To17(dbHandle *sql.DB) error {
logger.InfoToConsole("updating database version: 16 -> 17")
providerLog(logger.LevelInfo, "updating database version: 16 -> 17")
sql := pgsqlV17SQL
if config.Driver == CockroachDataProviderName {
sql = strings.ReplaceAll(sql, `ALTER TABLE "{{folders_mapping}}" DROP CONSTRAINT "{{prefix}}unique_mapping";`,
`DROP INDEX "{{prefix}}unique_mapping" CASCADE;`)
}
sql = strings.ReplaceAll(sql, "{{groups}}", sqlTableGroups)
sql = strings.ReplaceAll(sql, "{{users}}", sqlTableUsers)
sql = strings.ReplaceAll(sql, "{{folders}}", sqlTableFolders)
sql = strings.ReplaceAll(sql, "{{folders_mapping}}", sqlTableFoldersMapping)
sql = strings.ReplaceAll(sql, "{{users_folders_mapping}}", sqlTableUsersFoldersMapping)
sql = strings.ReplaceAll(sql, "{{users_groups_mapping}}", sqlTableUsersGroupsMapping)
sql = strings.ReplaceAll(sql, "{{groups_folders_mapping}}", sqlTableGroupsFoldersMapping)
sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 17, true)
}

func updatePGSQLDatabaseFrom17To18(dbHandle *sql.DB) error {
logger.InfoToConsole("updating database version: 17 -> 18")
providerLog(logger.LevelInfo, "updating database version: 17 -> 18")
if err := importGCSCredentials(); err != nil {
return err
}
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, nil, 18, true)
}

func updatePGSQLDatabaseFrom18To19(dbHandle *sql.DB) error {
logger.InfoToConsole("updating database version: 18 -> 19")
providerLog(logger.LevelInfo, "updating database version: 18 -> 19")
sql := strings.ReplaceAll(pgsqlV19SQL, "{{shared_sessions}}", sqlTableSharedSessions)
sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 19, true)
}

func downgradePGSQLDatabaseFrom16To15(dbHandle *sql.DB) error {
logger.InfoToConsole("downgrading database version: 16 -> 15")
providerLog(logger.LevelInfo, "downgrading database version: 16 -> 15")
sql := strings.ReplaceAll(pgsqlV16DownSQL, "{{users}}", sqlTableUsers)
sql = strings.ReplaceAll(sql, "{{active_transfers}}", sqlTableActiveTransfers)
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 15, false)
}

func downgradePGSQLDatabaseFrom17To16(dbHandle *sql.DB) error {
logger.InfoToConsole("downgrading database version: 17 -> 16")
providerLog(logger.LevelInfo, "downgrading database version: 17 -> 16")
sql := pgsqlV17DownSQL
if config.Driver == CockroachDataProviderName {
sql = strings.ReplaceAll(sql, `ALTER TABLE "{{users_folders_mapping}}" DROP CONSTRAINT "{{prefix}}unique_user_folder_mapping";`,
`DROP INDEX "{{prefix}}unique_user_folder_mapping" CASCADE;`)
}
sql = strings.ReplaceAll(sql, "{{groups}}", sqlTableGroups)
sql = strings.ReplaceAll(sql, "{{users}}", sqlTableUsers)
sql = strings.ReplaceAll(sql, "{{folders}}", sqlTableFolders)
sql = strings.ReplaceAll(sql, "{{folders_mapping}}", sqlTableFoldersMapping)
sql = strings.ReplaceAll(sql, "{{users_folders_mapping}}", sqlTableUsersFoldersMapping)
sql = strings.ReplaceAll(sql, "{{users_groups_mapping}}", sqlTableUsersGroupsMapping)
sql = strings.ReplaceAll(sql, "{{groups_folders_mapping}}", sqlTableGroupsFoldersMapping)
sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 16, false)
}

func downgradePGSQLDatabaseFrom18To17(dbHandle *sql.DB) error {
logger.InfoToConsole("downgrading database version: 18 -> 17")
providerLog(logger.LevelInfo, "downgrading database version: 18 -> 17")
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, nil, 17, false)
}

func downgradePGSQLDatabaseFrom19To18(dbHandle *sql.DB) error {
logger.InfoToConsole("downgrading database version: 19 -> 18")
providerLog(logger.LevelInfo, "downgrading database version: 19 -> 18")
sql := strings.ReplaceAll(pgsqlV19DownSQL, "{{shared_sessions}}", sqlTableSharedSessions)
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 18, false)
}
@ -57,6 +57,7 @@ func sqlReplaceAll(sql string) string {
sql = strings.ReplaceAll(sql, "{{defender_events}}", sqlTableDefenderEvents)
sql = strings.ReplaceAll(sql, "{{defender_hosts}}", sqlTableDefenderHosts)
sql = strings.ReplaceAll(sql, "{{active_transfers}}", sqlTableActiveTransfers)
sql = strings.ReplaceAll(sql, "{{shared_sessions}}", sqlTableSharedSessions)
sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
return sql
}
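
With {{shared_sessions}} added above, sqlReplaceAll is the single place where every table placeholder is expanded, which is what lets the squashed initial schema, the reset script and the remaining migrations share one template. A self-contained sketch of this placeholder-expansion pattern; the table names and prefix below are made up for the example, not the real configuration:

package main

import (
	"fmt"
	"strings"
)

// expand replaces the {{...}} placeholders used by the schema templates with
// concrete table names; the map is illustrative, not SFTPGo's actual config.
func expand(tpl string) string {
	replacements := map[string]string{
		"{{users}}":           "sftpgo_users",
		"{{folders}}":         "sftpgo_folders",
		"{{shared_sessions}}": "sftpgo_shared_sessions",
		"{{prefix}}":          "sftpgo_",
	}
	for placeholder, name := range replacements {
		tpl = strings.ReplaceAll(tpl, placeholder, name)
	}
	return tpl
}

func main() {
	tpl := `CREATE INDEX "{{prefix}}shared_sessions_type_idx" ON "{{shared_sessions}}" ("type");`
	fmt.Println(expand(tpl))
}
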
@ -10,7 +10,6 @@ import (
"errors"
"fmt"
"path/filepath"
"strings"
"time"

// we import go-sqlite3 here to be able to disable SQLite support using a build tag
@ -44,6 +43,11 @@ CREATE TABLE "{{admins}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "use
"description" varchar(512) NULL, "password" varchar(255) NOT NULL, "email" varchar(255) NULL, "status" integer NOT NULL,
"permissions" text NOT NULL, "filters" text NULL, "additional_info" text NULL, "last_login" bigint NOT NULL,
"created_at" bigint NOT NULL, "updated_at" bigint NOT NULL);
CREATE TABLE "{{active_transfers}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "connection_id" varchar(100) NOT NULL,
"transfer_id" bigint NOT NULL, "transfer_type" integer NOT NULL, "username" varchar(255) NOT NULL,
"folder_name" varchar(255) NULL, "ip" varchar(50) NOT NULL, "truncated_size" bigint NOT NULL,
"current_ul_size" bigint NOT NULL, "current_dl_size" bigint NOT NULL, "created_at" bigint NOT NULL,
"updated_at" bigint NOT NULL);
CREATE TABLE "{{defender_hosts}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "ip" varchar(50) NOT NULL UNIQUE,
"ban_time" bigint NOT NULL, "updated_at" bigint NOT NULL);
CREATE TABLE "{{defender_events}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "date_time" bigint NOT NULL,
@ -52,18 +56,34 @@ DEFERRABLE INITIALLY DEFERRED);
CREATE TABLE "{{folders}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "name" varchar(255) NOT NULL UNIQUE,
"description" varchar(512) NULL, "path" text NULL, "used_quota_size" bigint NOT NULL, "used_quota_files" integer NOT NULL,
"last_quota_update" bigint NOT NULL, "filesystem" text NULL);
CREATE TABLE "{{groups}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "name" varchar(255) NOT NULL UNIQUE,
"description" varchar(512) NULL, "created_at" bigint NOT NULL, "updated_at" bigint NOT NULL, "user_settings" text NULL);
CREATE TABLE "{{shared_sessions}}" ("key" varchar(128) NOT NULL PRIMARY KEY, "data" text NOT NULL,
"type" integer NOT NULL, "timestamp" bigint NOT NULL);
CREATE TABLE "{{users}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL UNIQUE,
"status" integer NOT NULL, "expiration_date" bigint NOT NULL, "description" varchar(512) NULL, "password" text NULL,
"public_keys" text NULL, "home_dir" text NOT NULL, "uid" bigint NOT NULL, "gid" bigint NOT NULL,
"max_sessions" integer NOT NULL, "quota_size" bigint NOT NULL, "quota_files" integer NOT NULL, "permissions" text NOT NULL,
"used_quota_size" bigint NOT NULL, "used_quota_files" integer NOT NULL, "last_quota_update" bigint NOT NULL,
"upload_bandwidth" integer NOT NULL, "download_bandwidth" integer NOT NULL, "last_login" bigint NOT NULL, "filters" text NULL,
"filesystem" text NULL, "additional_info" text NULL, "created_at" bigint NOT NULL, "updated_at" bigint NOT NULL,
"email" varchar(255) NULL);
CREATE TABLE "{{folders_mapping}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "virtual_path" text NOT NULL,
"quota_size" bigint NOT NULL, "quota_files" integer NOT NULL, "folder_id" integer NOT NULL REFERENCES "{{folders}}" ("id")
ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, "user_id" integer NOT NULL REFERENCES "{{users}}" ("id") ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
CONSTRAINT "{{prefix}}unique_mapping" UNIQUE ("user_id", "folder_id"));
"upload_bandwidth" integer NOT NULL, "download_bandwidth" integer NOT NULL, "last_login" bigint NOT NULL,
"filters" text NULL, "filesystem" text NULL, "additional_info" text NULL, "created_at" bigint NOT NULL,
"updated_at" bigint NOT NULL, "email" varchar(255) NULL, "upload_data_transfer" integer NOT NULL,
"download_data_transfer" integer NOT NULL, "total_data_transfer" integer NOT NULL, "used_upload_data_transfer" integer NOT NULL,
"used_download_data_transfer" integer NOT NULL);
CREATE TABLE "{{groups_folders_mapping}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,
"folder_id" integer NOT NULL REFERENCES "{{folders}}" ("id") ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
"group_id" integer NOT NULL REFERENCES "{{groups}}" ("id") ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
"virtual_path" text NOT NULL, "quota_size" bigint NOT NULL, "quota_files" integer NOT NULL,
CONSTRAINT "{{prefix}}unique_group_folder_mapping" UNIQUE ("group_id", "folder_id"));
CREATE TABLE "{{users_groups_mapping}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,
"user_id" integer NOT NULL REFERENCES "users" ("id") ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
"group_id" integer NOT NULL REFERENCES "groups" ("id") ON DELETE NO ACTION,
"group_type" integer NOT NULL, CONSTRAINT "{{prefix}}unique_user_group_mapping" UNIQUE ("user_id", "group_id"));
CREATE TABLE "{{users_folders_mapping}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,
"user_id" integer NOT NULL REFERENCES "users" ("id") ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
"folder_id" integer NOT NULL REFERENCES "folders" ("id") ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
"virtual_path" text NOT NULL, "quota_size" bigint NOT NULL, "quota_files" integer NOT NULL,
CONSTRAINT "{{prefix}}unique_user_folder_mapping" UNIQUE ("user_id", "folder_id"));
CREATE TABLE "{{shares}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "share_id" varchar(60) NOT NULL UNIQUE,
"name" varchar(255) NOT NULL, "description" varchar(512) NULL, "scope" integer NOT NULL, "paths" text NOT NULL,
"created_at" bigint NOT NULL, "updated_at" bigint NOT NULL, "last_use_at" bigint NOT NULL, "expires_at" bigint NOT NULL,
@ -74,8 +94,13 @@ CREATE TABLE "{{api_keys}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "n
"created_at" bigint NOT NULL, "updated_at" bigint NOT NULL, "last_use_at" bigint NOT NULL, "expires_at" bigint NOT NULL,
"description" text NULL, "admin_id" integer NULL REFERENCES "{{admins}}" ("id") ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
"user_id" integer NULL REFERENCES "{{users}}" ("id") ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED);
CREATE INDEX "{{prefix}}folders_mapping_folder_id_idx" ON "{{folders_mapping}}" ("folder_id");
CREATE INDEX "{{prefix}}folders_mapping_user_id_idx" ON "{{folders_mapping}}" ("user_id");
CREATE INDEX "{{prefix}}groups_updated_at_idx" ON "{{groups}}" ("updated_at");
CREATE INDEX "{{prefix}}users_folders_mapping_folder_id_idx" ON "{{users_folders_mapping}}" ("folder_id");
CREATE INDEX "{{prefix}}users_folders_mapping_user_id_idx" ON "{{users_folders_mapping}}" ("user_id");
CREATE INDEX "{{prefix}}users_groups_mapping_group_id_idx" ON "{{users_groups_mapping}}" ("group_id");
CREATE INDEX "{{prefix}}users_groups_mapping_user_id_idx" ON "{{users_groups_mapping}}" ("user_id");
CREATE INDEX "{{prefix}}groups_folders_mapping_folder_id_idx" ON "{{groups_folders_mapping}}" ("folder_id");
CREATE INDEX "{{prefix}}groups_folders_mapping_group_id_idx" ON "{{groups_folders_mapping}}" ("group_id");
CREATE INDEX "{{prefix}}api_keys_admin_id_idx" ON "{{api_keys}}" ("admin_id");
CREATE INDEX "{{prefix}}api_keys_user_id_idx" ON "{{api_keys}}" ("user_id");
CREATE INDEX "{{prefix}}users_updated_at_idx" ON "{{users}}" ("updated_at");
@ -84,78 +109,13 @@ CREATE INDEX "{{prefix}}defender_hosts_updated_at_idx" ON "{{defender_hosts}}" (
CREATE INDEX "{{prefix}}defender_hosts_ban_time_idx" ON "{{defender_hosts}}" ("ban_time");
CREATE INDEX "{{prefix}}defender_events_date_time_idx" ON "{{defender_events}}" ("date_time");
CREATE INDEX "{{prefix}}defender_events_host_id_idx" ON "{{defender_events}}" ("host_id");
INSERT INTO {{schema_version}} (version) VALUES (15);
`
sqliteV16SQL = `ALTER TABLE "{{users}}" ADD COLUMN "download_data_transfer" integer DEFAULT 0 NOT NULL;
ALTER TABLE "{{users}}" ADD COLUMN "total_data_transfer" integer DEFAULT 0 NOT NULL;
ALTER TABLE "{{users}}" ADD COLUMN "upload_data_transfer" integer DEFAULT 0 NOT NULL;
ALTER TABLE "{{users}}" ADD COLUMN "used_download_data_transfer" integer DEFAULT 0 NOT NULL;
ALTER TABLE "{{users}}" ADD COLUMN "used_upload_data_transfer" integer DEFAULT 0 NOT NULL;
CREATE TABLE "{{active_transfers}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "connection_id" varchar(100) NOT NULL,
"transfer_id" bigint NOT NULL, "transfer_type" integer NOT NULL, "username" varchar(255) NOT NULL,
"folder_name" varchar(255) NULL, "ip" varchar(50) NOT NULL, "truncated_size" bigint NOT NULL,
"current_ul_size" bigint NOT NULL, "current_dl_size" bigint NOT NULL, "created_at" bigint NOT NULL,
"updated_at" bigint NOT NULL);
CREATE INDEX "{{prefix}}active_transfers_connection_id_idx" ON "{{active_transfers}}" ("connection_id");
CREATE INDEX "{{prefix}}active_transfers_transfer_id_idx" ON "{{active_transfers}}" ("transfer_id");
CREATE INDEX "{{prefix}}active_transfers_updated_at_idx" ON "{{active_transfers}}" ("updated_at");
`
sqliteV16DownSQL = `ALTER TABLE "{{users}}" DROP COLUMN "used_upload_data_transfer";
ALTER TABLE "{{users}}" DROP COLUMN "used_download_data_transfer";
ALTER TABLE "{{users}}" DROP COLUMN "upload_data_transfer";
ALTER TABLE "{{users}}" DROP COLUMN "total_data_transfer";
ALTER TABLE "{{users}}" DROP COLUMN "download_data_transfer";
DROP TABLE "{{active_transfers}}";
`
sqliteV17SQL = `CREATE TABLE "{{groups}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "name" varchar(255) NOT NULL UNIQUE,
"description" varchar(512) NULL, "created_at" bigint NOT NULL, "updated_at" bigint NOT NULL, "user_settings" text NULL);
CREATE TABLE "{{groups_folders_mapping}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,
"folder_id" integer NOT NULL REFERENCES "{{folders}}" ("id") ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
"group_id" integer NOT NULL REFERENCES "{{groups}}" ("id") ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
"virtual_path" text NOT NULL, "quota_size" bigint NOT NULL, "quota_files" integer NOT NULL,
CONSTRAINT "{{prefix}}unique_group_folder_mapping" UNIQUE ("group_id", "folder_id"));
CREATE TABLE "{{users_groups_mapping}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,
"user_id" integer NOT NULL REFERENCES "users" ("id") ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
"group_id" integer NOT NULL REFERENCES "groups" ("id") ON DELETE NO ACTION,
"group_type" integer NOT NULL, CONSTRAINT "{{prefix}}unique_user_group_mapping" UNIQUE ("user_id", "group_id"));
CREATE TABLE "new__folders_mapping" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,
"user_id" integer NOT NULL REFERENCES "users" ("id") ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
"folder_id" integer NOT NULL REFERENCES "folders" ("id") ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
"virtual_path" text NOT NULL, "quota_size" bigint NOT NULL, "quota_files" integer NOT NULL,
CONSTRAINT "{{prefix}}unique_user_folder_mapping" UNIQUE ("user_id", "folder_id"));
INSERT INTO "new__folders_mapping" ("id", "virtual_path", "quota_size", "quota_files", "folder_id", "user_id") SELECT "id",
"virtual_path", "quota_size", "quota_files", "folder_id", "user_id" FROM "{{folders_mapping}}";
DROP TABLE "{{folders_mapping}}";
ALTER TABLE "new__folders_mapping" RENAME TO "{{users_folders_mapping}}";
CREATE INDEX "{{prefix}}groups_updated_at_idx" ON "{{groups}}" ("updated_at");
CREATE INDEX "{{prefix}}users_folders_mapping_folder_id_idx" ON "{{users_folders_mapping}}" ("folder_id");
CREATE INDEX "{{prefix}}users_folders_mapping_user_id_idx" ON "{{users_folders_mapping}}" ("user_id");
CREATE INDEX "{{prefix}}users_groups_mapping_group_id_idx" ON "{{users_groups_mapping}}" ("group_id");
CREATE INDEX "{{prefix}}users_groups_mapping_user_id_idx" ON "{{users_groups_mapping}}" ("user_id");
CREATE INDEX "{{prefix}}groups_folders_mapping_folder_id_idx" ON "{{groups_folders_mapping}}" ("folder_id");
CREATE INDEX "{{prefix}}groups_folders_mapping_group_id_idx" ON "{{groups_folders_mapping}}" ("group_id");
`
sqliteV17DownSQL = `DROP TABLE "{{users_groups_mapping}}";
DROP TABLE "{{groups_folders_mapping}}";
DROP TABLE "{{groups}}";
CREATE TABLE "new__folders_mapping" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,
"user_id" integer NOT NULL REFERENCES "users" ("id") ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
"folder_id" integer NOT NULL REFERENCES "folders" ("id") ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
"virtual_path" text NOT NULL, "quota_size" bigint NOT NULL, "quota_files" integer NOT NULL,
CONSTRAINT "{{prefix}}unique_folder_mapping" UNIQUE ("user_id", "folder_id"));
INSERT INTO "new__folders_mapping" ("id", "virtual_path", "quota_size", "quota_files", "folder_id", "user_id") SELECT "id",
"virtual_path", "quota_size", "quota_files", "folder_id", "user_id" FROM "{{users_folders_mapping}}";
DROP TABLE "{{users_folders_mapping}}";
ALTER TABLE "new__folders_mapping" RENAME TO "{{folders_mapping}}";
CREATE INDEX "{{prefix}}folders_mapping_folder_id_idx" ON "{{folders_mapping}}" ("folder_id");
CREATE INDEX "{{prefix}}folders_mapping_user_id_idx" ON "{{folders_mapping}}" ("user_id");
`
sqliteV19SQL = `CREATE TABLE "{{shared_sessions}}" ("key" varchar(128) NOT NULL PRIMARY KEY, "data" text NOT NULL,
"type" integer NOT NULL, "timestamp" bigint NOT NULL);
CREATE INDEX "{{prefix}}shared_sessions_type_idx" ON "{{shared_sessions}}" ("type");
CREATE INDEX "{{prefix}}shared_sessions_timestamp_idx" ON "{{shared_sessions}}" ("timestamp");
`
sqliteV19DownSQL = `DROP TABLE "{{shared_sessions}}";`
INSERT INTO {{schema_version}} (version) VALUES (19);
`
)

// SQLiteProvider defines the auth provider for SQLite database
@ -506,20 +466,11 @@ func (p *SQLiteProvider) initializeDatabase() error {
if errors.Is(err, sql.ErrNoRows) {
return errSchemaVersionEmpty
}
logger.InfoToConsole("creating initial database schema, version 15")
providerLog(logger.LevelInfo, "creating initial database schema, version 15")
initialSQL := strings.ReplaceAll(sqliteInitialSQL, "{{schema_version}}", sqlTableSchemaVersion)
initialSQL = strings.ReplaceAll(initialSQL, "{{admins}}", sqlTableAdmins)
initialSQL = strings.ReplaceAll(initialSQL, "{{folders}}", sqlTableFolders)
initialSQL = strings.ReplaceAll(initialSQL, "{{users}}", sqlTableUsers)
initialSQL = strings.ReplaceAll(initialSQL, "{{folders_mapping}}", sqlTableFoldersMapping)
initialSQL = strings.ReplaceAll(initialSQL, "{{api_keys}}", sqlTableAPIKeys)
initialSQL = strings.ReplaceAll(initialSQL, "{{shares}}", sqlTableShares)
initialSQL = strings.ReplaceAll(initialSQL, "{{defender_events}}", sqlTableDefenderEvents)
initialSQL = strings.ReplaceAll(initialSQL, "{{defender_hosts}}", sqlTableDefenderHosts)
initialSQL = strings.ReplaceAll(initialSQL, "{{prefix}}", config.SQLTablesPrefix)
logger.InfoToConsole("creating initial database schema, version 19")
providerLog(logger.LevelInfo, "creating initial database schema, version 19")
sql := sqlReplaceAll(sqliteInitialSQL)

return sqlCommonExecSQLAndUpdateDBVersion(p.dbHandle, []string{initialSQL}, 15, true)
return sqlCommonExecSQLAndUpdateDBVersion(p.dbHandle, []string{sql}, 19, true)
}

func (p *SQLiteProvider) migrateDatabase() error { //nolint:dupl
@ -532,19 +483,11 @@ func (p *SQLiteProvider) migrateDatabase() error { //nolint:dupl
case version == sqlDatabaseVersion:
providerLog(logger.LevelDebug, "sql database is up to date, current version: %v", version)
return ErrNoInitRequired
case version < 15:
case version < 19:
err = fmt.Errorf("database version %v is too old, please see the upgrading docs", version)
providerLog(logger.LevelError, "%v", err)
logger.ErrorToConsole("%v", err)
return err
case version == 15:
return updateSQLiteDatabaseFromV15(p.dbHandle)
case version == 16:
return updateSQLiteDatabaseFromV16(p.dbHandle)
case version == 17:
return updateSQLiteDatabaseFromV17(p.dbHandle)
case version == 18:
return updateSQLiteDatabaseFromV18(p.dbHandle)
default:
if version > sqlDatabaseVersion {
providerLog(logger.LevelError, "database version %v is newer than the supported one: %v", version,
@ -567,14 +510,6 @@ func (p *SQLiteProvider) revertDatabase(targetVersion int) error {
}

switch dbVersion.Version {
case 16:
return downgradeSQLiteDatabaseFromV16(p.dbHandle)
case 17:
return downgradeSQLiteDatabaseFromV17(p.dbHandle)
case 18:
return downgradeSQLiteDatabaseFromV18(p.dbHandle)
case 19:
return downgradeSQLiteDatabaseFromV19(p.dbHandle)
default:
return fmt.Errorf("database version not handled: %v", dbVersion.Version)
}
@ -585,144 +520,7 @@ func (p *SQLiteProvider) resetDatabase() error {
return sqlCommonExecSQLAndUpdateDBVersion(p.dbHandle, []string{sql}, 0, false)
}

func updateSQLiteDatabaseFromV15(dbHandle *sql.DB) error {
if err := updateSQLiteDatabaseFrom15To16(dbHandle); err != nil {
return err
}
return updateSQLiteDatabaseFromV16(dbHandle)
}

func updateSQLiteDatabaseFromV16(dbHandle *sql.DB) error {
if err := updateSQLiteDatabaseFrom16To17(dbHandle); err != nil {
return err
}
return updateSQLiteDatabaseFromV17(dbHandle)
}

func updateSQLiteDatabaseFromV17(dbHandle *sql.DB) error {
if err := updateSQLiteDatabaseFrom17To18(dbHandle); err != nil {
return err
}
return updateSQLiteDatabaseFromV18(dbHandle)
}

func updateSQLiteDatabaseFromV18(dbHandle *sql.DB) error {
return updateSQLiteDatabaseFrom18To19(dbHandle)
}

func downgradeSQLiteDatabaseFromV16(dbHandle *sql.DB) error {
return downgradeSQLiteDatabaseFrom16To15(dbHandle)
}

func downgradeSQLiteDatabaseFromV17(dbHandle *sql.DB) error {
if err := downgradeSQLiteDatabaseFrom17To16(dbHandle); err != nil {
return err
}
return downgradeSQLiteDatabaseFromV16(dbHandle)
}

func downgradeSQLiteDatabaseFromV18(dbHandle *sql.DB) error {
if err := downgradeSQLiteDatabaseFrom18To17(dbHandle); err != nil {
return err
}
return downgradeSQLiteDatabaseFromV17(dbHandle)
}

func downgradeSQLiteDatabaseFromV19(dbHandle *sql.DB) error {
if err := downgradeSQLiteDatabaseFrom19To18(dbHandle); err != nil {
return err
}
return downgradeSQLiteDatabaseFromV18(dbHandle)
}

func updateSQLiteDatabaseFrom15To16(dbHandle *sql.DB) error {
logger.InfoToConsole("updating database version: 15 -> 16")
providerLog(logger.LevelInfo, "updating database version: 15 -> 16")
sql := strings.ReplaceAll(sqliteV16SQL, "{{users}}", sqlTableUsers)
sql = strings.ReplaceAll(sql, "{{active_transfers}}", sqlTableActiveTransfers)
sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 16, true)
}

func updateSQLiteDatabaseFrom16To17(dbHandle *sql.DB) error {
logger.InfoToConsole("updating database version: 16 -> 17")
providerLog(logger.LevelInfo, "updating database version: 16 -> 17")
if err := setPragmaFK(dbHandle, "OFF"); err != nil {
return err
}
sql := strings.ReplaceAll(sqliteV17SQL, "{{users}}", sqlTableUsers)
sql = strings.ReplaceAll(sql, "{{groups}}", sqlTableGroups)
sql = strings.ReplaceAll(sql, "{{folders}}", sqlTableFolders)
sql = strings.ReplaceAll(sql, "{{folders_mapping}}", sqlTableFoldersMapping)
sql = strings.ReplaceAll(sql, "{{users_folders_mapping}}", sqlTableUsersFoldersMapping)
sql = strings.ReplaceAll(sql, "{{users_groups_mapping}}", sqlTableUsersGroupsMapping)
sql = strings.ReplaceAll(sql, "{{groups_folders_mapping}}", sqlTableGroupsFoldersMapping)
sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
if err := sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 17, true); err != nil {
return err
}
return setPragmaFK(dbHandle, "ON")
}

func updateSQLiteDatabaseFrom17To18(dbHandle *sql.DB) error {
logger.InfoToConsole("updating database version: 17 -> 18")
providerLog(logger.LevelInfo, "updating database version: 17 -> 18")
if err := importGCSCredentials(); err != nil {
return err
}
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, nil, 18, true)
}

func updateSQLiteDatabaseFrom18To19(dbHandle *sql.DB) error {
logger.InfoToConsole("updating database version: 18 -> 19")
providerLog(logger.LevelInfo, "updating database version: 18 -> 19")
sql := strings.ReplaceAll(sqliteV19SQL, "{{shared_sessions}}", sqlTableSharedSessions)
sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 19, true)
}

func downgradeSQLiteDatabaseFrom16To15(dbHandle *sql.DB) error {
logger.InfoToConsole("downgrading database version: 16 -> 15")
providerLog(logger.LevelInfo, "downgrading database version: 16 -> 15")
sql := strings.ReplaceAll(sqliteV16DownSQL, "{{users}}", sqlTableUsers)
sql = strings.ReplaceAll(sql, "{{active_transfers}}", sqlTableActiveTransfers)
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 15, false)
}

func downgradeSQLiteDatabaseFrom17To16(dbHandle *sql.DB) error {
logger.InfoToConsole("downgrading database version: 17 -> 16")
providerLog(logger.LevelInfo, "downgrading database version: 17 -> 16")
if err := setPragmaFK(dbHandle, "OFF"); err != nil {
return err
}
sql := strings.ReplaceAll(sqliteV17DownSQL, "{{groups}}", sqlTableGroups)
sql = strings.ReplaceAll(sql, "{{users}}", sqlTableUsers)
sql = strings.ReplaceAll(sql, "{{folders}}", sqlTableFolders)
sql = strings.ReplaceAll(sql, "{{folders_mapping}}", sqlTableFoldersMapping)
sql = strings.ReplaceAll(sql, "{{users_folders_mapping}}", sqlTableUsersFoldersMapping)
sql = strings.ReplaceAll(sql, "{{users_groups_mapping}}", sqlTableUsersGroupsMapping)
sql = strings.ReplaceAll(sql, "{{groups_folders_mapping}}", sqlTableGroupsFoldersMapping)
sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
if err := sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 16, false); err != nil {
return err
}
return setPragmaFK(dbHandle, "ON")
}

func downgradeSQLiteDatabaseFrom18To17(dbHandle *sql.DB) error {
logger.InfoToConsole("downgrading database version: 18 -> 17")
providerLog(logger.LevelInfo, "downgrading database version: 18 -> 17")
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, nil, 17, false)
}

func downgradeSQLiteDatabaseFrom19To18(dbHandle *sql.DB) error {
logger.InfoToConsole("downgrading database version: 19 -> 18")
providerLog(logger.LevelInfo, "downgrading database version: 19 -> 18")
sql := strings.ReplaceAll(sqliteV19DownSQL, "{{shared_sessions}}", sqlTableSharedSessions)
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 18, false)
}

func setPragmaFK(dbHandle *sql.DB, value string) error {
/*func setPragmaFK(dbHandle *sql.DB, value string) error {
ctx, cancel := context.WithTimeout(context.Background(), longSQLQueryTimeout)
defer cancel()
@ -730,4 +528,4 @@ func setPragmaFK(dbHandle *sql.DB, value string) error {
_, err := dbHandle.ExecContext(ctx, sql)
return err
}
}*/

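
setPragmaFK now survives only inside the comment block above: the per-step SQLite migrations that toggled foreign-key enforcement around table rebuilds are gone. A compilable sketch of that PRAGMA toggle, written against a generic database/sql handle rather than SFTPGo's own wiring (the package name, timeout and exact statement are assumptions for illustration):

package sqliteutil

import (
	"context"
	"database/sql"
	"fmt"
	"time"
)

// setPragmaFK toggles SQLite foreign-key enforcement, as the removed
// migration helpers did before rebuilding tables ("OFF") and after ("ON").
func setPragmaFK(ctx context.Context, db *sql.DB, value string) error {
	ctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()

	_, err := db.ExecContext(ctx, fmt.Sprintf("PRAGMA foreign_keys=%v;", value))
	return err
}

Usage would bracket a destructive rebuild: switch the pragma to "OFF", run the statements, then switch it back to "ON", which is the shape of the removed 16 -> 17 SQLite migration above.
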
@ -10,7 +10,6 @@ import (
"net"
"os"
"path"
"path/filepath"
"strings"
"time"

@ -1833,8 +1832,3 @@ func (u *User) getACopy() User {
func (u *User) GetEncryptionAdditionalData() string {
return u.Username
}

// GetGCSCredentialsFilePath returns the path for GCS credentials
func (u *User) GetGCSCredentialsFilePath() string {
return filepath.Join(credentialsDirPath, fmt.Sprintf("%v_gcs_credentials.json", u.Username))
}

go.mod
@ -114,7 +114,7 @@ require (
github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/klauspost/cpuid/v2 v2.0.12 // indirect
github.com/klauspost/cpuid/v2 v2.0.13 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/lestrrat-go/backoff/v2 v2.0.8 // indirect
github.com/lestrrat-go/blackmagic v1.0.1 // indirect

go.sum
@ -536,8 +536,8 @@ github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e
github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY=
github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.12 h1:p9dKCg8i4gmOxtv35DvrYoWqYzQrvEVdjQ762Y0OqZE=
github.com/klauspost/cpuid/v2 v2.0.13 h1:1XxvOiqXZ8SULZUKim/wncr3wZ38H4yCuVDvKdK9OGs=
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
github.com/klauspost/cpuid/v2 v2.0.13/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -220,15 +220,13 @@ qwlk5iw/jQekxThg==
)

var (
defaultPerms = []string{dataprovider.PermAny}
homeBasePath string
backupsPath string
credentialsPath string
testServer *httptest.Server
providerDriverName string
postConnectPath string
preActionPath string
lastResetCode string
)

type fakeConnection struct {
@@ -311,10 +309,6 @@ func TestMain(m *testing.M) {
 		pluginsConfig[0].Cmd += ".exe"
 	}
 	providerConf := config.GetProviderConf()
-	credentialsPath = filepath.Join(os.TempDir(), "test_credentials")
-	providerConf.CredentialsPath = credentialsPath
-	providerDriverName = providerConf.Driver
-	os.RemoveAll(credentialsPath) //nolint:errcheck
 	logger.InfoToConsole("Starting HTTPD tests, provider: %v", providerConf.Driver)

 	err = common.Initialize(config.GetCommonConfig(), 0)
@@ -439,7 +433,6 @@ func TestMain(m *testing.M) {
 	exitCode := m.Run()
 	os.Remove(logfilePath)
 	os.RemoveAll(backupsPath)
-	os.RemoveAll(credentialsPath)
 	os.Remove(certPath)
 	os.Remove(keyPath)
 	os.Remove(hostKeyPath)
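After this change the test bootstrap only needs to point the provider at a backups directory before initializing it; there is no credentials directory to create, pre-clean, or remove on teardown. A minimal sketch of the simplified setup, assuming the module layout used at the time of this commit (github.com/drakkan/sftpgo/v2/...); the directory values are illustrative, while the calls themselves mirror the context lines in the hunks above.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/drakkan/sftpgo/v2/config"
	"github.com/drakkan/sftpgo/v2/dataprovider"
)

func main() {
	configDir := "."                                            // test config dir, illustrative
	backupsPath := filepath.Join(os.TempDir(), "test_backups") // illustrative

	// Load the configuration and adjust only the backups path; there is no
	// CredentialsPath field to set any more.
	if err := config.LoadConfig(configDir, ""); err != nil {
		panic(err)
	}
	providerConf := config.GetProviderConf()
	providerConf.BackupsPath = backupsPath

	if err := dataprovider.Initialize(providerConf, configDir, true); err != nil {
		panic(err)
	}
	fmt.Println("provider initialized, driver:", providerConf.Driver)

	_ = os.RemoveAll(backupsPath) // teardown no longer touches a credentials dir
}
```

The same simplification repeats in every per-test re-initialization hunk that follows: the `providerConf.CredentialsPath` assignment and the `os.RemoveAll(credentialsPath)` cleanup simply disappear.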
@@ -1911,9 +1904,6 @@ func TestPasswordValidations(t *testing.T) {
 	assert.NoError(t, err)
 	providerConf = config.GetProviderConf()
 	providerConf.BackupsPath = backupsPath
-	providerConf.CredentialsPath = credentialsPath
-	err = os.RemoveAll(credentialsPath)
-	assert.NoError(t, err)
 	err = dataprovider.Initialize(providerConf, configDir, true)
 	assert.NoError(t, err)
 }

@@ -1965,8 +1955,6 @@ func TestAdminPasswordHashing(t *testing.T) {
 	assert.NoError(t, err)
 	providerConf = config.GetProviderConf()
 	providerConf.BackupsPath = backupsPath
-	providerConf.CredentialsPath = credentialsPath
-	err = os.RemoveAll(credentialsPath)
 	assert.NoError(t, err)
 	err = dataprovider.Initialize(providerConf, configDir, true)
 	assert.NoError(t, err)

@@ -2276,10 +2264,6 @@ func TestAddUserInvalidFsConfig(t *testing.T) {
 	u.FsConfig.S3Config.Bucket = ""
 	_, _, err := httpdtest.AddUser(u, http.StatusBadRequest)
 	assert.NoError(t, err)
-	err = os.RemoveAll(credentialsPath)
-	assert.NoError(t, err)
-	err = os.MkdirAll(credentialsPath, 0700)
-	assert.NoError(t, err)
 	u.FsConfig.S3Config.Bucket = "testbucket"
 	u.FsConfig.S3Config.Region = "eu-west-1" //nolint:goconst
 	u.FsConfig.S3Config.AccessKey = "access-key" //nolint:goconst

@@ -4491,9 +4475,6 @@ func TestNamingRules(t *testing.T) {
 	assert.NoError(t, err)
 	providerConf = config.GetProviderConf()
 	providerConf.BackupsPath = backupsPath
-	providerConf.CredentialsPath = credentialsPath
-	err = os.RemoveAll(credentialsPath)
-	assert.NoError(t, err)
 	err = dataprovider.Initialize(providerConf, configDir, true)
 	assert.NoError(t, err)
 	if config.GetProviderConf().Driver == dataprovider.MemoryDataProviderName {

@@ -4708,9 +4689,6 @@ func TestSaveErrors(t *testing.T) {
 	assert.NoError(t, err)
 	providerConf = config.GetProviderConf()
 	providerConf.BackupsPath = backupsPath
-	providerConf.CredentialsPath = credentialsPath
-	err = os.RemoveAll(credentialsPath)
-	assert.NoError(t, err)
 	err = dataprovider.Initialize(providerConf, configDir, true)
 	assert.NoError(t, err)
 	if config.GetProviderConf().Driver == dataprovider.MemoryDataProviderName {

@@ -4800,9 +4778,6 @@ func TestUserBaseDir(t *testing.T) {
 	assert.NoError(t, err)
 	providerConf = config.GetProviderConf()
 	providerConf.BackupsPath = backupsPath
-	providerConf.CredentialsPath = credentialsPath
-	err = os.RemoveAll(credentialsPath)
-	assert.NoError(t, err)
 	err = dataprovider.Initialize(providerConf, configDir, true)
 	assert.NoError(t, err)
 }

@@ -4847,9 +4822,6 @@ func TestQuotaTrackingDisabled(t *testing.T) {
 	assert.NoError(t, err)
 	providerConf = config.GetProviderConf()
 	providerConf.BackupsPath = backupsPath
-	providerConf.CredentialsPath = credentialsPath
-	err = os.RemoveAll(credentialsPath)
-	assert.NoError(t, err)
 	err = dataprovider.Initialize(providerConf, configDir, true)
 	assert.NoError(t, err)
 }

@@ -5037,9 +5009,6 @@ func TestProviderErrors(t *testing.T) {
 	assert.NoError(t, err)
 	providerConf := config.GetProviderConf()
 	providerConf.BackupsPath = backupsPath
-	providerConf.CredentialsPath = credentialsPath
-	err = os.RemoveAll(credentialsPath)
-	assert.NoError(t, err)
 	err = dataprovider.Initialize(providerConf, configDir, true)
 	assert.NoError(t, err)
 	httpdtest.SetJWTToken("")

@@ -5197,10 +5166,7 @@ func TestDumpdata(t *testing.T) {
 	err = config.LoadConfig(configDir, "")
 	assert.NoError(t, err)
 	providerConf = config.GetProviderConf()
-	providerConf.CredentialsPath = credentialsPath
 	providerConf.BackupsPath = backupsPath
-	err = os.RemoveAll(credentialsPath)
-	assert.NoError(t, err)
 	err = dataprovider.Initialize(providerConf, configDir, true)
 	assert.NoError(t, err)
 }

@@ -5373,10 +5339,7 @@ func TestDefenderAPIErrors(t *testing.T) {
 	err = config.LoadConfig(configDir, "")
 	assert.NoError(t, err)
 	providerConf := config.GetProviderConf()
-	providerConf.CredentialsPath = credentialsPath
 	providerConf.BackupsPath = backupsPath
-	err = os.RemoveAll(credentialsPath)
-	assert.NoError(t, err)
 	err = dataprovider.Initialize(providerConf, configDir, true)
 	assert.NoError(t, err)

@@ -13587,9 +13550,6 @@ func TestWebAdminSetupMock(t *testing.T) {
 	err = config.LoadConfig(configDir, "")
 	assert.NoError(t, err)
 	providerConf := config.GetProviderConf()
-	providerConf.CredentialsPath = credentialsPath
-	err = os.RemoveAll(credentialsPath)
-	assert.NoError(t, err)
 	err = dataprovider.Initialize(providerConf, configDir, true)
 	assert.NoError(t, err)
 	// now the setup page must be rendered

@@ -13679,9 +13639,6 @@ func TestWebAdminSetupMock(t *testing.T) {
 	assert.NoError(t, err)
 	providerConf = config.GetProviderConf()
 	providerConf.BackupsPath = backupsPath
-	providerConf.CredentialsPath = credentialsPath
-	err = os.RemoveAll(credentialsPath)
-	assert.NoError(t, err)
 	err = dataprovider.Initialize(providerConf, configDir, true)
 	assert.NoError(t, err)
 	req, err = http.NewRequest(http.MethodPost, webAdminSetupPath, bytes.NewBuffer([]byte(form.Encode())))

@@ -16302,9 +16259,6 @@ func TestUserSaveFromTemplateMock(t *testing.T) {
 	assert.NoError(t, err)
 	providerConf := config.GetProviderConf()
 	providerConf.BackupsPath = backupsPath
-	providerConf.CredentialsPath = credentialsPath
-	err = os.RemoveAll(credentialsPath)
-	assert.NoError(t, err)
 	err = dataprovider.Initialize(providerConf, configDir, true)
 	assert.NoError(t, err)
 }

@@ -16612,9 +16566,6 @@ func TestFolderSaveFromTemplateMock(t *testing.T) {
 	assert.NoError(t, err)
 	providerConf := config.GetProviderConf()
 	providerConf.BackupsPath = backupsPath
-	providerConf.CredentialsPath = credentialsPath
-	err = os.RemoveAll(credentialsPath)
-	assert.NoError(t, err)
 	err = dataprovider.Initialize(providerConf, configDir, true)
 	assert.NoError(t, err)
 }

@@ -18961,9 +18912,6 @@ func TestProviderClosedMock(t *testing.T) {
 	assert.NoError(t, err)
 	providerConf := config.GetProviderConf()
 	providerConf.BackupsPath = backupsPath
-	providerConf.CredentialsPath = credentialsPath
-	err = os.RemoveAll(credentialsPath)
-	assert.NoError(t, err)
 	err = dataprovider.Initialize(providerConf, configDir, true)
 	assert.NoError(t, err)
 }
@@ -1,6 +1,6 @@
 #!/bin/bash

-NFPM_VERSION=2.15.1
+NFPM_VERSION=2.16.0
 NFPM_ARCH=${NFPM_ARCH:-amd64}
 if [ -z ${SFTPGO_VERSION} ]
 then

@@ -40,7 +40,6 @@ cp ${BASE_DIR}/sftpgo.json .
 sed -i "s|sftpgo.db|/var/lib/sftpgo/sftpgo.db|" sftpgo.json
 sed -i "s|\"users_base_dir\": \"\",|\"users_base_dir\": \"/srv/sftpgo/data\",|" sftpgo.json
 sed -i "s|\"backups\"|\"/srv/sftpgo/backups\"|" sftpgo.json
-sed -i "s|\"credentials\"|\"/var/lib/sftpgo/credentials\"|" sftpgo.json
 sed -i "s|\"certs\"|\"/var/lib/sftpgo/certs\"|" sftpgo.json

 cat >nfpm.yaml <<EOF
@@ -202,7 +202,6 @@
     },
     "external_auth_hook": "",
     "external_auth_scope": 0,
-    "credentials_path": "credentials",
     "pre_login_hook": "",
     "post_login_hook": "",
     "post_login_scope": 0,
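On the configuration side, the data_provider section simply loses the credentials_path key. Existing sftpgo.json files that still contain it should remain loadable, since unknown keys are normally ignored when the configuration is read. If you want to tidy such a file anyway, here is a small, generic Go sketch (not part of this commit; the file name and the data_provider section layout are assumed from the hunk above) that strips the obsolete key:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Throwaway helper, not an official migration tool: it drops the obsolete
// "credentials_path" key from the data_provider section of sftpgo.json and
// prints the cleaned configuration.
func main() {
	raw, err := os.ReadFile("sftpgo.json")
	if err != nil {
		panic(err)
	}
	var cfg map[string]any
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	if dp, ok := cfg["data_provider"].(map[string]any); ok {
		delete(dp, "credentials_path") // the setting removed by this commit
	}
	out, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```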
@@ -2,7 +2,6 @@ package vfs

 import (
 	"fmt"
-	"path/filepath"
 	"strconv"
 	"strings"

@@ -37,11 +36,6 @@ func (v *BaseVirtualFolder) GetEncryptionAdditionalData() string {
 	return fmt.Sprintf("folder_%v", v.Name)
 }

-// GetGCSCredentialsFilePath returns the path for GCS credentials
-func (v *BaseVirtualFolder) GetGCSCredentialsFilePath() string {
-	return filepath.Join(credentialsDirPath, "folders", fmt.Sprintf("%v_gcs_credentials.json", v.Name))
-}
-
 // GetACopy returns a copy
 func (v *BaseVirtualFolder) GetACopy() BaseVirtualFolder {
 	users := make([]string, len(v.Users))
vfs/vfs.go (17 changed lines)

@@ -31,22 +31,11 @@ var (
 	// ErrStorageSizeUnavailable is returned if the storage backend does not support getting the size
 	ErrStorageSizeUnavailable = errors.New("unable to get available size for this storage backend")
 	// ErrVfsUnsupported defines the error for an unsupported VFS operation
 	ErrVfsUnsupported = errors.New("not supported")
-	credentialsDirPath string
 	tempPath string
 	sftpFingerprints []string
 )

-// SetCredentialsDirPath sets the credentials dir path
-func SetCredentialsDirPath(credentialsPath string) {
-	credentialsDirPath = credentialsPath
-}
-
-// GetCredentialsDirPath returns the credentials dir path
-func GetCredentialsDirPath() string {
-	return credentialsDirPath
-}
-
 // SetTempPath sets the path for temporary files
 func SetTempPath(fsPath string) {
 	tempPath = fsPath
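The deleted SetCredentialsDirPath/GetCredentialsDirPath pair was a plain package-level setter/getter, the same style vfs keeps using for the temporary files path. A self-contained illustration of that accessor style follows; only SetTempPath and the tempPath variable appear in the hunk above, while the getter and the demo main are assumptions added purely for illustration.

```go
package main

import "fmt"

// tempPath mirrors the package-level variable kept in vfs/vfs.go.
var tempPath string

// SetTempPath sets the path for temporary files (as in the hunk above).
func SetTempPath(fsPath string) {
	tempPath = fsPath
}

// GetTempPath returns the configured path for temporary files
// (illustrative counterpart to the setter).
func GetTempPath() string {
	return tempPath
}

func main() {
	SetTempPath("/tmp/sftpgo")
	fmt.Println(GetTempPath()) // /tmp/sftpgo
}
```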