Mirror of https://github.com/drakkan/sftpgo.git

Commit bd5191dfc5 (parent bfa4085932): add experimental plugin system

101 changed files with 3190 additions and 1612 deletions
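Before the diff: as context for the new `plugins` configuration section this commit introduces, here is a minimal, hedged sketch of building a plugin configuration programmatically in Go. The field names (Type, NotifierOptions.FsEvents, NotifierOptions.UserEvents, Cmd, Args, SHA256Sum, AutoMTLS) are the ones exercised in the diff below; the command path, arguments, and checksum values are purely illustrative assumptions, not part of the commit.

// Illustrative sketch only: field names taken from the plugin.Config usage in the diff,
// values are assumptions.
package main

import (
	"fmt"

	"github.com/drakkan/sftpgo/v2/sdk/plugin"
)

func main() {
	cfg := plugin.Config{
		Type:      "notifier",                       // plugin kind shown in the new tests
		Cmd:       "/usr/local/bin/sftpgo-notifier", // hypothetical plugin executable path
		Args:      []string{"--log-level", "debug"}, // hypothetical arguments
		SHA256Sum: "",                               // optional checksum of the plugin binary
		AutoMTLS:  true,                             // mutual TLS between SFTPGo and the plugin
	}
	// Notifier-specific options: which filesystem and user events to forward.
	cfg.NotifierOptions.FsEvents = []string{"upload", "download"}
	cfg.NotifierOptions.UserEvents = []string{"add", "update"}
	fmt.Printf("%+v\n", cfg)
}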
@@ -10,7 +10,7 @@ import (
"github.com/drakkan/sftpgo/v2/config"
"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/logger"
-"github.com/drakkan/sftpgo/v2/utils"
+"github.com/drakkan/sftpgo/v2/util"
)

var (
@@ -37,7 +37,7 @@ Please take a look at the usage below to customize the options.`,
Run: func(cmd *cobra.Command, args []string) {
logger.DisableLogger()
logger.EnableConsoleLogger(zerolog.DebugLevel)
-configDir = utils.CleanDirInput(configDir)
+configDir = util.CleanDirInput(configDir)
err := config.LoadConfig(configDir, configFile)
if err != nil {
logger.WarnToConsole("Unable to initialize data provider, config load error: %v", err)

@@ -8,7 +8,7 @@ import (
"github.com/spf13/cobra"

"github.com/drakkan/sftpgo/v2/service"
-"github.com/drakkan/sftpgo/v2/utils"
+"github.com/drakkan/sftpgo/v2/util"
)

var (
@@ -23,7 +23,7 @@ sftpgo service install
Please take a look at the usage below to customize the startup options`,
Run: func(cmd *cobra.Command, args []string) {
s := service.Service{
-ConfigDir: utils.CleanDirInput(configDir),
+ConfigDir: util.CleanDirInput(configDir),
ConfigFile: configFile,
LogFilePath: logFilePath,
LogMaxSize: logMaxSize,
@@ -60,7 +60,7 @@ func init() {
func getCustomServeFlags() []string {
result := []string{}
if configDir != defaultConfigDir {
-configDir = utils.CleanDirInput(configDir)
+configDir = util.CleanDirInput(configDir)
result = append(result, "--"+configDirFlag)
result = append(result, configDir)
}

@@ -14,6 +14,7 @@ import (
"github.com/drakkan/sftpgo/v2/common"
"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/kms"
+"github.com/drakkan/sftpgo/v2/sdk"
"github.com/drakkan/sftpgo/v2/service"
"github.com/drakkan/sftpgo/v2/sftpd"
"github.com/drakkan/sftpgo/v2/version"
@@ -85,9 +86,9 @@ $ sftpgo portable
Please take a look at the usage below to customize the serving parameters`,
Run: func(cmd *cobra.Command, args []string) {
portableDir := directoryToServe
-fsProvider := vfs.GetProviderByName(portableFsProvider)
+fsProvider := sdk.GetProviderByName(portableFsProvider)
if !filepath.IsAbs(portableDir) {
-if fsProvider == vfs.LocalFilesystemProvider {
+if fsProvider == sdk.LocalFilesystemProvider {
portableDir, _ = filepath.Abs(portableDir)
} else {
portableDir = os.TempDir()
@@ -96,7 +97,7 @@ Please take a look at the usage below to customize the serving parameters`,
permissions := make(map[string][]string)
permissions["/"] = portablePermissions
portableGCSCredentials := ""
-if fsProvider == vfs.GCSFilesystemProvider && portableGCSCredentialsFile != "" {
+if fsProvider == sdk.GCSFilesystemProvider && portableGCSCredentialsFile != "" {
contents, err := getFileContents(portableGCSCredentialsFile)
if err != nil {
fmt.Printf("Unable to get GCS credentials: %v\n", err)
@@ -106,7 +107,7 @@ Please take a look at the usage below to customize the serving parameters`,
portableGCSAutoCredentials = 0
}
portableSFTPPrivateKey := ""
-if fsProvider == vfs.SFTPFilesystemProvider && portableSFTPPrivateKeyPath != "" {
+if fsProvider == sdk.SFTPFilesystemProvider && portableSFTPPrivateKeyPath != "" {
contents, err := getFileContents(portableSFTPPrivateKeyPath)
if err != nil {
fmt.Printf("Unable to get SFTP private key: %v\n", err)
@@ -144,15 +145,21 @@ Please take a look at the usage below to customize the serving parameters`,
Shutdown: make(chan bool),
PortableMode: 1,
PortableUser: dataprovider.User{
+BaseUser: sdk.BaseUser{
Username: portableUsername,
Password: portablePassword,
PublicKeys: portablePublicKeys,
Permissions: permissions,
HomeDir: portableDir,
Status: 1,
+Filters: sdk.UserFilters{
+FilePatterns: parsePatternsFilesFilters(),
+},
+},
FsConfig: vfs.Filesystem{
-Provider: vfs.GetProviderByName(portableFsProvider),
+Provider: sdk.GetProviderByName(portableFsProvider),
S3Config: vfs.S3FsConfig{
+S3FsConfig: sdk.S3FsConfig{
Bucket: portableS3Bucket,
Region: portableS3Region,
AccessKey: portableS3AccessKey,
@@ -163,14 +170,18 @@ Please take a look at the usage below to customize the serving parameters`,
UploadPartSize: int64(portableS3ULPartSize),
UploadConcurrency: portableS3ULConcurrency,
+},
},
GCSConfig: vfs.GCSFsConfig{
+GCSFsConfig: sdk.GCSFsConfig{
Bucket: portableGCSBucket,
Credentials: kms.NewPlainSecret(portableGCSCredentials),
AutomaticCredentials: portableGCSAutoCredentials,
StorageClass: portableGCSStorageClass,
KeyPrefix: portableGCSKeyPrefix,
+},
},
AzBlobConfig: vfs.AzBlobFsConfig{
+AzBlobFsConfig: sdk.AzBlobFsConfig{
Container: portableAzContainer,
AccountName: portableAzAccountName,
AccountKey: kms.NewPlainSecret(portableAzAccountKey),
@@ -182,10 +193,14 @@ Please take a look at the usage below to customize the serving parameters`,
UploadPartSize: int64(portableAzULPartSize),
UploadConcurrency: portableAzULConcurrency,
+},
},
CryptConfig: vfs.CryptFsConfig{
+CryptFsConfig: sdk.CryptFsConfig{
Passphrase: kms.NewPlainSecret(portableCryptPassphrase),
+},
},
SFTPConfig: vfs.SFTPFsConfig{
+SFTPFsConfig: sdk.SFTPFsConfig{
Endpoint: portableSFTPEndpoint,
Username: portableSFTPUsername,
Password: kms.NewPlainSecret(portableSFTPPassword),
@@ -196,8 +211,6 @@ Please take a look at the usage below to customize the serving parameters`,
BufferSize: portableSFTPDBufferSize,
},
},
-Filters: dataprovider.UserFilters{
-FilePatterns: parsePatternsFilesFilters(),
-},
},
}
@@ -335,12 +348,12 @@ by overlapping round-trip times`)
rootCmd.AddCommand(portableCmd)
}

-func parsePatternsFilesFilters() []dataprovider.PatternsFilter {
-var patterns []dataprovider.PatternsFilter
+func parsePatternsFilesFilters() []sdk.PatternsFilter {
+var patterns []sdk.PatternsFilter
for _, val := range portableAllowedPatterns {
p, exts := getPatternsFilterValues(strings.TrimSpace(val))
if p != "" {
-patterns = append(patterns, dataprovider.PatternsFilter{
+patterns = append(patterns, sdk.PatternsFilter{
Path: path.Clean(p),
AllowedPatterns: exts,
DeniedPatterns: []string{},
@@ -359,7 +372,7 @@ func parsePatternsFilesFilters() []dataprovider.PatternsFilter {
}
}
if !found {
-patterns = append(patterns, dataprovider.PatternsFilter{
+patterns = append(patterns, sdk.PatternsFilter{
Path: path.Clean(p),
AllowedPatterns: []string{},
DeniedPatterns: exts,

@@ -10,7 +10,7 @@ import (
"github.com/drakkan/sftpgo/v2/config"
"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/logger"
-"github.com/drakkan/sftpgo/v2/utils"
+"github.com/drakkan/sftpgo/v2/util"
)

var (
@@ -30,7 +30,7 @@ Please take a look at the usage below to customize the options.`,
logger.WarnToConsole("Unsupported target version, 10 is the only supported one")
os.Exit(1)
}
-configDir = utils.CleanDirInput(configDir)
+configDir = util.CleanDirInput(configDir)
err := config.LoadConfig(configDir, configFile)
if err != nil {
logger.WarnToConsole("Unable to initialize data provider, config load error: %v", err)

@@ -6,7 +6,7 @@ import (
"github.com/spf13/cobra"

"github.com/drakkan/sftpgo/v2/service"
-"github.com/drakkan/sftpgo/v2/utils"
+"github.com/drakkan/sftpgo/v2/util"
)

var (
@@ -21,7 +21,7 @@ $ sftpgo serve
Please take a look at the usage below to customize the startup options`,
Run: func(cmd *cobra.Command, args []string) {
service := service.Service{
-ConfigDir: utils.CleanDirInput(configDir),
+ConfigDir: util.CleanDirInput(configDir),
ConfigFile: configFile,
LogFilePath: logFilePath,
LogMaxSize: logMaxSize,

@@ -8,7 +8,7 @@ import (
"github.com/spf13/cobra"

"github.com/drakkan/sftpgo/v2/service"
-"github.com/drakkan/sftpgo/v2/utils"
+"github.com/drakkan/sftpgo/v2/util"
)

var (
@@ -16,8 +16,8 @@ var (
Use: "start",
Short: "Start SFTPGo Windows Service",
Run: func(cmd *cobra.Command, args []string) {
-configDir = utils.CleanDirInput(configDir)
-if !filepath.IsAbs(logFilePath) && utils.IsFileInputValid(logFilePath) {
+configDir = util.CleanDirInput(configDir)
+if !filepath.IsAbs(logFilePath) && util.IsFileInputValid(logFilePath) {
logFilePath = filepath.Join(configDir, logFilePath)
}
s := service.Service{

@@ -17,8 +17,9 @@ import (
"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/httpclient"
"github.com/drakkan/sftpgo/v2/logger"
-"github.com/drakkan/sftpgo/v2/utils"
-"github.com/drakkan/sftpgo/v2/vfs"
+"github.com/drakkan/sftpgo/v2/sdk"
+"github.com/drakkan/sftpgo/v2/sdk/plugin"
+"github.com/drakkan/sftpgo/v2/util"
)

var (
@@ -51,7 +52,8 @@ func InitializeActionHandler(handler ActionHandler) {

// ExecutePreAction executes a pre-* action and returns the result
func ExecutePreAction(user *dataprovider.User, operation, filePath, virtualPath, protocol string, fileSize int64, openFlags int) error {
-if !utils.IsStringInSlice(operation, Config.Actions.ExecuteOn) {
+plugin.Handler.NotifyFsEvent(operation, user.Username, filePath, "", "", protocol, fileSize, nil)
+if !util.IsStringInSlice(operation, Config.Actions.ExecuteOn) {
// for pre-delete we execute the internal handling on error, so we must return errUnconfiguredAction.
// Other pre action will deny the operation on error so if we have no configuration we must return
// a nil error
@@ -66,9 +68,10 @@ func ExecutePreAction(user *dataprovider.User, operation, filePath, virtualPath,

// ExecuteActionNotification executes the defined hook, if any, for the specified action
func ExecuteActionNotification(user *dataprovider.User, operation, filePath, virtualPath, target, sshCmd, protocol string, fileSize int64, err error) {
+plugin.Handler.NotifyFsEvent(operation, user.Username, filePath, target, sshCmd, protocol, fileSize, err)
notification := newActionNotification(user, operation, filePath, virtualPath, target, sshCmd, protocol, fileSize, 0, err)

-if utils.IsStringInSlice(operation, Config.Actions.ExecuteSync) {
+if util.IsStringInSlice(operation, Config.Actions.ExecuteSync) {
actionHandler.Handle(notification) //nolint:errcheck
return
}
@@ -110,17 +113,17 @@ func newActionNotification(
fsConfig := user.GetFsConfigForPath(virtualPath)

switch fsConfig.Provider {
-case vfs.S3FilesystemProvider:
+case sdk.S3FilesystemProvider:
bucket = fsConfig.S3Config.Bucket
endpoint = fsConfig.S3Config.Endpoint
-case vfs.GCSFilesystemProvider:
+case sdk.GCSFilesystemProvider:
bucket = fsConfig.GCSConfig.Bucket
-case vfs.AzureBlobFilesystemProvider:
+case sdk.AzureBlobFilesystemProvider:
bucket = fsConfig.AzBlobConfig.Container
if fsConfig.AzBlobConfig.Endpoint != "" {
endpoint = fsConfig.AzBlobConfig.Endpoint
}
-case vfs.SFTPFilesystemProvider:
+case sdk.SFTPFilesystemProvider:
endpoint = fsConfig.SFTPConfig.Endpoint
}

@@ -149,7 +152,7 @@ func newActionNotification(
type defaultActionHandler struct{}

func (h *defaultActionHandler) Handle(notification *ActionNotification) error {
-if !utils.IsStringInSlice(notification.Action, Config.Actions.ExecuteOn) {
+if !util.IsStringInSlice(notification.Action, Config.Actions.ExecuteOn) {
return errUnconfiguredAction
}

@@ -12,27 +12,38 @@ import (
"github.com/stretchr/testify/assert"

"github.com/drakkan/sftpgo/v2/dataprovider"
+"github.com/drakkan/sftpgo/v2/sdk"
"github.com/drakkan/sftpgo/v2/vfs"
)

func TestNewActionNotification(t *testing.T) {
user := &dataprovider.User{
+BaseUser: sdk.BaseUser{
Username: "username",
+},
}
-user.FsConfig.Provider = vfs.LocalFilesystemProvider
+user.FsConfig.Provider = sdk.LocalFilesystemProvider
user.FsConfig.S3Config = vfs.S3FsConfig{
+S3FsConfig: sdk.S3FsConfig{
Bucket: "s3bucket",
Endpoint: "endpoint",
+},
}
user.FsConfig.GCSConfig = vfs.GCSFsConfig{
+GCSFsConfig: sdk.GCSFsConfig{
Bucket: "gcsbucket",
+},
}
user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{
+AzBlobFsConfig: sdk.AzBlobFsConfig{
Container: "azcontainer",
Endpoint: "azendpoint",
+},
}
user.FsConfig.SFTPConfig = vfs.SFTPFsConfig{
+SFTPFsConfig: sdk.SFTPFsConfig{
Endpoint: "sftpendpoint",
+},
}
a := newActionNotification(user, operationDownload, "path", "vpath", "target", "", ProtocolSFTP, 123, 0, errors.New("fake error"))
assert.Equal(t, user.Username, a.Username)
@@ -40,19 +51,19 @@ func TestNewActionNotification(t *testing.T) {
assert.Equal(t, 0, len(a.Endpoint))
assert.Equal(t, 0, a.Status)

-user.FsConfig.Provider = vfs.S3FilesystemProvider
+user.FsConfig.Provider = sdk.S3FilesystemProvider
a = newActionNotification(user, operationDownload, "path", "vpath", "target", "", ProtocolSSH, 123, 0, nil)
assert.Equal(t, "s3bucket", a.Bucket)
assert.Equal(t, "endpoint", a.Endpoint)
assert.Equal(t, 1, a.Status)

-user.FsConfig.Provider = vfs.GCSFilesystemProvider
+user.FsConfig.Provider = sdk.GCSFilesystemProvider
a = newActionNotification(user, operationDownload, "path", "vpath", "target", "", ProtocolSCP, 123, 0, ErrQuotaExceeded)
assert.Equal(t, "gcsbucket", a.Bucket)
assert.Equal(t, 0, len(a.Endpoint))
assert.Equal(t, 2, a.Status)

-user.FsConfig.Provider = vfs.AzureBlobFilesystemProvider
+user.FsConfig.Provider = sdk.AzureBlobFilesystemProvider
a = newActionNotification(user, operationDownload, "path", "vpath", "target", "", ProtocolSCP, 123, 0, nil)
assert.Equal(t, "azcontainer", a.Bucket)
assert.Equal(t, "azendpoint", a.Endpoint)
@@ -64,7 +75,7 @@ func TestNewActionNotification(t *testing.T) {
assert.Equal(t, 1, a.Status)
assert.Equal(t, os.O_APPEND, a.OpenFlags)

-user.FsConfig.Provider = vfs.SFTPFilesystemProvider
+user.FsConfig.Provider = sdk.SFTPFilesystemProvider
a = newActionNotification(user, operationDownload, "path", "vpath", "target", "", ProtocolSFTP, 123, 0, nil)
assert.Equal(t, "sftpendpoint", a.Endpoint)
}
@@ -77,7 +88,9 @@ func TestActionHTTP(t *testing.T) {
Hook: fmt.Sprintf("http://%v", httpAddr),
}
user := &dataprovider.User{
+BaseUser: sdk.BaseUser{
Username: "username",
+},
}
a := newActionNotification(user, operationDownload, "path", "vpath", "target", "", ProtocolSFTP, 123, 0, nil)
err := actionHandler.Handle(a)
@@ -110,7 +123,9 @@ func TestActionCMD(t *testing.T) {
Hook: hookCmd,
}
user := &dataprovider.User{
+BaseUser: sdk.BaseUser{
Username: "username",
+},
}
a := newActionNotification(user, operationDownload, "path", "vpath", "target", "", ProtocolSFTP, 123, 0, nil)
err = actionHandler.Handle(a)
@@ -133,7 +148,9 @@ func TestWrongActions(t *testing.T) {
Hook: badCommand,
}
user := &dataprovider.User{
+BaseUser: sdk.BaseUser{
Username: "username",
+},
}

a := newActionNotification(user, operationUpload, "", "", "", "", ProtocolSFTP, 123, 0, nil)
@@ -180,8 +197,10 @@ func TestPreDeleteAction(t *testing.T) {
err = os.MkdirAll(homeDir, os.ModePerm)
assert.NoError(t, err)
user := dataprovider.User{
+BaseUser: sdk.BaseUser{
Username: "username",
HomeDir: homeDir,
+},
}
user.Permissions = make(map[string][]string)
user.Permissions["/"] = []string{dataprovider.PermAny}

@@ -21,8 +21,8 @@ import (
"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/httpclient"
"github.com/drakkan/sftpgo/v2/logger"
-"github.com/drakkan/sftpgo/v2/metrics"
-"github.com/drakkan/sftpgo/v2/utils"
+"github.com/drakkan/sftpgo/v2/metric"
+"github.com/drakkan/sftpgo/v2/util"
"github.com/drakkan/sftpgo/v2/vfs"
)

@@ -330,10 +330,10 @@ func (t *ConnectionTransfer) getConnectionTransferAsString() string {
}
result += fmt.Sprintf("%#v ", t.VirtualPath)
if t.Size > 0 {
-elapsed := time.Since(utils.GetTimeFromMsecSinceEpoch(t.StartTime))
-speed := float64(t.Size) / float64(utils.GetTimeAsMsSinceEpoch(time.Now())-t.StartTime)
-result += fmt.Sprintf("Size: %#v Elapsed: %#v Speed: \"%.1f KB/s\"", utils.ByteCountIEC(t.Size),
-utils.GetDurationAsString(elapsed), speed)
+elapsed := time.Since(util.GetTimeFromMsecSinceEpoch(t.StartTime))
+speed := float64(t.Size) / float64(util.GetTimeAsMsSinceEpoch(time.Now())-t.StartTime)
+result += fmt.Sprintf("Size: %#v Elapsed: %#v Speed: \"%.1f KB/s\"", util.ByteCountIEC(t.Size),
+util.GetDurationAsString(elapsed), speed)
}
return result
}
@@ -595,7 +595,7 @@ func (conns *ActiveConnections) Add(c ActiveConnection) {
defer conns.Unlock()

conns.connections = append(conns.connections, c)
-metrics.UpdateActiveConnectionsSize(len(conns.connections))
+metric.UpdateActiveConnectionsSize(len(conns.connections))
logger.Debug(c.GetProtocol(), c.GetID(), "connection added, num open connections: %v", len(conns.connections))
}

@@ -629,7 +629,7 @@ func (conns *ActiveConnections) Remove(connectionID string) {
conns.connections[idx] = conns.connections[lastIdx]
conns.connections[lastIdx] = nil
conns.connections = conns.connections[:lastIdx]
-metrics.UpdateActiveConnectionsSize(lastIdx)
+metric.UpdateActiveConnectionsSize(lastIdx)
logger.Debug(conn.GetProtocol(), conn.GetID(), "connection removed, close fs error: %v, num open connections: %v",
err, lastIdx)
return
@@ -721,9 +721,9 @@ func (conns *ActiveConnections) checkIdles() {
logger.Debug(conn.GetProtocol(), conn.GetID(), "close idle connection, idle time: %v, username: %#v close err: %v",
time.Since(conn.GetLastActivity()), conn.GetUsername(), err)
if isFTPNoAuth {
-ip := utils.GetIPFromRemoteAddress(c.GetRemoteAddress())
+ip := util.GetIPFromRemoteAddress(c.GetRemoteAddress())
logger.ConnectionFailedLog("", ip, dataprovider.LoginMethodNoAuthTryed, c.GetProtocol(), "client idle")
-metrics.AddNoAuthTryed()
+metric.AddNoAuthTryed()
AddDefenderEvent(ip, HostEventNoLoginTried)
dataprovider.ExecutePostLoginHook(&dataprovider.User{}, dataprovider.LoginMethodNoAuthTryed, ip, c.GetProtocol(),
dataprovider.ErrNoAuthTryed)
@@ -794,8 +794,8 @@ func (conns *ActiveConnections) GetStats() []*ConnectionStatus {
ConnectionID: c.GetID(),
ClientVersion: c.GetClientVersion(),
RemoteAddress: c.GetRemoteAddress(),
-ConnectionTime: utils.GetTimeAsMsSinceEpoch(c.GetConnectionTime()),
-LastActivity: utils.GetTimeAsMsSinceEpoch(c.GetLastActivity()),
+ConnectionTime: util.GetTimeAsMsSinceEpoch(c.GetConnectionTime()),
+LastActivity: util.GetTimeAsMsSinceEpoch(c.GetLastActivity()),
Protocol: c.GetProtocol(),
Command: c.GetCommand(),
Transfers: c.GetTransfers(),
@@ -829,8 +829,8 @@ type ConnectionStatus struct {

// GetConnectionDuration returns the connection duration as string
func (c *ConnectionStatus) GetConnectionDuration() string {
-elapsed := time.Since(utils.GetTimeFromMsecSinceEpoch(c.ConnectionTime))
-return utils.GetDurationAsString(elapsed)
+elapsed := time.Since(util.GetTimeFromMsecSinceEpoch(c.ConnectionTime))
+return util.GetDurationAsString(elapsed)
}

// GetConnectionInfo returns connection info.
@@ -912,7 +912,7 @@ func (s *ActiveScans) AddUserQuotaScan(username string) bool {
}
s.UserHomeScans = append(s.UserHomeScans, ActiveQuotaScan{
Username: username,
-StartTime: utils.GetTimeAsMsSinceEpoch(time.Now()),
+StartTime: util.GetTimeAsMsSinceEpoch(time.Now()),
})
return true
}
@@ -960,7 +960,7 @@ func (s *ActiveScans) AddVFolderQuotaScan(folderName string) bool {
}
s.FolderScans = append(s.FolderScans, ActiveVirtualFolderQuotaScan{
Name: folderName,
-StartTime: utils.GetTimeAsMsSinceEpoch(time.Now()),
+StartTime: util.GetTimeAsMsSinceEpoch(time.Now()),
})
return true
}

@@ -20,7 +20,8 @@ import (

"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/kms"
-"github.com/drakkan/sftpgo/v2/utils"
+"github.com/drakkan/sftpgo/v2/sdk"
+"github.com/drakkan/sftpgo/v2/util"
"github.com/drakkan/sftpgo/v2/vfs"
)

@@ -323,7 +324,9 @@ func TestIdleConnections(t *testing.T) {

username := "test_user"
user := dataprovider.User{
+BaseUser: sdk.BaseUser{
Username: username,
+},
}
c := NewBaseConnection(sshConn1.id+"_1", ProtocolSFTP, "", user)
c.lastActivity = time.Now().Add(-24 * time.Hour).UnixNano()
@@ -410,7 +413,9 @@ func TestSwapConnection(t *testing.T) {
assert.Equal(t, "", Connections.GetStats()[0].Username)
}
c = NewBaseConnection("id", ProtocolFTP, "", dataprovider.User{
+BaseUser: sdk.BaseUser{
Username: userTestUsername,
+},
})
fakeConn = &fakeConnection{
BaseConnection: c,
@@ -443,7 +448,9 @@ func TestAtomicUpload(t *testing.T) {
func TestConnectionStatus(t *testing.T) {
username := "test_user"
user := dataprovider.User{
+BaseUser: sdk.BaseUser{
Username: username,
+},
}
fs := vfs.NewOsFs("", os.TempDir(), "")
c1 := NewBaseConnection("id1", ProtocolSFTP, "", user)
@@ -634,7 +641,11 @@ func TestPostConnectHook(t *testing.T) {

func TestCryptoConvertFileInfo(t *testing.T) {
name := "name"
-fs, err := vfs.NewCryptFs("connID1", os.TempDir(), "", vfs.CryptFsConfig{Passphrase: kms.NewPlainSecret("secret")})
+fs, err := vfs.NewCryptFs("connID1", os.TempDir(), "", vfs.CryptFsConfig{
+CryptFsConfig: sdk.CryptFsConfig{
+Passphrase: kms.NewPlainSecret("secret"),
+},
+})
require.NoError(t, err)
cryptFs := fs.(*vfs.CryptFs)
info := vfs.NewFileInfo(name, true, 48, time.Now(), false)
@@ -654,15 +665,15 @@ func TestFolderCopy(t *testing.T) {
MappedPath: filepath.Clean(os.TempDir()),
UsedQuotaSize: 4096,
UsedQuotaFiles: 2,
-LastQuotaUpdate: utils.GetTimeAsMsSinceEpoch(time.Now()),
+LastQuotaUpdate: util.GetTimeAsMsSinceEpoch(time.Now()),
Users: []string{"user1", "user2"},
}
folderCopy := folder.GetACopy()
folder.ID = 2
folder.Users = []string{"user3"}
require.Len(t, folderCopy.Users, 2)
-require.True(t, utils.IsStringInSlice("user1", folderCopy.Users))
-require.True(t, utils.IsStringInSlice("user2", folderCopy.Users))
+require.True(t, util.IsStringInSlice("user1", folderCopy.Users))
+require.True(t, util.IsStringInSlice("user2", folderCopy.Users))
require.Equal(t, int64(1), folderCopy.ID)
require.Equal(t, folder.Name, folderCopy.Name)
require.Equal(t, folder.MappedPath, folderCopy.MappedPath)
@@ -672,13 +683,15 @@ func TestFolderCopy(t *testing.T) {

folder.FsConfig = vfs.Filesystem{
CryptConfig: vfs.CryptFsConfig{
+CryptFsConfig: sdk.CryptFsConfig{
Passphrase: kms.NewPlainSecret("crypto secret"),
+},
},
}
folderCopy = folder.GetACopy()
folder.FsConfig.CryptConfig.Passphrase = kms.NewEmptySecret()
require.Len(t, folderCopy.Users, 1)
-require.True(t, utils.IsStringInSlice("user3", folderCopy.Users))
+require.True(t, util.IsStringInSlice("user3", folderCopy.Users))
require.Equal(t, int64(2), folderCopy.ID)
require.Equal(t, folder.Name, folderCopy.Name)
require.Equal(t, folder.MappedPath, folderCopy.MappedPath)
@@ -690,7 +703,9 @@ func TestFolderCopy(t *testing.T) {

func TestCachedFs(t *testing.T) {
user := dataprovider.User{
+BaseUser: sdk.BaseUser{
HomeDir: filepath.Clean(os.TempDir()),
+},
}
conn := NewBaseConnection("id", ProtocolSFTP, "", user)
// changing the user should not affect the connection
@@ -706,10 +721,10 @@ func TestCachedFs(t *testing.T) {
_, p, err = conn.GetFsAndResolvedPath("/")
assert.NoError(t, err)
assert.Equal(t, filepath.Clean(os.TempDir()), p)
-user.FsConfig.Provider = vfs.S3FilesystemProvider
+user.FsConfig.Provider = sdk.S3FilesystemProvider
_, err = user.GetFilesystem("")
assert.Error(t, err)
-conn.User.FsConfig.Provider = vfs.S3FilesystemProvider
+conn.User.FsConfig.Provider = sdk.S3FilesystemProvider
_, p, err = conn.GetFsAndResolvedPath("/")
assert.NoError(t, err)
assert.Equal(t, filepath.Clean(os.TempDir()), p)
@@ -718,11 +733,11 @@ func TestCachedFs(t *testing.T) {
}

func TestParseAllowedIPAndRanges(t *testing.T) {
-_, err := utils.ParseAllowedIPAndRanges([]string{"1.1.1.1", "not an ip"})
+_, err := util.ParseAllowedIPAndRanges([]string{"1.1.1.1", "not an ip"})
assert.Error(t, err)
-_, err = utils.ParseAllowedIPAndRanges([]string{"1.1.1.5", "192.168.1.0/240"})
+_, err = util.ParseAllowedIPAndRanges([]string{"1.1.1.5", "192.168.1.0/240"})
assert.Error(t, err)
-allow, err := utils.ParseAllowedIPAndRanges([]string{"192.168.1.2", "172.16.0.0/24"})
+allow, err := util.ParseAllowedIPAndRanges([]string{"192.168.1.2", "172.16.0.0/24"})
assert.NoError(t, err)
assert.True(t, allow[0](net.ParseIP("192.168.1.2")))
assert.False(t, allow[0](net.ParseIP("192.168.2.2")))

@@ -15,7 +15,8 @@ import (

"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/logger"
-"github.com/drakkan/sftpgo/v2/utils"
+"github.com/drakkan/sftpgo/v2/sdk"
+"github.com/drakkan/sftpgo/v2/util"
"github.com/drakkan/sftpgo/v2/vfs"
)

@@ -40,7 +41,7 @@ type BaseConnection struct {
// NewBaseConnection returns a new BaseConnection
func NewBaseConnection(id, protocol, remoteAddr string, user dataprovider.User) *BaseConnection {
connID := id
-if utils.IsStringInSlice(protocol, supportedProtocols) {
+if util.IsStringInSlice(protocol, supportedProtocols) {
connID = fmt.Sprintf("%v_%v", protocol, id)
}
return &BaseConnection{
@@ -82,7 +83,7 @@ func (c *BaseConnection) GetProtocol() string {
// SetProtocol sets the protocol for this connection
func (c *BaseConnection) SetProtocol(protocol string) {
c.protocol = protocol
-if utils.IsStringInSlice(c.protocol, supportedProtocols) {
+if util.IsStringInSlice(c.protocol, supportedProtocols) {
c.ID = fmt.Sprintf("%v_%v", c.protocol, c.ID)
}
}
@@ -155,7 +156,7 @@ func (c *BaseConnection) GetTransfers() []ConnectionTransfer {
transfers = append(transfers, ConnectionTransfer{
ID: t.GetID(),
OperationType: operationType,
-StartTime: utils.GetTimeAsMsSinceEpoch(t.GetStartTime()),
+StartTime: util.GetTimeAsMsSinceEpoch(t.GetStartTime()),
Size: t.GetSize(),
VirtualPath: t.GetVirtualPath(),
})
@@ -881,22 +882,22 @@ func (c *BaseConnection) isLocalOrSameFolderRename(virtualSourcePath, virtualTar
return true
}
// we have different folders, only local fs is supported
-if sourceFolder.FsConfig.Provider == vfs.LocalFilesystemProvider &&
-dstFolder.FsConfig.Provider == vfs.LocalFilesystemProvider {
+if sourceFolder.FsConfig.Provider == sdk.LocalFilesystemProvider &&
+dstFolder.FsConfig.Provider == sdk.LocalFilesystemProvider {
return true
}
return false
}
-if c.User.FsConfig.Provider != vfs.LocalFilesystemProvider {
+if c.User.FsConfig.Provider != sdk.LocalFilesystemProvider {
return false
}
if errSrc == nil {
-if sourceFolder.FsConfig.Provider == vfs.LocalFilesystemProvider {
+if sourceFolder.FsConfig.Provider == sdk.LocalFilesystemProvider {
return true
}
}
if errDst == nil {
-if dstFolder.FsConfig.Provider == vfs.LocalFilesystemProvider {
+if dstFolder.FsConfig.Provider == sdk.LocalFilesystemProvider {
return true
}
}

@@ -12,6 +12,7 @@ import (
"github.com/stretchr/testify/assert"

"github.com/drakkan/sftpgo/v2/dataprovider"
+"github.com/drakkan/sftpgo/v2/sdk"
"github.com/drakkan/sftpgo/v2/vfs"
)

@@ -47,8 +48,10 @@ func TestRemoveErrors(t *testing.T) {
homePath := filepath.Join(os.TempDir(), "home")

user := dataprovider.User{
+BaseUser: sdk.BaseUser{
Username: "remove_errors_user",
HomeDir: homePath,
+},
VirtualFolders: []vfs.VirtualFolder{
{
BaseVirtualFolder: vfs.BaseVirtualFolder{
@@ -78,7 +81,9 @@ func TestSetStatMode(t *testing.T) {

fakePath := "fake path"
user := dataprovider.User{
+BaseUser: sdk.BaseUser{
HomeDir: os.TempDir(),
+},
}
user.Permissions = make(map[string][]string)
user.Permissions["/"] = []string{dataprovider.PermAny}
@@ -145,8 +150,10 @@ func TestRenameVirtualFolders(t *testing.T) {

func TestUpdateQuotaAfterRename(t *testing.T) {
user := dataprovider.User{
+BaseUser: sdk.BaseUser{
Username: userTestUsername,
HomeDir: filepath.Join(os.TempDir(), "home"),
+},
}
mappedPath := filepath.Join(os.TempDir(), "vdir")
user.Permissions = make(map[string][]string)
@@ -218,7 +225,7 @@ func TestUpdateQuotaAfterRename(t *testing.T) {

func TestErrorsMapping(t *testing.T) {
fs := vfs.NewOsFs("", os.TempDir(), "")
-conn := NewBaseConnection("", ProtocolSFTP, "", dataprovider.User{HomeDir: os.TempDir()})
+conn := NewBaseConnection("", ProtocolSFTP, "", dataprovider.User{BaseUser: sdk.BaseUser{HomeDir: os.TempDir()}})
for _, protocol := range supportedProtocols {
conn.SetProtocol(protocol)
err := conn.GetFsError(fs, os.ErrNotExist)
@@ -276,9 +283,11 @@ func TestMaxWriteSize(t *testing.T) {
permissions := make(map[string][]string)
permissions["/"] = []string{dataprovider.PermAny}
user := dataprovider.User{
+BaseUser: sdk.BaseUser{
Username: userTestUsername,
Permissions: permissions,
HomeDir: filepath.Clean(os.TempDir()),
+},
}
fs, err := user.GetFilesystem("123")
assert.NoError(t, err)

@@ -13,7 +13,7 @@ import (
"github.com/yl2chen/cidranger"

"github.com/drakkan/sftpgo/v2/logger"
-"github.com/drakkan/sftpgo/v2/utils"
+"github.com/drakkan/sftpgo/v2/util"
)

// HostEvent is the enumerable for the support host event
@@ -289,7 +289,7 @@ func (d *memoryDefender) GetHost(ip string) (*DefenderEntry, error) {
}
}

-return nil, utils.NewRecordNotFoundError("host not found")
+return nil, util.NewRecordNotFoundError("host not found")
}

// IsBanned returns true if the specified IP is banned
@@ -522,7 +522,7 @@ func loadHostListFromFile(name string) (*HostList, error) {
if name == "" {
return nil, nil
}
-if !utils.IsFileInputValid(name) {
+if !util.IsFileInputValid(name) {
return nil, fmt.Errorf("invalid host list file name %#v", name)
}

@@ -11,7 +11,7 @@ import (
"golang.org/x/crypto/bcrypt"

"github.com/drakkan/sftpgo/v2/logger"
-"github.com/drakkan/sftpgo/v2/utils"
+"github.com/drakkan/sftpgo/v2/util"
)

const (
@@ -114,7 +114,7 @@ func (p *basicAuthProvider) getHashedPassword(username string) (string, bool) {
// ValidateCredentials returns true if the credentials are valid
func (p *basicAuthProvider) ValidateCredentials(username, password string) bool {
if hashedPwd, ok := p.getHashedPassword(username); ok {
-if utils.IsStringPrefixInSlice(hashedPwd, bcryptPwdPrefixes) {
+if util.IsStringPrefixInSlice(hashedPwd, bcryptPwdPrefixes) {
err := bcrypt.CompareHashAndPassword([]byte(hashedPwd), []byte(password))
return err == nil
}

@@ -33,6 +33,7 @@ import (
"github.com/drakkan/sftpgo/v2/httpdtest"
"github.com/drakkan/sftpgo/v2/kms"
"github.com/drakkan/sftpgo/v2/logger"
+"github.com/drakkan/sftpgo/v2/sdk"
"github.com/drakkan/sftpgo/v2/vfs"
)

@@ -354,7 +355,7 @@ func TestPermissionErrors(t *testing.T) {
func TestFileNotAllowedErrors(t *testing.T) {
deniedDir := "/denied"
u := getTestUser()
-u.Filters.FilePatterns = []dataprovider.PatternsFilter{
+u.Filters.FilePatterns = []sdk.PatternsFilter{
{
Path: deniedDir,
DeniedPatterns: []string{"*.txt"},
@@ -2373,22 +2374,26 @@ func TestSFTPLoopError(t *testing.T) {
BaseVirtualFolder: vfs.BaseVirtualFolder{
Name: "sftp",
FsConfig: vfs.Filesystem{
-Provider: vfs.SFTPFilesystemProvider,
+Provider: sdk.SFTPFilesystemProvider,
SFTPConfig: vfs.SFTPFsConfig{
+SFTPFsConfig: sdk.SFTPFsConfig{
Endpoint: sftpServerAddr,
Username: user2.Username,
Password: kms.NewPlainSecret(defaultPassword),
+},
},
},
},
VirtualPath: "/vdir",
})

-user2.FsConfig.Provider = vfs.SFTPFilesystemProvider
+user2.FsConfig.Provider = sdk.SFTPFilesystemProvider
user2.FsConfig.SFTPConfig = vfs.SFTPFsConfig{
+SFTPFsConfig: sdk.SFTPFsConfig{
Endpoint: sftpServerAddr,
Username: user1.Username,
Password: kms.NewPlainSecret(defaultPassword),
+},
}

user1, resp, err := httpdtest.AddUser(user1, http.StatusCreated)
@@ -2438,14 +2443,16 @@ func TestNonLocalCrossRename(t *testing.T) {
BaseVirtualFolder: vfs.BaseVirtualFolder{
Name: folderNameSFTP,
FsConfig: vfs.Filesystem{
-Provider: vfs.SFTPFilesystemProvider,
+Provider: sdk.SFTPFilesystemProvider,
SFTPConfig: vfs.SFTPFsConfig{
+SFTPFsConfig: sdk.SFTPFsConfig{
Endpoint: sftpServerAddr,
Username: baseUser.Username,
Password: kms.NewPlainSecret(defaultPassword),
+},
},
},
},
VirtualPath: vdirSFTPPath,
})
mappedPathCrypt := filepath.Join(os.TempDir(), "crypt")
@@ -2455,11 +2462,13 @@ func TestNonLocalCrossRename(t *testing.T) {
BaseVirtualFolder: vfs.BaseVirtualFolder{
Name: folderNameCrypt,
FsConfig: vfs.Filesystem{
-Provider: vfs.CryptedFilesystemProvider,
+Provider: sdk.CryptedFilesystemProvider,
CryptConfig: vfs.CryptFsConfig{
+CryptFsConfig: sdk.CryptFsConfig{
Passphrase: kms.NewPlainSecret(defaultPassword),
+},
},
},
MappedPath: mappedPathCrypt,
},
VirtualPath: vdirCryptPath,
@@ -2556,11 +2565,13 @@ func TestNonLocalCrossRenameNonLocalBaseUser(t *testing.T) {
BaseVirtualFolder: vfs.BaseVirtualFolder{
Name: folderNameCrypt,
FsConfig: vfs.Filesystem{
-Provider: vfs.CryptedFilesystemProvider,
+Provider: sdk.CryptedFilesystemProvider,
CryptConfig: vfs.CryptFsConfig{
+CryptFsConfig: sdk.CryptFsConfig{
Passphrase: kms.NewPlainSecret(defaultPassword),
+},
},
},
MappedPath: mappedPathCrypt,
},
VirtualPath: vdirCryptPath,
@@ -2639,14 +2650,14 @@ func TestProxyProtocol(t *testing.T) {
}

func TestSetProtocol(t *testing.T) {
-conn := common.NewBaseConnection("id", "sshd_exec", "", dataprovider.User{HomeDir: os.TempDir()})
+conn := common.NewBaseConnection("id", "sshd_exec", "", dataprovider.User{BaseUser: sdk.BaseUser{HomeDir: os.TempDir()}})
conn.SetProtocol(common.ProtocolSCP)
require.Equal(t, "SCP_id", conn.GetID())
}

func TestGetFsError(t *testing.T) {
u := getTestUser()
-u.FsConfig.Provider = vfs.GCSFilesystemProvider
+u.FsConfig.Provider = sdk.GCSFilesystemProvider
u.FsConfig.GCSConfig.Bucket = "test"
u.FsConfig.GCSConfig.Credentials = kms.NewPlainSecret("invalid JSON for credentials")
conn := common.NewBaseConnection("", common.ProtocolFTP, "", u)
@@ -2704,11 +2715,13 @@ func getSftpClient(user dataprovider.User) (*ssh.Client, *sftp.Client, error) {

func getTestUser() dataprovider.User {
user := dataprovider.User{
+BaseUser: sdk.BaseUser{
Username: defaultUsername,
Password: defaultPassword,
HomeDir: filepath.Join(homeBasePath, defaultUsername),
Status: 1,
ExpirationDate: 0,
+},
}
user.Permissions = make(map[string][]string)
user.Permissions["/"] = allPerms
@@ -2718,7 +2731,7 @@ func getTestUser() dataprovider.User {
func getTestSFTPUser() dataprovider.User {
u := getTestUser()
u.Username = defaultSFTPUsername
-u.FsConfig.Provider = vfs.SFTPFilesystemProvider
+u.FsConfig.Provider = sdk.SFTPFilesystemProvider
u.FsConfig.SFTPConfig.Endpoint = sftpServerAddr
u.FsConfig.SFTPConfig.Username = defaultUsername
u.FsConfig.SFTPConfig.Password = kms.NewPlainSecret(defaultPassword)
@@ -2727,7 +2740,7 @@ func getTestSFTPUser() dataprovider.User {

func getCryptFsUser() dataprovider.User {
u := getTestUser()
-u.FsConfig.Provider = vfs.CryptedFilesystemProvider
+u.FsConfig.Provider = sdk.CryptedFilesystemProvider
u.FsConfig.CryptConfig.Passphrase = kms.NewPlainSecret(defaultPassword)
return u
}

@@ -10,7 +10,7 @@ import (

"golang.org/x/time/rate"

-"github.com/drakkan/sftpgo/v2/utils"
+"github.com/drakkan/sftpgo/v2/util"
)

var (
@@ -78,9 +78,9 @@ func (r *RateLimiterConfig) validate() error {
return fmt.Errorf("invalid entries_hard_limit %v must be > %v", r.EntriesHardLimit, r.EntriesSoftLimit)
}
}
-r.Protocols = utils.RemoveDuplicates(r.Protocols)
+r.Protocols = util.RemoveDuplicates(r.Protocols)
for _, protocol := range r.Protocols {
-if !utils.IsStringInSlice(protocol, rateLimiterProtocolValues) {
+if !util.IsStringInSlice(protocol, rateLimiterProtocolValues) {
return fmt.Errorf("invalid protocol %#v", protocol)
}
}

@@ -11,7 +11,7 @@ import (
"time"

"github.com/drakkan/sftpgo/v2/logger"
-"github.com/drakkan/sftpgo/v2/utils"
+"github.com/drakkan/sftpgo/v2/util"
)

// CertManager defines a TLS certificate manager
@@ -98,7 +98,7 @@ func (m *CertManager) LoadCRLs() error {
var crls []*pkix.CertificateList

for _, revocationList := range m.caRevocationLists {
-if !utils.IsFileInputValid(revocationList) {
+if !util.IsFileInputValid(revocationList) {
return fmt.Errorf("invalid root CA revocation list %#v", revocationList)
}
if revocationList != "" && !filepath.IsAbs(revocationList) {
@@ -145,7 +145,7 @@ func (m *CertManager) LoadRootCAs() error {
rootCAs := x509.NewCertPool()

for _, rootCA := range m.caCertificates {
-if !utils.IsFileInputValid(rootCA) {
+if !util.IsFileInputValid(rootCA) {
return fmt.Errorf("invalid root CA certificate %#v", rootCA)
}
if rootCA != "" && !filepath.IsAbs(rootCA) {

@@ -9,7 +9,7 @@ import (

"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/logger"
-"github.com/drakkan/sftpgo/v2/metrics"
+"github.com/drakkan/sftpgo/v2/metric"
"github.com/drakkan/sftpgo/v2/vfs"
)

@@ -139,7 +139,7 @@ func (t *BaseTransfer) Truncate(fsPath string, size int64) (int64, error) {
if t.MaxWriteSize > 0 {
sizeDiff := initialSize - size
t.MaxWriteSize += sizeDiff
-metrics.TransferCompleted(atomic.LoadInt64(&t.BytesSent), atomic.LoadInt64(&t.BytesReceived), t.transferType, t.ErrTransfer)
+metric.TransferCompleted(atomic.LoadInt64(&t.BytesSent), atomic.LoadInt64(&t.BytesReceived), t.transferType, t.ErrTransfer)
atomic.StoreInt64(&t.BytesReceived, 0)
}
t.Unlock()
@@ -206,7 +206,7 @@ func (t *BaseTransfer) Close() error {
if t.isNewFile {
numFiles = 1
}
-metrics.TransferCompleted(atomic.LoadInt64(&t.BytesSent), atomic.LoadInt64(&t.BytesReceived), t.transferType, t.ErrTransfer)
+metric.TransferCompleted(atomic.LoadInt64(&t.BytesSent), atomic.LoadInt64(&t.BytesReceived), t.transferType, t.ErrTransfer)
if t.File != nil && t.Connection.IsQuotaExceededError(t.ErrTransfer) {
// if quota is exceeded we try to remove the partial file for uploads to local filesystem
err = t.Fs.Remove(t.File.Name(), false)

@@ -12,6 +12,7 @@ import (

"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/kms"
+"github.com/drakkan/sftpgo/v2/sdk"
"github.com/drakkan/sftpgo/v2/vfs"
)

@@ -50,9 +51,11 @@ func TestTransferUpdateQuota(t *testing.T) {

func TestTransferThrottling(t *testing.T) {
u := dataprovider.User{
+BaseUser: sdk.BaseUser{
Username: "test",
UploadBandwidth: 50,
DownloadBandwidth: 40,
+},
}
fs := vfs.NewOsFs("", os.TempDir(), "")
testFileSize := int64(131072)
@@ -88,8 +91,10 @@ func TestRealPath(t *testing.T) {
testFile := filepath.Join(os.TempDir(), "afile.txt")
fs := vfs.NewOsFs("123", os.TempDir(), "")
u := dataprovider.User{
+BaseUser: sdk.BaseUser{
Username: "user",
HomeDir: os.TempDir(),
+},
}
u.Permissions = make(map[string][]string)
u.Permissions["/"] = []string{dataprovider.PermAny}
@@ -119,8 +124,10 @@ func TestTruncate(t *testing.T) {
testFile := filepath.Join(os.TempDir(), "transfer_test_file")
fs := vfs.NewOsFs("123", os.TempDir(), "")
u := dataprovider.User{
+BaseUser: sdk.BaseUser{
Username: "user",
HomeDir: os.TempDir(),
+},
}
u.Permissions = make(map[string][]string)
u.Permissions["/"] = []string{dataprovider.PermAny}
@@ -183,8 +190,10 @@ func TestTransferErrors(t *testing.T) {
testFile := filepath.Join(os.TempDir(), "transfer_test_file")
fs := vfs.NewOsFs("id", os.TempDir(), "")
u := dataprovider.User{
+BaseUser: sdk.BaseUser{
Username: "test",
HomeDir: os.TempDir(),
+},
}
err := os.WriteFile(testFile, []byte("test data"), os.ModePerm)
assert.NoError(t, err)
@@ -255,11 +264,13 @@ func TestTransferErrors(t *testing.T) {

func TestRemovePartialCryptoFile(t *testing.T) {
testFile := filepath.Join(os.TempDir(), "transfer_test_file")
-fs, err := vfs.NewCryptFs("id", os.TempDir(), "", vfs.CryptFsConfig{Passphrase: kms.NewPlainSecret("secret")})
+fs, err := vfs.NewCryptFs("id", os.TempDir(), "", vfs.CryptFsConfig{CryptFsConfig: sdk.CryptFsConfig{Passphrase: kms.NewPlainSecret("secret")}})
require.NoError(t, err)
u := dataprovider.User{
+BaseUser: sdk.BaseUser{
Username: "test",
HomeDir: os.TempDir(),
+},
}
conn := NewBaseConnection(fs.ConnectionID(), ProtocolSFTP, "", u)
transfer := NewBaseTransfer(nil, conn, nil, testFile, testFile, "/transfer_test_file", TransferUpload, 0, 0, 0, true, fs)

@@ -18,9 +18,10 @@ import (
"github.com/drakkan/sftpgo/v2/httpd"
"github.com/drakkan/sftpgo/v2/kms"
"github.com/drakkan/sftpgo/v2/logger"
+"github.com/drakkan/sftpgo/v2/sdk/plugin"
"github.com/drakkan/sftpgo/v2/sftpd"
"github.com/drakkan/sftpgo/v2/telemetry"
-"github.com/drakkan/sftpgo/v2/utils"
+"github.com/drakkan/sftpgo/v2/util"
"github.com/drakkan/sftpgo/v2/version"
"github.com/drakkan/sftpgo/v2/webdavd"
)
@@ -94,6 +95,7 @@ type globalConfig struct {
HTTPConfig httpclient.Config `json:"http" mapstructure:"http"`
KMSConfig kms.Configuration `json:"kms" mapstructure:"kms"`
TelemetryConfig telemetry.Conf `json:"telemetry" mapstructure:"telemetry"`
+PluginsConfig []plugin.Config `json:"plugins" mapstructure:"plugins"`
}

func init() {
@@ -275,6 +277,7 @@ func Init() {
CertificateKeyFile: "",
TLSCipherSuites: nil,
},
+PluginsConfig: nil,
}

viper.SetEnvPrefix(configEnvPrefix)
@@ -371,6 +374,11 @@ func SetTelemetryConfig(config telemetry.Conf) {
globalConf.TelemetryConfig = config
}

+// GetPluginsConfig returns the plugins configuration
+func GetPluginsConfig() []plugin.Config {
+return globalConf.PluginsConfig
+}
+
// HasServicesToStart returns true if the config defines at least a service to start.
// Supported services are SFTP, FTP and WebDAV
func HasServicesToStart() bool {
@@ -388,17 +396,17 @@ func HasServicesToStart() bool {

func getRedactedGlobalConf() globalConfig {
conf := globalConf
-conf.Common.Actions.Hook = utils.GetRedactedURL(conf.Common.Actions.Hook)
-conf.Common.StartupHook = utils.GetRedactedURL(conf.Common.StartupHook)
-conf.Common.PostConnectHook = utils.GetRedactedURL(conf.Common.PostConnectHook)
-conf.SFTPD.KeyboardInteractiveHook = utils.GetRedactedURL(conf.SFTPD.KeyboardInteractiveHook)
+conf.Common.Actions.Hook = util.GetRedactedURL(conf.Common.Actions.Hook)
+conf.Common.StartupHook = util.GetRedactedURL(conf.Common.StartupHook)
+conf.Common.PostConnectHook = util.GetRedactedURL(conf.Common.PostConnectHook)
+conf.SFTPD.KeyboardInteractiveHook = util.GetRedactedURL(conf.SFTPD.KeyboardInteractiveHook)
conf.HTTPDConfig.SigningPassphrase = "[redacted]"
conf.ProviderConf.Password = "[redacted]"
-conf.ProviderConf.Actions.Hook = utils.GetRedactedURL(conf.ProviderConf.Actions.Hook)
-conf.ProviderConf.ExternalAuthHook = utils.GetRedactedURL(conf.ProviderConf.ExternalAuthHook)
-conf.ProviderConf.PreLoginHook = utils.GetRedactedURL(conf.ProviderConf.PreLoginHook)
-conf.ProviderConf.PostLoginHook = utils.GetRedactedURL(conf.ProviderConf.PostLoginHook)
-conf.ProviderConf.CheckPasswordHook = utils.GetRedactedURL(conf.ProviderConf.CheckPasswordHook)
+conf.ProviderConf.Actions.Hook = util.GetRedactedURL(conf.ProviderConf.Actions.Hook)
+conf.ProviderConf.ExternalAuthHook = util.GetRedactedURL(conf.ProviderConf.ExternalAuthHook)
+conf.ProviderConf.PreLoginHook = util.GetRedactedURL(conf.ProviderConf.PreLoginHook)
+conf.ProviderConf.PostLoginHook = util.GetRedactedURL(conf.ProviderConf.PostLoginHook)
+conf.ProviderConf.CheckPasswordHook = util.GetRedactedURL(conf.ProviderConf.CheckPasswordHook)
return conf
}

@@ -406,7 +414,7 @@ func setConfigFile(configDir, configFile string) {
if configFile == "" {
return
}
-if !filepath.IsAbs(configFile) && utils.IsFileInputValid(configFile) {
+if !filepath.IsAbs(configFile) && util.IsFileInputValid(configFile) {
configFile = filepath.Join(configDir, configFile)
}
viper.SetConfigFile(configFile)
@@ -449,7 +457,7 @@ func LoadConfig(configDir, configFile string) error {
if strings.TrimSpace(globalConf.FTPD.Banner) == "" {
globalConf.FTPD.Banner = defaultFTPDBanner
}
-if globalConf.ProviderConf.UsersBaseDir != "" && !utils.IsFileInputValid(globalConf.ProviderConf.UsersBaseDir) {
+if globalConf.ProviderConf.UsersBaseDir != "" && !util.IsFileInputValid(globalConf.ProviderConf.UsersBaseDir) {
err = fmt.Errorf("invalid users base dir %#v will be ignored", globalConf.ProviderConf.UsersBaseDir)
globalConf.ProviderConf.UsersBaseDir = ""
logger.Warn(logSender, "", "Configuration error: %v", err)
@@ -488,6 +496,7 @@ func LoadConfig(configDir, configFile string) error {
func loadBindingsFromEnv() {
for idx := 0; idx < 10; idx++ {
getRateLimitersFromEnv(idx)
+getPluginsFromEnv(idx)
getSFTPDBindindFromEnv(idx)
getFTPDBindingFromEnv(idx)
getWebDAVDBindingFromEnv(idx)
@@ -562,6 +571,65 @@ func getRateLimitersFromEnv(idx int) {
}
}

+func getPluginsFromEnv(idx int) {
+pluginConfig := plugin.Config{}
+if len(globalConf.PluginsConfig) > idx {
+pluginConfig = globalConf.PluginsConfig[idx]
+}
+
+isSet := false
+
+pluginType, ok := os.LookupEnv(fmt.Sprintf("SFTPGO_PLUGINS__%v__TYPE", idx))
+if ok {
+pluginConfig.Type = pluginType
+isSet = true
+}
+
+notifierFsEvents, ok := lookupStringListFromEnv(fmt.Sprintf("SFTPGO_PLUGINS__%v__NOTIFIER_OPTIONS__FS_EVENTS", idx))
+if ok {
+pluginConfig.NotifierOptions.FsEvents = notifierFsEvents
+isSet = true
+}
+
+notifierUserEvents, ok := lookupStringListFromEnv(fmt.Sprintf("SFTPGO_PLUGINS__%v__NOTIFIER_OPTIONS__USER_EVENTS", idx))
+if ok {
+pluginConfig.NotifierOptions.UserEvents = notifierUserEvents
+isSet = true
+}
+
+cmd, ok := os.LookupEnv(fmt.Sprintf("SFTPGO_PLUGINS__%v__CMD", idx))
+if ok {
+pluginConfig.Cmd = cmd
+isSet = true
+}
+
+cmdArgs, ok := lookupStringListFromEnv(fmt.Sprintf("SFTPGO_PLUGINS__%v__ARGS", idx))
+if ok {
+pluginConfig.Args = cmdArgs
+isSet = true
+}
+
+pluginHash, ok := os.LookupEnv(fmt.Sprintf("SFTPGO_PLUGINS__%v__SHA256SUM", idx))
+if ok {
+pluginConfig.SHA256Sum = pluginHash
+isSet = true
+}
+
+autoMTLS, ok := lookupBoolFromEnv(fmt.Sprintf("SFTPGO_PLUGINS__%v__AUTO_MTLS", idx))
+if ok {
+pluginConfig.AutoMTLS = autoMTLS
+isSet = true
+}
+
+if isSet {
+if len(globalConf.PluginsConfig) > idx {
+globalConf.PluginsConfig[idx] = pluginConfig
+} else {
+globalConf.PluginsConfig = append(globalConf.PluginsConfig, pluginConfig)
+}
+}
+}
+
func getSFTPDBindindFromEnv(idx int) {
binding := sftpd.Binding{
ApplyProxyConfig: true,
@@ -988,7 +1056,10 @@ func lookupStringListFromEnv(envName string) ([]string, bool) {
if ok {
var result []string
for _, v := range strings.Split(value, ",") {
-result = append(result, strings.TrimSpace(v))
+val := strings.TrimSpace(v)
+if val != "" {
+result = append(result, val)
+}
}
return result, true
}

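As a usage note for the getPluginsFromEnv function shown above (and exercised by the new test in the next file), a plugins entry can be defined entirely through SFTPGO_PLUGINS__<idx>__* environment variables. The following hedged Go sketch is illustrative only: the variable names and the config.LoadConfig/config.GetPluginsConfig calls come from the diff, while the command path and the use of the current directory as config dir are assumptions.

// Illustrative sketch, not part of the commit.
package main

import (
	"fmt"
	"os"

	"github.com/drakkan/sftpgo/v2/config"
)

func main() {
	os.Setenv("SFTPGO_PLUGINS__0__TYPE", "notifier")
	os.Setenv("SFTPGO_PLUGINS__0__NOTIFIER_OPTIONS__FS_EVENTS", "upload,download")
	os.Setenv("SFTPGO_PLUGINS__0__CMD", "/usr/local/bin/sftpgo-notifier") // hypothetical path
	os.Setenv("SFTPGO_PLUGINS__0__AUTO_MTLS", "1")

	// LoadConfig reads the optional config file and then applies the environment
	// overrides, including the plugins ones added in this commit.
	if err := config.LoadConfig(".", ""); err != nil {
		fmt.Println("config load error:", err)
		return
	}
	fmt.Printf("plugins: %+v\n", config.GetPluginsConfig())
}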
@ -18,7 +18,7 @@ import (
|
|||
"github.com/drakkan/sftpgo/v2/httpclient"
|
||||
"github.com/drakkan/sftpgo/v2/httpd"
|
||||
"github.com/drakkan/sftpgo/v2/sftpd"
|
||||
"github.com/drakkan/sftpgo/v2/utils"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -290,6 +290,77 @@ func TestServiceToStart(t *testing.T) {
|
|||
assert.True(t, config.HasServicesToStart())
|
||||
}
|
||||
|
||||
func TestPluginsFromEnv(t *testing.T) {
|
||||
reset()
|
||||
|
||||
os.Setenv("SFTPGO_PLUGINS__0__TYPE", "notifier")
|
||||
os.Setenv("SFTPGO_PLUGINS__0__NOTIFIER_OPTIONS__FS_EVENTS", "upload,download")
|
||||
os.Setenv("SFTPGO_PLUGINS__0__NOTIFIER_OPTIONS__USER_EVENTS", "add,update")
|
||||
os.Setenv("SFTPGO_PLUGINS__0__CMD", "plugin_start_cmd")
|
||||
os.Setenv("SFTPGO_PLUGINS__0__ARGS", "arg1,arg2")
|
||||
os.Setenv("SFTPGO_PLUGINS__0__SHA256SUM", "0a71ded61fccd59c4f3695b51c1b3d180da8d2d77ea09ccee20dac242675c193")
|
||||
os.Setenv("SFTPGO_PLUGINS__0__AUTO_MTLS", "1")
|
||||
t.Cleanup(func() {
|
||||
os.Unsetenv("SFTPGO_PLUGINS__0__TYPE")
|
||||
os.Unsetenv("SFTPGO_PLUGINS__0__NOTIFIER_OPTIONS__FS_EVENTS")
|
||||
os.Unsetenv("SFTPGO_PLUGINS__0__NOTIFIER_OPTIONS__USER_EVENTS")
|
||||
os.Unsetenv("SFTPGO_PLUGINS__0__CMD")
|
||||
os.Unsetenv("SFTPGO_PLUGINS__0__ARGS")
|
||||
os.Unsetenv("SFTPGO_PLUGINS__0__SHA256SUM")
|
||||
os.Unsetenv("SFTPGO_PLUGINS__0__AUTO_MTLS")
|
||||
})
|
||||
|
||||
configDir := ".."
|
||||
err := config.LoadConfig(configDir, "")
|
||||
assert.NoError(t, err)
|
||||
pluginsConf := config.GetPluginsConfig()
|
||||
require.Len(t, pluginsConf, 1)
|
||||
pluginConf := pluginsConf[0]
|
||||
require.Equal(t, "notifier", pluginConf.Type)
|
||||
require.Len(t, pluginConf.NotifierOptions.FsEvents, 2)
|
||||
require.True(t, util.IsStringInSlice("upload", pluginConf.NotifierOptions.FsEvents))
|
||||
require.True(t, util.IsStringInSlice("download", pluginConf.NotifierOptions.FsEvents))
|
||||
require.Len(t, pluginConf.NotifierOptions.UserEvents, 2)
|
||||
require.Equal(t, "add", pluginConf.NotifierOptions.UserEvents[0])
|
||||
require.Equal(t, "update", pluginConf.NotifierOptions.UserEvents[1])
|
||||
require.Equal(t, "plugin_start_cmd", pluginConf.Cmd)
|
||||
require.Len(t, pluginConf.Args, 2)
|
||||
require.Equal(t, "arg1", pluginConf.Args[0])
|
||||
require.Equal(t, "arg2", pluginConf.Args[1])
|
||||
require.Equal(t, "0a71ded61fccd59c4f3695b51c1b3d180da8d2d77ea09ccee20dac242675c193", pluginConf.SHA256Sum)
|
||||
require.True(t, pluginConf.AutoMTLS)
|
||||
|
||||
configAsJSON, err := json.Marshal(pluginsConf)
|
||||
require.NoError(t, err)
|
||||
confName := tempConfigName + ".json"
|
||||
configFilePath := filepath.Join(configDir, confName)
|
||||
err = os.WriteFile(configFilePath, configAsJSON, os.ModePerm)
|
||||
assert.NoError(t, err)
|
||||
|
||||
os.Setenv("SFTPGO_PLUGINS__0__CMD", "plugin_start_cmd1")
|
||||
os.Setenv("SFTPGO_PLUGINS__0__ARGS", "")
|
||||
os.Setenv("SFTPGO_PLUGINS__0__AUTO_MTLS", "0")
|
||||
err = config.LoadConfig(configDir, confName)
|
||||
assert.NoError(t, err)
|
||||
pluginsConf = config.GetPluginsConfig()
|
||||
require.Len(t, pluginsConf, 1)
|
||||
pluginConf = pluginsConf[0]
|
||||
require.Equal(t, "notifier", pluginConf.Type)
|
||||
require.Len(t, pluginConf.NotifierOptions.FsEvents, 2)
|
||||
require.True(t, util.IsStringInSlice("upload", pluginConf.NotifierOptions.FsEvents))
|
||||
require.True(t, util.IsStringInSlice("download", pluginConf.NotifierOptions.FsEvents))
|
||||
require.Len(t, pluginConf.NotifierOptions.UserEvents, 2)
|
||||
require.Equal(t, "add", pluginConf.NotifierOptions.UserEvents[0])
|
||||
require.Equal(t, "update", pluginConf.NotifierOptions.UserEvents[1])
|
||||
require.Equal(t, "plugin_start_cmd1", pluginConf.Cmd)
|
||||
require.Len(t, pluginConf.Args, 0)
|
||||
require.Equal(t, "0a71ded61fccd59c4f3695b51c1b3d180da8d2d77ea09ccee20dac242675c193", pluginConf.SHA256Sum)
|
||||
require.False(t, pluginConf.AutoMTLS)
|
||||
|
||||
err = os.Remove(configFilePath)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
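The test above configures the first plugin entirely through SFTPGO_PLUGINS__0__* variables. A hedged sketch of the configuration shape those variables appear to populate; the field names follow the assertions in the test, while the struct tags and package layout are assumptions not shown in this diff.

// Hypothetical plugin configuration matching the env vars exercised above.
package plugin

// NotifierOptions: SFTPGO_PLUGINS__0__NOTIFIER_OPTIONS__FS_EVENTS / __USER_EVENTS
type NotifierOptions struct {
	FsEvents   []string `json:"fs_events" mapstructure:"fs_events"`
	UserEvents []string `json:"user_events" mapstructure:"user_events"`
}

// Config: SFTPGO_PLUGINS__0__TYPE, __CMD, __ARGS, __SHA256SUM, __AUTO_MTLS
type Config struct {
	Type            string          `json:"type" mapstructure:"type"`
	NotifierOptions NotifierOptions `json:"notifier_options" mapstructure:"notifier_options"`
	Cmd             string          `json:"cmd" mapstructure:"cmd"`
	Args            []string        `json:"args" mapstructure:"args"`
	SHA256Sum       string          `json:"sha256sum" mapstructure:"sha256sum"`
	AutoMTLS        bool            `json:"auto_mtls" mapstructure:"auto_mtls"`
}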
|
||||
func TestRateLimitersFromEnv(t *testing.T) {
|
||||
reset()
|
||||
|
||||
|
@ -325,8 +396,8 @@ func TestRateLimitersFromEnv(t *testing.T) {
|
|||
require.Equal(t, 2, limiters[0].Type)
|
||||
protocols := limiters[0].Protocols
|
||||
require.Len(t, protocols, 2)
|
||||
require.True(t, utils.IsStringInSlice(common.ProtocolFTP, protocols))
|
||||
require.True(t, utils.IsStringInSlice(common.ProtocolSSH, protocols))
|
||||
require.True(t, util.IsStringInSlice(common.ProtocolFTP, protocols))
|
||||
require.True(t, util.IsStringInSlice(common.ProtocolSSH, protocols))
|
||||
require.True(t, limiters[0].GenerateDefenderEvents)
|
||||
require.Equal(t, 50, limiters[0].EntriesSoftLimit)
|
||||
require.Equal(t, 100, limiters[0].EntriesHardLimit)
|
||||
|
@ -337,10 +408,10 @@ func TestRateLimitersFromEnv(t *testing.T) {
|
|||
require.Equal(t, 2, limiters[1].Type)
|
||||
protocols = limiters[1].Protocols
|
||||
require.Len(t, protocols, 4)
|
||||
require.True(t, utils.IsStringInSlice(common.ProtocolFTP, protocols))
|
||||
require.True(t, utils.IsStringInSlice(common.ProtocolSSH, protocols))
|
||||
require.True(t, utils.IsStringInSlice(common.ProtocolWebDAV, protocols))
|
||||
require.True(t, utils.IsStringInSlice(common.ProtocolHTTP, protocols))
|
||||
require.True(t, util.IsStringInSlice(common.ProtocolFTP, protocols))
|
||||
require.True(t, util.IsStringInSlice(common.ProtocolSSH, protocols))
|
||||
require.True(t, util.IsStringInSlice(common.ProtocolWebDAV, protocols))
|
||||
require.True(t, util.IsStringInSlice(common.ProtocolHTTP, protocols))
|
||||
require.False(t, limiters[1].GenerateDefenderEvents)
|
||||
require.Equal(t, 100, limiters[1].EntriesSoftLimit)
|
||||
require.Equal(t, 150, limiters[1].EntriesHardLimit)
|
||||
|
|
|
@ -12,7 +12,7 @@ import (
|
|||
"github.com/alexedwards/argon2id"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
|
||||
"github.com/drakkan/sftpgo/v2/utils"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
)
|
||||
|
||||
// Available permissions for SFTPGo admins
|
||||
|
@ -66,7 +66,7 @@ type Admin struct {
|
|||
}
|
||||
|
||||
func (a *Admin) checkPassword() error {
|
||||
if a.Password != "" && !utils.IsStringPrefixInSlice(a.Password, internalHashPwdPrefixes) {
|
||||
if a.Password != "" && !util.IsStringPrefixInSlice(a.Password, internalHashPwdPrefixes) {
|
||||
if config.PasswordHashing.Algo == HashingAlgoBcrypt {
|
||||
pwd, err := bcrypt.GenerateFromPassword([]byte(a.Password), config.PasswordHashing.BcryptOptions.Cost)
|
||||
if err != nil {
|
||||
|
@ -86,36 +86,36 @@ func (a *Admin) checkPassword() error {
|
|||
|
||||
func (a *Admin) validate() error {
|
||||
if a.Username == "" {
|
||||
return utils.NewValidationError("username is mandatory")
|
||||
return util.NewValidationError("username is mandatory")
|
||||
}
|
||||
if a.Password == "" {
|
||||
return utils.NewValidationError("please set a password")
|
||||
return util.NewValidationError("please set a password")
|
||||
}
|
||||
if !config.SkipNaturalKeysValidation && !usernameRegex.MatchString(a.Username) {
|
||||
return utils.NewValidationError(fmt.Sprintf("username %#v is not valid, the following characters are allowed: a-zA-Z0-9-_.~", a.Username))
|
||||
return util.NewValidationError(fmt.Sprintf("username %#v is not valid, the following characters are allowed: a-zA-Z0-9-_.~", a.Username))
|
||||
}
|
||||
if err := a.checkPassword(); err != nil {
|
||||
return err
|
||||
}
|
||||
a.Permissions = utils.RemoveDuplicates(a.Permissions)
|
||||
a.Permissions = util.RemoveDuplicates(a.Permissions)
|
||||
if len(a.Permissions) == 0 {
|
||||
return utils.NewValidationError("please grant some permissions to this admin")
|
||||
return util.NewValidationError("please grant some permissions to this admin")
|
||||
}
|
||||
if utils.IsStringInSlice(PermAdminAny, a.Permissions) {
|
||||
if util.IsStringInSlice(PermAdminAny, a.Permissions) {
|
||||
a.Permissions = []string{PermAdminAny}
|
||||
}
|
||||
for _, perm := range a.Permissions {
|
||||
if !utils.IsStringInSlice(perm, validAdminPerms) {
|
||||
return utils.NewValidationError(fmt.Sprintf("invalid permission: %#v", perm))
|
||||
if !util.IsStringInSlice(perm, validAdminPerms) {
|
||||
return util.NewValidationError(fmt.Sprintf("invalid permission: %#v", perm))
|
||||
}
|
||||
}
|
||||
if a.Email != "" && !emailRegex.MatchString(a.Email) {
|
||||
return utils.NewValidationError(fmt.Sprintf("email %#v is not valid", a.Email))
|
||||
return util.NewValidationError(fmt.Sprintf("email %#v is not valid", a.Email))
|
||||
}
|
||||
for _, IPMask := range a.Filters.AllowList {
|
||||
_, _, err := net.ParseCIDR(IPMask)
|
||||
if err != nil {
|
||||
return utils.NewValidationError(fmt.Sprintf("could not parse allow list entry %#v : %v", IPMask, err))
|
||||
return util.NewValidationError(fmt.Sprintf("could not parse allow list entry %#v : %v", IPMask, err))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -182,10 +182,10 @@ func (a *Admin) HideConfidentialData() {
|
|||
|
||||
// HasPermission returns true if the admin has the specified permission
|
||||
func (a *Admin) HasPermission(perm string) bool {
|
||||
if utils.IsStringInSlice(PermAdminAny, a.Permissions) {
|
||||
if util.IsStringInSlice(PermAdminAny, a.Permissions) {
|
||||
return true
|
||||
}
|
||||
return utils.IsStringInSlice(perm, a.Permissions)
|
||||
return util.IsStringInSlice(perm, a.Permissions)
|
||||
}
|
||||
|
||||
// GetPermissionsAsString returns permission as string
|
||||
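The admin.go hunks above are a mechanical rename of the old utils import to util. For orientation, a hedged sketch of the helper signatures these call sites imply; the bodies are illustrative only and the real package may differ.

// Assumed shapes for a few util helpers used in this file.
package util

import "strings"

// ValidationError wraps a human readable validation failure.
type ValidationError struct {
	err string
}

func (e *ValidationError) Error() string {
	return "Validation error: " + e.err
}

func NewValidationError(errorString string) *ValidationError {
	return &ValidationError{err: errorString}
}

// IsStringInSlice reports whether obj is present in list.
func IsStringInSlice(obj string, list []string) bool {
	for _, v := range list {
		if v == obj {
			return true
		}
	}
	return false
}

// IsStringPrefixInSlice reports whether obj starts with any element of list.
func IsStringPrefixInSlice(obj string, list []string) bool {
	for _, v := range list {
		if strings.HasPrefix(obj, v) {
			return true
		}
	}
	return false
}

// RemoveDuplicates returns list with duplicate entries dropped, order preserved.
func RemoveDuplicates(list []string) []string {
	result := make([]string, 0, len(list))
	seen := make(map[string]bool)
	for _, item := range list {
		if !seen[item] {
			seen[item] = true
			result = append(result, item)
		}
	}
	return result
}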
|
|
|
@ -13,7 +13,7 @@ import (
|
|||
bolt "go.etcd.io/bbolt"
|
||||
|
||||
"github.com/drakkan/sftpgo/v2/logger"
|
||||
"github.com/drakkan/sftpgo/v2/utils"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
"github.com/drakkan/sftpgo/v2/version"
|
||||
"github.com/drakkan/sftpgo/v2/vfs"
|
||||
)
|
||||
|
@ -43,7 +43,7 @@ func initializeBoltProvider(basePath string) error {
|
|||
var err error
|
||||
|
||||
dbPath := config.Name
|
||||
if !utils.IsFileInputValid(dbPath) {
|
||||
if !util.IsFileInputValid(dbPath) {
|
||||
return fmt.Errorf("invalid database path: %#v", dbPath)
|
||||
}
|
||||
if !filepath.IsAbs(dbPath) {
|
||||
|
@ -160,14 +160,14 @@ func (p *BoltProvider) updateLastLogin(username string) error {
|
|||
}
|
||||
var u []byte
|
||||
if u = bucket.Get([]byte(username)); u == nil {
|
||||
return utils.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist, unable to update last login", username))
|
||||
return util.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist, unable to update last login", username))
|
||||
}
|
||||
var user User
|
||||
err = json.Unmarshal(u, &user)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
user.LastLogin = utils.GetTimeAsMsSinceEpoch(time.Now())
|
||||
user.LastLogin = util.GetTimeAsMsSinceEpoch(time.Now())
|
||||
buf, err := json.Marshal(user)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -190,7 +190,7 @@ func (p *BoltProvider) updateQuota(username string, filesAdd int, sizeAdd int64,
|
|||
}
|
||||
var u []byte
|
||||
if u = bucket.Get([]byte(username)); u == nil {
|
||||
return utils.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist, unable to update quota", username))
|
||||
return util.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist, unable to update quota", username))
|
||||
}
|
||||
var user User
|
||||
err = json.Unmarshal(u, &user)
|
||||
|
@ -204,7 +204,7 @@ func (p *BoltProvider) updateQuota(username string, filesAdd int, sizeAdd int64,
|
|||
user.UsedQuotaSize += sizeAdd
|
||||
user.UsedQuotaFiles += filesAdd
|
||||
}
|
||||
user.LastQuotaUpdate = utils.GetTimeAsMsSinceEpoch(time.Now())
|
||||
user.LastQuotaUpdate = util.GetTimeAsMsSinceEpoch(time.Now())
|
||||
buf, err := json.Marshal(user)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -235,7 +235,7 @@ func (p *BoltProvider) adminExists(username string) (Admin, error) {
|
|||
}
|
||||
a := bucket.Get([]byte(username))
|
||||
if a == nil {
|
||||
return utils.NewRecordNotFoundError(fmt.Sprintf("admin %v does not exist", username))
|
||||
return util.NewRecordNotFoundError(fmt.Sprintf("admin %v does not exist", username))
|
||||
}
|
||||
return json.Unmarshal(a, &admin)
|
||||
})
|
||||
|
@ -282,7 +282,7 @@ func (p *BoltProvider) updateAdmin(admin *Admin) error {
|
|||
var a []byte
|
||||
|
||||
if a = bucket.Get([]byte(admin.Username)); a == nil {
|
||||
return utils.NewRecordNotFoundError(fmt.Sprintf("admin %v does not exist", admin.Username))
|
||||
return util.NewRecordNotFoundError(fmt.Sprintf("admin %v does not exist", admin.Username))
|
||||
}
|
||||
var oldAdmin Admin
|
||||
err = json.Unmarshal(a, &oldAdmin)
|
||||
|
@ -307,7 +307,7 @@ func (p *BoltProvider) deleteAdmin(admin *Admin) error {
|
|||
}
|
||||
|
||||
if bucket.Get([]byte(admin.Username)) == nil {
|
||||
return utils.NewRecordNotFoundError(fmt.Sprintf("admin %v does not exist", admin.Username))
|
||||
return util.NewRecordNotFoundError(fmt.Sprintf("admin %v does not exist", admin.Username))
|
||||
}
|
||||
|
||||
return bucket.Delete([]byte(admin.Username))
|
||||
|
@ -397,7 +397,7 @@ func (p *BoltProvider) userExists(username string) (User, error) {
|
|||
}
|
||||
u := bucket.Get([]byte(username))
|
||||
if u == nil {
|
||||
return utils.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist", username))
|
||||
return util.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist", username))
|
||||
}
|
||||
folderBucket, err := getFoldersBucket(tx)
|
||||
if err != nil {
|
||||
|
@ -465,7 +465,7 @@ func (p *BoltProvider) updateUser(user *User) error {
|
|||
}
|
||||
var u []byte
|
||||
if u = bucket.Get([]byte(user.Username)); u == nil {
|
||||
return utils.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist", user.Username))
|
||||
return util.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist", user.Username))
|
||||
}
|
||||
var oldUser User
|
||||
err = json.Unmarshal(u, &oldUser)
|
||||
|
@ -517,7 +517,7 @@ func (p *BoltProvider) deleteUser(user *User) error {
|
|||
}
|
||||
exists := bucket.Get([]byte(user.Username))
|
||||
if exists == nil {
|
||||
return utils.NewRecordNotFoundError(fmt.Sprintf("user %#v does not exist", user.Username))
|
||||
return util.NewRecordNotFoundError(fmt.Sprintf("user %#v does not exist", user.Username))
|
||||
}
|
||||
return bucket.Delete([]byte(user.Username))
|
||||
})
|
||||
|
@ -722,7 +722,7 @@ func (p *BoltProvider) updateFolder(folder *vfs.BaseVirtualFolder) error {
|
|||
var f []byte
|
||||
|
||||
if f = bucket.Get([]byte(folder.Name)); f == nil {
|
||||
return utils.NewRecordNotFoundError(fmt.Sprintf("folder %v does not exist", folder.Name))
|
||||
return util.NewRecordNotFoundError(fmt.Sprintf("folder %v does not exist", folder.Name))
|
||||
}
|
||||
var oldFolder vfs.BaseVirtualFolder
|
||||
err = json.Unmarshal(f, &oldFolder)
|
||||
|
@ -755,7 +755,7 @@ func (p *BoltProvider) deleteFolder(folder *vfs.BaseVirtualFolder) error {
|
|||
}
|
||||
var f []byte
|
||||
if f = bucket.Get([]byte(folder.Name)); f == nil {
|
||||
return utils.NewRecordNotFoundError(fmt.Sprintf("folder %v does not exist", folder.Name))
|
||||
return util.NewRecordNotFoundError(fmt.Sprintf("folder %v does not exist", folder.Name))
|
||||
}
|
||||
var folder vfs.BaseVirtualFolder
|
||||
err = json.Unmarshal(f, &folder)
|
||||
|
@ -801,7 +801,7 @@ func (p *BoltProvider) updateFolderQuota(name string, filesAdd int, sizeAdd int6
|
|||
}
|
||||
var f []byte
|
||||
if f = bucket.Get([]byte(name)); f == nil {
|
||||
return utils.NewRecordNotFoundError(fmt.Sprintf("folder %#v does not exist, unable to update quota", name))
|
||||
return util.NewRecordNotFoundError(fmt.Sprintf("folder %#v does not exist, unable to update quota", name))
|
||||
}
|
||||
var folder vfs.BaseVirtualFolder
|
||||
err = json.Unmarshal(f, &folder)
|
||||
|
@ -815,7 +815,7 @@ func (p *BoltProvider) updateFolderQuota(name string, filesAdd int, sizeAdd int6
|
|||
folder.UsedQuotaSize += sizeAdd
|
||||
folder.UsedQuotaFiles += filesAdd
|
||||
}
|
||||
folder.LastQuotaUpdate = utils.GetTimeAsMsSinceEpoch(time.Now())
|
||||
folder.LastQuotaUpdate = util.GetTimeAsMsSinceEpoch(time.Now())
|
||||
buf, err := json.Marshal(folder)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -910,7 +910,7 @@ func folderExistsInternal(name string, bucket *bolt.Bucket) (vfs.BaseVirtualFold
|
|||
var folder vfs.BaseVirtualFolder
|
||||
f := bucket.Get([]byte(name))
|
||||
if f == nil {
|
||||
err := utils.NewRecordNotFoundError(fmt.Sprintf("folder %v does not exist", name))
|
||||
err := util.NewRecordNotFoundError(fmt.Sprintf("folder %v does not exist", name))
|
||||
return folder, err
|
||||
}
|
||||
err := json.Unmarshal(f, &folder)
|
||||
|
@ -950,7 +950,7 @@ func addUserToFolderMapping(baseFolder *vfs.BaseVirtualFolder, user *User, bucke
|
|||
baseFolder.UsedQuotaFiles = oldFolder.UsedQuotaFiles
|
||||
baseFolder.UsedQuotaSize = oldFolder.UsedQuotaSize
|
||||
baseFolder.Users = oldFolder.Users
|
||||
if !utils.IsStringInSlice(user.Username, baseFolder.Users) {
|
||||
if !util.IsStringInSlice(user.Username, baseFolder.Users) {
|
||||
baseFolder.Users = append(baseFolder.Users, user.Username)
|
||||
}
|
||||
buf, err := json.Marshal(baseFolder)
|
||||
|
@ -971,7 +971,7 @@ func removeUserFromFolderMapping(folder *vfs.VirtualFolder, user *User, bucket *
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if utils.IsStringInSlice(user.Username, baseFolder.Users) {
|
||||
if util.IsStringInSlice(user.Username, baseFolder.Users) {
|
||||
var newUserMapping []string
|
||||
for _, u := range baseFolder.Users {
|
||||
if u != user.Username {
|
||||
|
|
|
@ -6,7 +6,7 @@ import (
|
|||
|
||||
"golang.org/x/net/webdav"
|
||||
|
||||
"github.com/drakkan/sftpgo/v2/utils"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -54,7 +54,7 @@ func (cache *usersCache) updateLastLogin(username string) {
|
|||
defer cache.Unlock()
|
||||
|
||||
if cachedUser, ok := cache.users[username]; ok {
|
||||
cachedUser.User.LastLogin = utils.GetTimeAsMsSinceEpoch(time.Now())
|
||||
cachedUser.User.LastLogin = util.GetTimeAsMsSinceEpoch(time.Now())
|
||||
cache.users[username] = cachedUser
|
||||
}
|
||||
}
|
||||
|
|
|
@ -46,8 +46,10 @@ import (
|
|||
"github.com/drakkan/sftpgo/v2/httpclient"
|
||||
"github.com/drakkan/sftpgo/v2/kms"
|
||||
"github.com/drakkan/sftpgo/v2/logger"
|
||||
"github.com/drakkan/sftpgo/v2/metrics"
|
||||
"github.com/drakkan/sftpgo/v2/utils"
|
||||
"github.com/drakkan/sftpgo/v2/metric"
|
||||
"github.com/drakkan/sftpgo/v2/sdk"
|
||||
"github.com/drakkan/sftpgo/v2/sdk/plugin"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
"github.com/drakkan/sftpgo/v2/vfs"
|
||||
)
|
||||
|
||||
|
@ -120,7 +122,7 @@ var (
|
|||
// ErrInvalidCredentials defines the error to return if the supplied credentials are invalid
|
||||
ErrInvalidCredentials = errors.New("invalid credentials")
|
||||
isAdminCreated = int32(0)
|
||||
validTLSUsernames = []string{string(TLSUsernameNone), string(TLSUsernameCN)}
|
||||
validTLSUsernames = []string{string(sdk.TLSUsernameNone), string(sdk.TLSUsernameCN)}
|
||||
config Config
|
||||
provider Provider
|
||||
sqlPlaceholders []string
|
||||
|
@ -764,7 +766,7 @@ func CheckKeyboardInteractiveAuth(username, authHook string, client ssh.Keyboard
|
|||
|
||||
// UpdateLastLogin updates the last login fields for the given SFTP user
|
||||
func UpdateLastLogin(user *User) error {
|
||||
lastLogin := utils.GetTimeFromMsecSinceEpoch(user.LastLogin)
|
||||
lastLogin := util.GetTimeFromMsecSinceEpoch(user.LastLogin)
|
||||
diff := -time.Until(lastLogin)
|
||||
if diff < 0 || diff > lastLoginMinDelay {
|
||||
err := provider.updateLastLogin(user.Username)
|
||||
|
@ -780,7 +782,7 @@ func UpdateLastLogin(user *User) error {
|
|||
// If reset is true filesAdd and sizeAdd indicates the total files and the total size instead of the difference.
|
||||
func UpdateUserQuota(user *User, filesAdd int, sizeAdd int64, reset bool) error {
|
||||
if config.TrackQuota == 0 {
|
||||
return utils.NewMethodDisabledError(trackQuotaDisabledError)
|
||||
return util.NewMethodDisabledError(trackQuotaDisabledError)
|
||||
} else if config.TrackQuota == 2 && !reset && !user.HasQuotaRestrictions() {
|
||||
return nil
|
||||
}
|
||||
|
@ -801,7 +803,7 @@ func UpdateUserQuota(user *User, filesAdd int, sizeAdd int64, reset bool) error
|
|||
// If reset is true filesAdd and sizeAdd indicates the total files and the total size instead of the difference.
|
||||
func UpdateVirtualFolderQuota(vfolder *vfs.BaseVirtualFolder, filesAdd int, sizeAdd int64, reset bool) error {
|
||||
if config.TrackQuota == 0 {
|
||||
return utils.NewMethodDisabledError(trackQuotaDisabledError)
|
||||
return util.NewMethodDisabledError(trackQuotaDisabledError)
|
||||
}
|
||||
if filesAdd == 0 && sizeAdd == 0 && !reset {
|
||||
return nil
|
||||
|
@ -819,7 +821,7 @@ func UpdateVirtualFolderQuota(vfolder *vfs.BaseVirtualFolder, filesAdd int, size
|
|||
// GetUsedQuota returns the used quota for the given SFTP user.
|
||||
func GetUsedQuota(username string) (int, int64, error) {
|
||||
if config.TrackQuota == 0 {
|
||||
return 0, 0, utils.NewMethodDisabledError(trackQuotaDisabledError)
|
||||
return 0, 0, util.NewMethodDisabledError(trackQuotaDisabledError)
|
||||
}
|
||||
files, size, err := provider.getUsedQuota(username)
|
||||
if err != nil {
|
||||
|
@ -832,7 +834,7 @@ func GetUsedQuota(username string) (int, int64, error) {
|
|||
// GetUsedVirtualFolderQuota returns the used quota for the given virtual folder.
|
||||
func GetUsedVirtualFolderQuota(name string) (int, int64, error) {
|
||||
if config.TrackQuota == 0 {
|
||||
return 0, 0, utils.NewMethodDisabledError(trackQuotaDisabledError)
|
||||
return 0, 0, util.NewMethodDisabledError(trackQuotaDisabledError)
|
||||
}
|
||||
files, size, err := provider.getUsedFolderQuota(name)
|
||||
if err != nil {
|
||||
|
@ -1064,7 +1066,7 @@ func buildUserHomeDir(user *User) {
return
}
switch user.FsConfig.Provider {
case vfs.SFTPFilesystemProvider, vfs.S3FilesystemProvider, vfs.AzureBlobFilesystemProvider, vfs.GCSFilesystemProvider:
case sdk.SFTPFilesystemProvider, sdk.S3FilesystemProvider, sdk.AzureBlobFilesystemProvider, sdk.GCSFilesystemProvider:
if tempPath != "" {
user.HomeDir = filepath.Join(tempPath, user.Username)
} else {
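buildUserHomeDir now switches on provider constants from the new sdk package rather than vfs. A hedged sketch of what that relocated enum presumably looks like; the names come from the call sites in this diff, the iota order is an assumption.

// Assumed filesystem provider enum after the move to the sdk package.
package sdk

// FilesystemProvider identifies the backing storage for a user or folder.
type FilesystemProvider int

const (
	LocalFilesystemProvider     FilesystemProvider = iota // local filesystem
	S3FilesystemProvider                                  // S3 compatible object storage
	GCSFilesystemProvider                                 // Google Cloud Storage
	AzureBlobFilesystemProvider                           // Azure Blob Storage
	CryptedFilesystemProvider                             // locally encrypted files
	SFTPFilesystemProvider                                // remote SFTP server
)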
|
@ -1114,13 +1116,13 @@ func isMappedDirOverlapped(dir1, dir2 string, fullCheck bool) bool {
|
|||
|
||||
func validateFolderQuotaLimits(folder vfs.VirtualFolder) error {
|
||||
if folder.QuotaSize < -1 {
|
||||
return utils.NewValidationError(fmt.Sprintf("invalid quota_size: %v folder path %#v", folder.QuotaSize, folder.MappedPath))
|
||||
return util.NewValidationError(fmt.Sprintf("invalid quota_size: %v folder path %#v", folder.QuotaSize, folder.MappedPath))
|
||||
}
|
||||
if folder.QuotaFiles < -1 {
|
||||
return utils.NewValidationError(fmt.Sprintf("invalid quota_file: %v folder path %#v", folder.QuotaFiles, folder.MappedPath))
|
||||
return util.NewValidationError(fmt.Sprintf("invalid quota_file: %v folder path %#v", folder.QuotaFiles, folder.MappedPath))
|
||||
}
|
||||
if (folder.QuotaSize == -1 && folder.QuotaFiles != -1) || (folder.QuotaFiles == -1 && folder.QuotaSize != -1) {
|
||||
return utils.NewValidationError(fmt.Sprintf("virtual folder quota_size and quota_files must be both -1 or >= 0, quota_size: %v quota_files: %v",
|
||||
return util.NewValidationError(fmt.Sprintf("virtual folder quota_size and quota_files must be both -1 or >= 0, quota_size: %v quota_files: %v",
|
||||
folder.QuotaFiles, folder.QuotaSize))
|
||||
}
|
||||
return nil
|
||||
|
@ -1137,7 +1139,7 @@ func getVirtualFolderIfInvalid(folder *vfs.BaseVirtualFolder) *vfs.BaseVirtualFo
|
|||
if folder.Name == "" {
|
||||
return folder
|
||||
}
|
||||
if folder.FsConfig.Provider != vfs.LocalFilesystemProvider {
|
||||
if folder.FsConfig.Provider != sdk.LocalFilesystemProvider {
|
||||
return folder
|
||||
}
|
||||
if f, err := GetFolderByName(folder.Name); err == nil {
|
||||
|
@ -1157,7 +1159,7 @@ func validateUserVirtualFolders(user *User) error {
|
|||
for _, v := range user.VirtualFolders {
|
||||
cleanedVPath := filepath.ToSlash(path.Clean(v.VirtualPath))
|
||||
if !path.IsAbs(cleanedVPath) || cleanedVPath == "/" {
|
||||
return utils.NewValidationError(fmt.Sprintf("invalid virtual folder %#v", v.VirtualPath))
|
||||
return util.NewValidationError(fmt.Sprintf("invalid virtual folder %#v", v.VirtualPath))
|
||||
}
|
||||
if err := validateFolderQuotaLimits(v); err != nil {
|
||||
return err
|
||||
|
@ -1169,12 +1171,12 @@ func validateUserVirtualFolders(user *User) error {
|
|||
cleanedMPath := folder.MappedPath
|
||||
if folder.IsLocalOrLocalCrypted() {
|
||||
if isMappedDirOverlapped(cleanedMPath, user.GetHomeDir(), true) {
|
||||
return utils.NewValidationError(fmt.Sprintf("invalid mapped folder %#v cannot be inside or contain the user home dir %#v",
|
||||
return util.NewValidationError(fmt.Sprintf("invalid mapped folder %#v cannot be inside or contain the user home dir %#v",
|
||||
folder.MappedPath, user.GetHomeDir()))
|
||||
}
|
||||
for mPath := range mappedPaths {
|
||||
if folder.IsLocalOrLocalCrypted() && isMappedDirOverlapped(mPath, cleanedMPath, false) {
|
||||
return utils.NewValidationError(fmt.Sprintf("invalid mapped folder %#v overlaps with mapped folder %#v",
|
||||
return util.NewValidationError(fmt.Sprintf("invalid mapped folder %#v overlaps with mapped folder %#v",
|
||||
v.MappedPath, mPath))
|
||||
}
|
||||
}
|
||||
|
@ -1182,7 +1184,7 @@ func validateUserVirtualFolders(user *User) error {
|
|||
}
|
||||
for vPath := range virtualPaths {
|
||||
if isVirtualDirOverlapped(vPath, cleanedVPath, false) {
|
||||
return utils.NewValidationError(fmt.Sprintf("invalid virtual folder %#v overlaps with virtual folder %#v",
|
||||
return util.NewValidationError(fmt.Sprintf("invalid virtual folder %#v overlaps with virtual folder %#v",
|
||||
v.VirtualPath, vPath))
|
||||
}
|
||||
}
|
||||
|
@ -1200,22 +1202,22 @@ func validateUserVirtualFolders(user *User) error {
|
|||
|
||||
func validatePermissions(user *User) error {
|
||||
if len(user.Permissions) == 0 {
|
||||
return utils.NewValidationError("please grant some permissions to this user")
|
||||
return util.NewValidationError("please grant some permissions to this user")
|
||||
}
|
||||
permissions := make(map[string][]string)
|
||||
if _, ok := user.Permissions["/"]; !ok {
|
||||
return utils.NewValidationError("permissions for the root dir \"/\" must be set")
|
||||
return util.NewValidationError("permissions for the root dir \"/\" must be set")
|
||||
}
|
||||
for dir, perms := range user.Permissions {
|
||||
if len(perms) == 0 && dir == "/" {
|
||||
return utils.NewValidationError(fmt.Sprintf("no permissions granted for the directory: %#v", dir))
|
||||
return util.NewValidationError(fmt.Sprintf("no permissions granted for the directory: %#v", dir))
|
||||
}
|
||||
if len(perms) > len(ValidPerms) {
|
||||
return utils.NewValidationError("invalid permissions")
|
||||
return util.NewValidationError("invalid permissions")
|
||||
}
|
||||
for _, p := range perms {
|
||||
if !utils.IsStringInSlice(p, ValidPerms) {
|
||||
return utils.NewValidationError(fmt.Sprintf("invalid permission: %#v", p))
|
||||
if !util.IsStringInSlice(p, ValidPerms) {
|
||||
return util.NewValidationError(fmt.Sprintf("invalid permission: %#v", p))
|
||||
}
|
||||
}
|
||||
cleanedDir := filepath.ToSlash(path.Clean(dir))
|
||||
|
@ -1223,15 +1225,15 @@ func validatePermissions(user *User) error {
|
|||
cleanedDir = strings.TrimSuffix(cleanedDir, "/")
|
||||
}
|
||||
if !path.IsAbs(cleanedDir) {
|
||||
return utils.NewValidationError(fmt.Sprintf("cannot set permissions for non absolute path: %#v", dir))
|
||||
return util.NewValidationError(fmt.Sprintf("cannot set permissions for non absolute path: %#v", dir))
|
||||
}
|
||||
if dir != cleanedDir && cleanedDir == "/" {
|
||||
return utils.NewValidationError(fmt.Sprintf("cannot set permissions for invalid subdirectory: %#v is an alias for \"/\"", dir))
|
||||
return util.NewValidationError(fmt.Sprintf("cannot set permissions for invalid subdirectory: %#v is an alias for \"/\"", dir))
|
||||
}
|
||||
if utils.IsStringInSlice(PermAny, perms) {
|
||||
if util.IsStringInSlice(PermAny, perms) {
|
||||
permissions[cleanedDir] = []string{PermAny}
|
||||
} else {
|
||||
permissions[cleanedDir] = utils.RemoveDuplicates(perms)
|
||||
permissions[cleanedDir] = util.RemoveDuplicates(perms)
|
||||
}
|
||||
}
|
||||
user.Permissions = permissions
|
||||
|
@ -1249,31 +1251,31 @@ func validatePublicKeys(user *User) error {
|
|||
}
|
||||
_, _, _, _, err := ssh.ParseAuthorizedKey([]byte(k))
|
||||
if err != nil {
|
||||
return utils.NewValidationError(fmt.Sprintf("could not parse key nr. %d: %s", i+1, err))
|
||||
return util.NewValidationError(fmt.Sprintf("could not parse key nr. %d: %s", i+1, err))
|
||||
}
|
||||
validatedKeys = append(validatedKeys, k)
|
||||
}
|
||||
user.PublicKeys = utils.RemoveDuplicates(validatedKeys)
|
||||
user.PublicKeys = util.RemoveDuplicates(validatedKeys)
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateFiltersPatternExtensions(user *User) error {
|
||||
if len(user.Filters.FilePatterns) == 0 {
|
||||
user.Filters.FilePatterns = []PatternsFilter{}
|
||||
user.Filters.FilePatterns = []sdk.PatternsFilter{}
|
||||
return nil
|
||||
}
|
||||
filteredPaths := []string{}
|
||||
var filters []PatternsFilter
|
||||
var filters []sdk.PatternsFilter
|
||||
for _, f := range user.Filters.FilePatterns {
|
||||
cleanedPath := filepath.ToSlash(path.Clean(f.Path))
|
||||
if !path.IsAbs(cleanedPath) {
|
||||
return utils.NewValidationError(fmt.Sprintf("invalid path %#v for file patterns filter", f.Path))
|
||||
return util.NewValidationError(fmt.Sprintf("invalid path %#v for file patterns filter", f.Path))
|
||||
}
|
||||
if utils.IsStringInSlice(cleanedPath, filteredPaths) {
|
||||
return utils.NewValidationError(fmt.Sprintf("duplicate file patterns filter for path %#v", f.Path))
|
||||
if util.IsStringInSlice(cleanedPath, filteredPaths) {
|
||||
return util.NewValidationError(fmt.Sprintf("duplicate file patterns filter for path %#v", f.Path))
|
||||
}
|
||||
if len(f.AllowedPatterns) == 0 && len(f.DeniedPatterns) == 0 {
|
||||
return utils.NewValidationError(fmt.Sprintf("empty file patterns filter for path %#v", f.Path))
|
||||
return util.NewValidationError(fmt.Sprintf("empty file patterns filter for path %#v", f.Path))
|
||||
}
|
||||
f.Path = cleanedPath
|
||||
allowed := make([]string, 0, len(f.AllowedPatterns))
|
||||
|
@ -1281,14 +1283,14 @@ func validateFiltersPatternExtensions(user *User) error {
|
|||
for _, pattern := range f.AllowedPatterns {
|
||||
_, err := path.Match(pattern, "abc")
|
||||
if err != nil {
|
||||
return utils.NewValidationError(fmt.Sprintf("invalid file pattern filter %#v", pattern))
|
||||
return util.NewValidationError(fmt.Sprintf("invalid file pattern filter %#v", pattern))
|
||||
}
|
||||
allowed = append(allowed, strings.ToLower(pattern))
|
||||
}
|
||||
for _, pattern := range f.DeniedPatterns {
|
||||
_, err := path.Match(pattern, "abc")
|
||||
if err != nil {
|
||||
return utils.NewValidationError(fmt.Sprintf("invalid file pattern filter %#v", pattern))
|
||||
return util.NewValidationError(fmt.Sprintf("invalid file pattern filter %#v", pattern))
|
||||
}
|
||||
denied = append(denied, strings.ToLower(pattern))
|
||||
}
|
||||
|
@ -1321,46 +1323,46 @@ func validateFilters(user *User) error {
|
|||
for _, IPMask := range user.Filters.DeniedIP {
|
||||
_, _, err := net.ParseCIDR(IPMask)
|
||||
if err != nil {
|
||||
return utils.NewValidationError(fmt.Sprintf("could not parse denied IP/Mask %#v : %v", IPMask, err))
|
||||
return util.NewValidationError(fmt.Sprintf("could not parse denied IP/Mask %#v : %v", IPMask, err))
|
||||
}
|
||||
}
|
||||
for _, IPMask := range user.Filters.AllowedIP {
|
||||
_, _, err := net.ParseCIDR(IPMask)
|
||||
if err != nil {
|
||||
return utils.NewValidationError(fmt.Sprintf("could not parse allowed IP/Mask %#v : %v", IPMask, err))
|
||||
return util.NewValidationError(fmt.Sprintf("could not parse allowed IP/Mask %#v : %v", IPMask, err))
|
||||
}
|
||||
}
|
||||
if len(user.Filters.DeniedLoginMethods) >= len(ValidLoginMethods) {
|
||||
return utils.NewValidationError("invalid denied_login_methods")
|
||||
return util.NewValidationError("invalid denied_login_methods")
|
||||
}
|
||||
for _, loginMethod := range user.Filters.DeniedLoginMethods {
|
||||
if !utils.IsStringInSlice(loginMethod, ValidLoginMethods) {
|
||||
return utils.NewValidationError(fmt.Sprintf("invalid login method: %#v", loginMethod))
|
||||
if !util.IsStringInSlice(loginMethod, ValidLoginMethods) {
|
||||
return util.NewValidationError(fmt.Sprintf("invalid login method: %#v", loginMethod))
|
||||
}
|
||||
}
|
||||
if len(user.Filters.DeniedProtocols) >= len(ValidProtocols) {
|
||||
return utils.NewValidationError("invalid denied_protocols")
|
||||
return util.NewValidationError("invalid denied_protocols")
|
||||
}
|
||||
for _, p := range user.Filters.DeniedProtocols {
|
||||
if !utils.IsStringInSlice(p, ValidProtocols) {
|
||||
return utils.NewValidationError(fmt.Sprintf("invalid protocol: %#v", p))
|
||||
if !util.IsStringInSlice(p, ValidProtocols) {
|
||||
return util.NewValidationError(fmt.Sprintf("invalid protocol: %#v", p))
|
||||
}
|
||||
}
|
||||
if user.Filters.TLSUsername != "" {
|
||||
if !utils.IsStringInSlice(string(user.Filters.TLSUsername), validTLSUsernames) {
|
||||
return utils.NewValidationError(fmt.Sprintf("invalid TLS username: %#v", user.Filters.TLSUsername))
|
||||
if !util.IsStringInSlice(string(user.Filters.TLSUsername), validTLSUsernames) {
|
||||
return util.NewValidationError(fmt.Sprintf("invalid TLS username: %#v", user.Filters.TLSUsername))
|
||||
}
|
||||
}
|
||||
for _, opts := range user.Filters.WebClient {
|
||||
if !utils.IsStringInSlice(opts, WebClientOptions) {
|
||||
return utils.NewValidationError(fmt.Sprintf("invalid web client options %#v", opts))
|
||||
if !util.IsStringInSlice(opts, sdk.WebClientOptions) {
|
||||
return util.NewValidationError(fmt.Sprintf("invalid web client options %#v", opts))
|
||||
}
|
||||
}
|
||||
return validateFiltersPatternExtensions(user)
|
||||
}
|
||||
|
||||
func saveGCSCredentials(fsConfig *vfs.Filesystem, helper vfs.ValidatorHelper) error {
|
||||
if fsConfig.Provider != vfs.GCSFilesystemProvider {
|
||||
if fsConfig.Provider != sdk.GCSFilesystemProvider {
|
||||
return nil
|
||||
}
|
||||
if fsConfig.GCSConfig.Credentials.GetPayload() == "" {
|
||||
|
@ -1380,21 +1382,21 @@ func saveGCSCredentials(fsConfig *vfs.Filesystem, helper vfs.ValidatorHelper) er
|
|||
fsConfig.GCSConfig.Credentials.SetAdditionalData(helper.GetEncryptionAdditionalData())
|
||||
err := fsConfig.GCSConfig.Credentials.Encrypt()
|
||||
if err != nil {
|
||||
return utils.NewValidationError(fmt.Sprintf("could not encrypt GCS credentials: %v", err))
|
||||
return util.NewValidationError(fmt.Sprintf("could not encrypt GCS credentials: %v", err))
|
||||
}
|
||||
}
|
||||
creds, err := json.Marshal(fsConfig.GCSConfig.Credentials)
|
||||
if err != nil {
|
||||
return utils.NewValidationError(fmt.Sprintf("could not marshal GCS credentials: %v", err))
|
||||
return util.NewValidationError(fmt.Sprintf("could not marshal GCS credentials: %v", err))
|
||||
}
|
||||
credentialsFilePath := helper.GetGCSCredentialsFilePath()
|
||||
err = os.MkdirAll(filepath.Dir(credentialsFilePath), 0700)
|
||||
if err != nil {
|
||||
return utils.NewValidationError(fmt.Sprintf("could not create GCS credentials dir: %v", err))
|
||||
return util.NewValidationError(fmt.Sprintf("could not create GCS credentials dir: %v", err))
|
||||
}
|
||||
err = os.WriteFile(credentialsFilePath, creds, 0600)
|
||||
if err != nil {
|
||||
return utils.NewValidationError(fmt.Sprintf("could not save GCS credentials: %v", err))
|
||||
return util.NewValidationError(fmt.Sprintf("could not save GCS credentials: %v", err))
|
||||
}
|
||||
fsConfig.GCSConfig.Credentials = kms.NewEmptySecret()
|
||||
return nil
|
||||
|
@ -1402,20 +1404,20 @@ func saveGCSCredentials(fsConfig *vfs.Filesystem, helper vfs.ValidatorHelper) er
|
|||
|
||||
func validateBaseParams(user *User) error {
|
||||
if user.Username == "" {
|
||||
return utils.NewValidationError("username is mandatory")
|
||||
return util.NewValidationError("username is mandatory")
|
||||
}
|
||||
if !config.SkipNaturalKeysValidation && !usernameRegex.MatchString(user.Username) {
|
||||
return utils.NewValidationError(fmt.Sprintf("username %#v is not valid, the following characters are allowed: a-zA-Z0-9-_.~",
|
||||
return util.NewValidationError(fmt.Sprintf("username %#v is not valid, the following characters are allowed: a-zA-Z0-9-_.~",
|
||||
user.Username))
|
||||
}
|
||||
if user.HomeDir == "" {
|
||||
return utils.NewValidationError("home_dir is mandatory")
|
||||
return util.NewValidationError("home_dir is mandatory")
|
||||
}
|
||||
if user.Password == "" && len(user.PublicKeys) == 0 {
|
||||
return utils.NewValidationError("please set a password or at least a public_key")
|
||||
return util.NewValidationError("please set a password or at least a public_key")
|
||||
}
|
||||
if !filepath.IsAbs(user.HomeDir) {
|
||||
return utils.NewValidationError(fmt.Sprintf("home_dir must be an absolute path, actual value: %v", user.HomeDir))
|
||||
return util.NewValidationError(fmt.Sprintf("home_dir must be an absolute path, actual value: %v", user.HomeDir))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -1443,17 +1445,17 @@ func createUserPasswordHash(user *User) error {
|
|||
// FIXME: this should be defined as Folder struct method
|
||||
func ValidateFolder(folder *vfs.BaseVirtualFolder) error {
|
||||
if folder.Name == "" {
|
||||
return utils.NewValidationError("folder name is mandatory")
|
||||
return util.NewValidationError("folder name is mandatory")
|
||||
}
|
||||
if !config.SkipNaturalKeysValidation && !usernameRegex.MatchString(folder.Name) {
|
||||
return utils.NewValidationError(fmt.Sprintf("folder name %#v is not valid, the following characters are allowed: a-zA-Z0-9-_.~",
|
||||
return util.NewValidationError(fmt.Sprintf("folder name %#v is not valid, the following characters are allowed: a-zA-Z0-9-_.~",
|
||||
folder.Name))
|
||||
}
|
||||
if folder.FsConfig.Provider == vfs.LocalFilesystemProvider || folder.FsConfig.Provider == vfs.CryptedFilesystemProvider ||
|
||||
if folder.FsConfig.Provider == sdk.LocalFilesystemProvider || folder.FsConfig.Provider == sdk.CryptedFilesystemProvider ||
|
||||
folder.MappedPath != "" {
|
||||
cleanedMPath := filepath.Clean(folder.MappedPath)
|
||||
if !filepath.IsAbs(cleanedMPath) {
|
||||
return utils.NewValidationError(fmt.Sprintf("invalid folder mapped path %#v", folder.MappedPath))
|
||||
return util.NewValidationError(fmt.Sprintf("invalid folder mapped path %#v", folder.MappedPath))
|
||||
}
|
||||
folder.MappedPath = cleanedMPath
|
||||
}
|
||||
|
@ -1487,7 +1489,7 @@ func ValidateUser(user *User) error {
|
|||
return err
|
||||
}
|
||||
if user.Status < 0 || user.Status > 1 {
|
||||
return utils.NewValidationError(fmt.Sprintf("invalid user status: %v", user.Status))
|
||||
return util.NewValidationError(fmt.Sprintf("invalid user status: %v", user.Status))
|
||||
}
|
||||
if err := createUserPasswordHash(user); err != nil {
|
||||
return err
|
||||
|
@ -1505,9 +1507,9 @@ func checkLoginConditions(user *User) error {
|
|||
if user.Status < 1 {
|
||||
return fmt.Errorf("user %#v is disabled", user.Username)
|
||||
}
|
||||
if user.ExpirationDate > 0 && user.ExpirationDate < utils.GetTimeAsMsSinceEpoch(time.Now()) {
|
||||
if user.ExpirationDate > 0 && user.ExpirationDate < util.GetTimeAsMsSinceEpoch(time.Now()) {
|
||||
return fmt.Errorf("user %#v is expired, expiration timestamp: %v current timestamp: %v", user.Username,
|
||||
user.ExpirationDate, utils.GetTimeAsMsSinceEpoch(time.Now()))
|
||||
user.ExpirationDate, util.GetTimeAsMsSinceEpoch(time.Now()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -1534,12 +1536,12 @@ func isPasswordOK(user *User, password string) (bool, error) {
|
|||
return match, ErrInvalidCredentials
|
||||
}
|
||||
match = true
|
||||
} else if utils.IsStringPrefixInSlice(user.Password, pbkdfPwdPrefixes) {
|
||||
} else if util.IsStringPrefixInSlice(user.Password, pbkdfPwdPrefixes) {
|
||||
match, err = comparePbkdf2PasswordAndHash(password, user.Password)
|
||||
if err != nil {
|
||||
return match, err
|
||||
}
|
||||
} else if utils.IsStringPrefixInSlice(user.Password, unixPwdPrefixes) {
|
||||
} else if util.IsStringPrefixInSlice(user.Password, unixPwdPrefixes) {
|
||||
match, err = compareUnixPasswordAndHash(user, password)
|
||||
if err != nil {
|
||||
return match, err
|
||||
|
@ -1558,7 +1560,7 @@ func checkUserAndTLSCertificate(user *User, protocol string, tlsCert *x509.Certi
|
|||
}
|
||||
switch protocol {
|
||||
case "FTP", "DAV":
|
||||
if user.Filters.TLSUsername == TLSUsernameCN {
|
||||
if user.Filters.TLSUsername == sdk.TLSUsernameCN {
|
||||
if user.Username == tlsCert.Subject.CommonName {
|
||||
return *user, nil
|
||||
}
|
||||
|
@ -1664,7 +1666,7 @@ func comparePbkdf2PasswordAndHash(password, hashedPassword string) (bool, error)
return false, err
}
var salt []byte
if utils.IsStringPrefixInSlice(hashedPassword, pbkdfPwdB64SaltPrefixes) {
if util.IsStringPrefixInSlice(hashedPassword, pbkdfPwdB64SaltPrefixes) {
salt, err = base64.StdEncoding.DecodeString(vals[3])
if err != nil {
return false, err
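Only the base64-salt branch of comparePbkdf2PasswordAndHash is visible here. As context, a runnable sketch of pbkdf2-sha256 verification for a hash stored as $pbkdf2-sha256$<iterations>$<salt>$<key>; that exact layout and the derived key length are assumptions, not taken from this hunk.

package main

import (
	"crypto/sha256"
	"crypto/subtle"
	"encoding/base64"
	"fmt"
	"strconv"
	"strings"

	"golang.org/x/crypto/pbkdf2"
)

// verifyPbkdf2 re-derives the key from the candidate password and compares it
// in constant time against the stored one.
func verifyPbkdf2(password, stored string) (bool, error) {
	vals := strings.Split(stored, "$")
	if len(vals) != 5 {
		return false, fmt.Errorf("unexpected hash format")
	}
	iterations, err := strconv.Atoi(vals[2])
	if err != nil {
		return false, err
	}
	salt, err := base64.StdEncoding.DecodeString(vals[3])
	if err != nil {
		return false, err
	}
	expected, err := base64.StdEncoding.DecodeString(vals[4])
	if err != nil {
		return false, err
	}
	derived := pbkdf2.Key([]byte(password), salt, iterations, len(expected), sha256.New)
	return subtle.ConstantTimeCompare(derived, expected) == 1, nil
}

func main() {
	salt := []byte("0123456789abcdef")
	key := pbkdf2.Key([]byte("secret"), salt, 150000, 32, sha256.New)
	stored := fmt.Sprintf("$pbkdf2-sha256$%d$%s$%s", 150000,
		base64.StdEncoding.EncodeToString(salt), base64.StdEncoding.EncodeToString(key))
	fmt.Println(verifyPbkdf2("secret", stored)) // true <nil>
}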
|
@ -1690,7 +1692,7 @@ func addCredentialsToUser(user *User) error {
|
|||
if err := addFolderCredentialsToUser(user); err != nil {
|
||||
return err
|
||||
}
|
||||
if user.FsConfig.Provider != vfs.GCSFilesystemProvider {
|
||||
if user.FsConfig.Provider != sdk.GCSFilesystemProvider {
|
||||
return nil
|
||||
}
|
||||
if user.FsConfig.GCSConfig.AutomaticCredentials > 0 {
|
||||
|
@ -1712,7 +1714,7 @@ func addCredentialsToUser(user *User) error {
|
|||
func addFolderCredentialsToUser(user *User) error {
|
||||
for idx := range user.VirtualFolders {
|
||||
f := &user.VirtualFolders[idx]
|
||||
if f.FsConfig.Provider != vfs.GCSFilesystemProvider {
|
||||
if f.FsConfig.Provider != sdk.GCSFilesystemProvider {
|
||||
continue
|
||||
}
|
||||
if f.FsConfig.GCSConfig.AutomaticCredentials > 0 {
|
||||
|
@ -1780,7 +1782,7 @@ func checkDataprovider() {
|
|||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "check availability error: %v", err)
|
||||
}
|
||||
metrics.UpdateDataProviderAvailability(err)
|
||||
metric.UpdateDataProviderAvailability(err)
|
||||
}
|
||||
|
||||
func terminateInteractiveAuthProgram(cmd *exec.Cmd, isFinished bool) {
|
||||
|
@ -2117,11 +2119,11 @@ func executePreLoginHook(username, loginMethod, ip, protocol string) (User, erro
|
|||
return u, fmt.Errorf("pre-login hook error: %v, elapsed %v", err, time.Since(startTime))
|
||||
}
|
||||
providerLog(logger.LevelDebug, "pre-login hook completed, elapsed: %v", time.Since(startTime))
|
||||
if utils.IsByteArrayEmpty(out) {
|
||||
if util.IsByteArrayEmpty(out) {
|
||||
providerLog(logger.LevelDebug, "empty response from pre-login hook, no modification requested for user %#v id: %v",
|
||||
username, u.ID)
|
||||
if u.ID == 0 {
|
||||
return u, utils.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist", username))
|
||||
return u, util.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist", username))
|
||||
}
|
||||
return u, nil
|
||||
}
|
||||
|
@ -2230,7 +2232,7 @@ func getExternalAuthResponse(username, password, pkey, keyboardInteractive, ip,
|
|||
var tlsCert string
|
||||
if cert != nil {
|
||||
var err error
|
||||
tlsCert, err = utils.EncodeTLSCertToPem(cert)
|
||||
tlsCert, err = util.EncodeTLSCertToPem(cert)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -2285,7 +2287,7 @@ func updateUserFromExtAuthResponse(user *User, password, pkey string) {
|
|||
if password != "" {
|
||||
user.Password = password
|
||||
}
|
||||
if pkey != "" && !utils.IsStringPrefixInSlice(pkey, user.PublicKeys) {
|
||||
if pkey != "" && !util.IsStringPrefixInSlice(pkey, user.PublicKeys) {
|
||||
user.PublicKeys = append(user.PublicKeys, pkey)
|
||||
}
|
||||
}
|
||||
|
@ -2302,7 +2304,7 @@ func doExternalAuth(username, password string, pubKey []byte, keyboardInteractiv
|
|||
return u, nil
|
||||
}
|
||||
|
||||
pkey, err := utils.GetSSHPublicKeyAsString(pubKey)
|
||||
pkey, err := util.GetSSHPublicKeyAsString(pubKey)
|
||||
if err != nil {
|
||||
return user, err
|
||||
}
|
||||
|
@ -2313,11 +2315,11 @@ func doExternalAuth(username, password string, pubKey []byte, keyboardInteractiv
|
|||
return user, fmt.Errorf("external auth error: %v, elapsed: %v", err, time.Since(startTime))
|
||||
}
|
||||
providerLog(logger.LevelDebug, "external auth completed, elapsed: %v", time.Since(startTime))
|
||||
if utils.IsByteArrayEmpty(out) {
|
||||
if util.IsByteArrayEmpty(out) {
|
||||
providerLog(logger.LevelDebug, "empty response from external hook, no modification requested for user %#v id: %v",
|
||||
username, u.ID)
|
||||
if u.ID == 0 {
|
||||
return u, utils.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist", username))
|
||||
return u, util.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist", username))
|
||||
}
|
||||
return u, nil
|
||||
}
|
||||
|
@ -2361,12 +2363,14 @@ func getUserAndJSONForHook(username string) (User, []byte, error) {
var userAsJSON []byte
u, err := provider.userExists(username)
if err != nil {
if _, ok := err.(*utils.RecordNotFoundError); !ok {
if _, ok := err.(*util.RecordNotFoundError); !ok {
return u, userAsJSON, err
}
u = User{
BaseUser: sdk.BaseUser{
ID: 0,
Username: username,
},
}
}
userAsJSON, err = json.Marshal(u)
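The fallback User built for the hook now fills an embedded sdk.BaseUser instead of setting top-level fields. A minimal sketch of that embedding pattern; only the two fields set in this hunk are shown, everything else about the real BaseUser is out of scope.

package main

import "fmt"

// BaseUser: the portable subset of user data assumed to live in the sdk package.
type BaseUser struct {
	ID       int64  `json:"id"`
	Username string `json:"username"`
}

// User embeds BaseUser, so existing code can keep reading user.Username.
type User struct {
	BaseUser
	// provider specific fields (virtual folders, fs config, ...) omitted
}

func main() {
	u := User{BaseUser: BaseUser{ID: 0, Username: "nobody"}}
	fmt.Println(u.Username) // promoted field, prints "nobody"
}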
|
@ -2403,7 +2407,8 @@ func executeNotificationCommand(operation string, commandArgs []string, userAsJS
|
|||
}
|
||||
|
||||
func executeAction(operation string, user *User) {
|
||||
if !utils.IsStringInSlice(operation, config.Actions.ExecuteOn) {
|
||||
plugin.Handler.NotifyUserEvent(operation, user)
|
||||
if !util.IsStringInSlice(operation, config.Actions.ExecuteOn) {
|
||||
return
|
||||
}
|
||||
if config.Actions.Hook == "" {
|
||||
|
@ -2411,17 +2416,8 @@ func executeAction(operation string, user *User) {
}

go func() {
if operation != operationDelete {
var err error
u, err := provider.userExists(user.Username)
if err != nil {
providerLog(logger.LevelWarn, "unable to get the user to notify for operation %#v: %v", operation, err)
return
}
user = &u
}
user.PrepareForRendering()
userAsJSON, err := json.Marshal(user)
userAsJSON, err := user.RenderAsJSON(operation != operationDelete)
if err != nil {
providerLog(logger.LevelWarn, "unable to serialize user as JSON for operation %#v: %v", operation, err)
return
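With this change executeAction forwards every user event to plugin.Handler before the pre-existing hook logic runs. A rough sketch of the notifier contract that call implies, inferred from NotifyUserEvent above and the fs/user event options in the config test; the real interface in the plugin package is not shown in this diff and may differ.

// Hypothetical notifier-side contract for the experimental plugin system.
package plugin

// UserNotifier receives user lifecycle events such as add, update and delete.
type UserNotifier interface {
	NotifyUserEvent(operation string, user interface{})
}

// handler fans an event out to every configured notifier plugin; failures are
// assumed to be logged by the concrete implementation rather than returned.
type handler struct {
	notifiers []UserNotifier
}

func (h *handler) NotifyUserEvent(operation string, user interface{}) {
	for _, n := range h.notifiers {
		n.NotifyUserEvent(operation, user)
	}
}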
|
|
|
@ -11,7 +11,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/drakkan/sftpgo/v2/logger"
|
||||
"github.com/drakkan/sftpgo/v2/utils"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
"github.com/drakkan/sftpgo/v2/vfs"
|
||||
)
|
||||
|
||||
|
@ -45,7 +45,7 @@ type MemoryProvider struct {
|
|||
|
||||
func initializeMemoryProvider(basePath string) {
|
||||
configFile := ""
|
||||
if utils.IsFileInputValid(config.Name) {
|
||||
if util.IsFileInputValid(config.Name) {
|
||||
configFile = config.Name
|
||||
if !filepath.IsAbs(configFile) {
|
||||
configFile = filepath.Join(basePath, configFile)
|
||||
|
@ -147,7 +147,7 @@ func (p *MemoryProvider) updateLastLogin(username string) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
user.LastLogin = utils.GetTimeAsMsSinceEpoch(time.Now())
|
||||
user.LastLogin = util.GetTimeAsMsSinceEpoch(time.Now())
|
||||
p.dbHandle.users[user.Username] = user
|
||||
return nil
|
||||
}
|
||||
|
@ -170,7 +170,7 @@ func (p *MemoryProvider) updateQuota(username string, filesAdd int, sizeAdd int6
|
|||
user.UsedQuotaSize += sizeAdd
|
||||
user.UsedQuotaFiles += filesAdd
|
||||
}
|
||||
user.LastQuotaUpdate = utils.GetTimeAsMsSinceEpoch(time.Now())
|
||||
user.LastQuotaUpdate = util.GetTimeAsMsSinceEpoch(time.Now())
|
||||
providerLog(logger.LevelDebug, "quota updated for user %#v, files increment: %v size increment: %v is reset? %v",
|
||||
username, filesAdd, sizeAdd, reset)
|
||||
p.dbHandle.users[user.Username] = user
|
||||
|
@ -367,7 +367,7 @@ func (p *MemoryProvider) userExistsInternal(username string) (User, error) {
|
|||
if val, ok := p.dbHandle.users[username]; ok {
|
||||
return val.getACopy(), nil
|
||||
}
|
||||
return User{}, utils.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist", username))
|
||||
return User{}, util.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist", username))
|
||||
}
|
||||
|
||||
func (p *MemoryProvider) addAdmin(admin *Admin) error {
|
||||
|
@ -444,7 +444,7 @@ func (p *MemoryProvider) adminExistsInternal(username string) (Admin, error) {
|
|||
if val, ok := p.dbHandle.admins[username]; ok {
|
||||
return val.getACopy(), nil
|
||||
}
|
||||
return Admin{}, utils.NewRecordNotFoundError(fmt.Sprintf("admin %#v does not exist", username))
|
||||
return Admin{}, util.NewRecordNotFoundError(fmt.Sprintf("admin %#v does not exist", username))
|
||||
}
|
||||
|
||||
func (p *MemoryProvider) dumpAdmins() ([]Admin, error) {
|
||||
|
@ -526,7 +526,7 @@ func (p *MemoryProvider) updateFolderQuota(name string, filesAdd int, sizeAdd in
|
|||
folder.UsedQuotaSize += sizeAdd
|
||||
folder.UsedQuotaFiles += filesAdd
|
||||
}
|
||||
folder.LastQuotaUpdate = utils.GetTimeAsMsSinceEpoch(time.Now())
|
||||
folder.LastQuotaUpdate = util.GetTimeAsMsSinceEpoch(time.Now())
|
||||
p.dbHandle.vfolders[name] = folder
|
||||
return nil
|
||||
}
|
||||
|
@ -574,7 +574,7 @@ func (p *MemoryProvider) removeUserFromFolderMapping(folderName, username string
|
|||
|
||||
func (p *MemoryProvider) updateFoldersMappingInternal(folder vfs.BaseVirtualFolder) {
|
||||
p.dbHandle.vfolders[folder.Name] = folder
|
||||
if !utils.IsStringInSlice(folder.Name, p.dbHandle.vfoldersNames) {
|
||||
if !util.IsStringInSlice(folder.Name, p.dbHandle.vfoldersNames) {
|
||||
p.dbHandle.vfoldersNames = append(p.dbHandle.vfoldersNames, folder.Name)
|
||||
sort.Strings(p.dbHandle.vfoldersNames)
|
||||
}
|
||||
|
@ -588,13 +588,13 @@ func (p *MemoryProvider) addOrUpdateFolderInternal(baseFolder *vfs.BaseVirtualFo
|
|||
folder.MappedPath = baseFolder.MappedPath
|
||||
folder.Description = baseFolder.Description
|
||||
folder.FsConfig = baseFolder.FsConfig.GetACopy()
|
||||
if !utils.IsStringInSlice(username, folder.Users) {
|
||||
if !util.IsStringInSlice(username, folder.Users) {
|
||||
folder.Users = append(folder.Users, username)
|
||||
}
|
||||
p.updateFoldersMappingInternal(folder)
|
||||
return folder, nil
|
||||
}
|
||||
if _, ok := err.(*utils.RecordNotFoundError); ok {
|
||||
if _, ok := err.(*util.RecordNotFoundError); ok {
|
||||
folder = baseFolder.GetACopy()
|
||||
folder.ID = p.getNextFolderID()
|
||||
folder.UsedQuotaSize = usedQuotaSize
|
||||
|
@ -611,7 +611,7 @@ func (p *MemoryProvider) folderExistsInternal(name string) (vfs.BaseVirtualFolde
|
|||
if val, ok := p.dbHandle.vfolders[name]; ok {
|
||||
return val, nil
|
||||
}
|
||||
return vfs.BaseVirtualFolder{}, utils.NewRecordNotFoundError(fmt.Sprintf("folder %#v does not exist", name))
|
||||
return vfs.BaseVirtualFolder{}, util.NewRecordNotFoundError(fmt.Sprintf("folder %#v does not exist", name))
|
||||
}
|
||||
|
||||
func (p *MemoryProvider) getFolders(limit, offset int, order string) ([]vfs.BaseVirtualFolder, error) {
|
||||
|
|
|
@ -13,7 +13,8 @@ import (
|
|||
"github.com/cockroachdb/cockroach-go/v2/crdb"
|
||||
|
||||
"github.com/drakkan/sftpgo/v2/logger"
|
||||
"github.com/drakkan/sftpgo/v2/utils"
|
||||
"github.com/drakkan/sftpgo/v2/sdk"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
"github.com/drakkan/sftpgo/v2/vfs"
|
||||
)
|
||||
|
||||
|
@ -271,7 +272,7 @@ func sqlCommonUpdateQuota(username string, filesAdd int, sizeAdd int64, reset bo
|
|||
return err
|
||||
}
|
||||
defer stmt.Close()
|
||||
_, err = stmt.ExecContext(ctx, sizeAdd, filesAdd, utils.GetTimeAsMsSinceEpoch(time.Now()), username)
|
||||
_, err = stmt.ExecContext(ctx, sizeAdd, filesAdd, util.GetTimeAsMsSinceEpoch(time.Now()), username)
|
||||
if err == nil {
|
||||
providerLog(logger.LevelDebug, "quota updated for user %#v, files increment: %v size increment: %v is reset? %v",
|
||||
username, filesAdd, sizeAdd, reset)
|
||||
|
@ -312,7 +313,7 @@ func sqlCommonUpdateLastLogin(username string, dbHandle *sql.DB) error {
|
|||
return err
|
||||
}
|
||||
defer stmt.Close()
|
||||
_, err = stmt.ExecContext(ctx, utils.GetTimeAsMsSinceEpoch(time.Now()), username)
|
||||
_, err = stmt.ExecContext(ctx, util.GetTimeAsMsSinceEpoch(time.Now()), username)
|
||||
if err == nil {
|
||||
providerLog(logger.LevelDebug, "last login updated for user %#v", username)
|
||||
} else {
|
||||
|
@ -494,7 +495,7 @@ func getAdminFromDbRow(row sqlScanner) (Admin, error) {
|
|||
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return admin, utils.NewRecordNotFoundError(err.Error())
|
||||
return admin, util.NewRecordNotFoundError(err.Error())
|
||||
}
|
||||
return admin, err
|
||||
}
|
||||
|
@ -543,7 +544,7 @@ func getUserFromDbRow(row sqlScanner) (User, error) {
|
|||
&additionalInfo, &description)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return user, utils.NewRecordNotFoundError(err.Error())
|
||||
return user, util.NewRecordNotFoundError(err.Error())
|
||||
}
|
||||
return user, err
|
||||
}
|
||||
|
@ -570,7 +571,7 @@ func getUserFromDbRow(row sqlScanner) (User, error) {
|
|||
user.Permissions = perms
|
||||
}
|
||||
if filters.Valid {
|
||||
var userFilters UserFilters
|
||||
var userFilters sdk.UserFilters
|
||||
err = json.Unmarshal([]byte(filters.String), &userFilters)
|
||||
if err == nil {
|
||||
user.Filters = userFilters
|
||||
|
@ -620,7 +621,7 @@ func sqlCommonGetFolder(ctx context.Context, name string, dbHandle sqlQuerier) (
|
|||
err = row.Scan(&folder.ID, &mappedPath, &folder.UsedQuotaSize, &folder.UsedQuotaFiles, &folder.LastQuotaUpdate,
|
||||
&folder.Name, &description, &fsConfig)
|
||||
if err == sql.ErrNoRows {
|
||||
return folder, utils.NewRecordNotFoundError(err.Error())
|
||||
return folder, util.NewRecordNotFoundError(err.Error())
|
||||
}
|
||||
if mappedPath.Valid {
|
||||
folder.MappedPath = mappedPath.String
|
||||
|
@ -998,7 +999,7 @@ func sqlCommonUpdateFolderQuota(name string, filesAdd int, sizeAdd int64, reset
|
|||
return err
|
||||
}
|
||||
defer stmt.Close()
|
||||
_, err = stmt.ExecContext(ctx, sizeAdd, filesAdd, utils.GetTimeAsMsSinceEpoch(time.Now()), name)
|
||||
_, err = stmt.ExecContext(ctx, sizeAdd, filesAdd, util.GetTimeAsMsSinceEpoch(time.Now()), name)
|
||||
if err == nil {
|
||||
providerLog(logger.LevelDebug, "quota updated for folder %#v, files increment: %v size increment: %v is reset? %v",
|
||||
name, filesAdd, sizeAdd, reset)
|
||||
|
|
|
@ -15,7 +15,7 @@ import (
|
|||
_ "github.com/mattn/go-sqlite3"
|
||||
|
||||
"github.com/drakkan/sftpgo/v2/logger"
|
||||
"github.com/drakkan/sftpgo/v2/utils"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
"github.com/drakkan/sftpgo/v2/version"
|
||||
"github.com/drakkan/sftpgo/v2/vfs"
|
||||
)
|
||||
|
@ -60,7 +60,7 @@ func initializeSQLiteProvider(basePath string) error {
|
|||
|
||||
if config.ConnectionString == "" {
|
||||
dbPath := config.Name
|
||||
if !utils.IsFileInputValid(dbPath) {
|
||||
if !util.IsFileInputValid(dbPath) {
|
||||
return fmt.Errorf("invalid database path: %#v", dbPath)
|
||||
}
|
||||
if !filepath.IsAbs(dbPath) {
|
||||
|
|
|
@ -17,7 +17,8 @@ import (
|
|||
|
||||
"github.com/drakkan/sftpgo/v2/kms"
|
||||
"github.com/drakkan/sftpgo/v2/logger"
|
||||
"github.com/drakkan/sftpgo/v2/utils"
|
||||
"github.com/drakkan/sftpgo/v2/sdk"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
"github.com/drakkan/sftpgo/v2/vfs"
|
||||
)
|
||||
|
||||
|
@ -50,16 +51,6 @@ const (
|
|||
PermChtimes = "chtimes"
|
||||
)
|
||||
|
||||
// Web Client restrictions
|
||||
const (
|
||||
WebClientPubKeyChangeDisabled = "publickey-change-disabled"
|
||||
)
|
||||
|
||||
var (
|
||||
// WebClientOptions defines the available options for the web client interface
|
||||
WebClientOptions = []string{WebClientPubKeyChangeDisabled}
|
||||
)
|
||||
|
||||
// Available login methods
|
||||
const (
|
||||
LoginMethodNoAuthTryed = "no_auth_tryed"
|
||||
|
@ -72,168 +63,17 @@ const (
|
|||
LoginMethodTLSCertificateAndPwd = "TLSCertificate+password"
|
||||
)
|
||||
|
||||
// TLSUsername defines the TLS certificate attribute to use as username
|
||||
type TLSUsername string
|
||||
|
||||
// Supported certificate attributes to use as username
|
||||
const (
|
||||
TLSUsernameNone TLSUsername = "None"
|
||||
TLSUsernameCN TLSUsername = "CommonName"
|
||||
)
|
||||
|
||||
var (
|
||||
errNoMatchingVirtualFolder = errors.New("no matching virtual folder found")
|
||||
)
|
||||
|
||||
// DirectoryPermissions defines permissions for a directory path
|
||||
type DirectoryPermissions struct {
|
||||
Path string
|
||||
Permissions []string
|
||||
}
|
||||
|
||||
// HasPerm returns true if the directory has the specified permissions
|
||||
func (d *DirectoryPermissions) HasPerm(perm string) bool {
|
||||
return utils.IsStringInSlice(perm, d.Permissions)
|
||||
}
|
||||
|
||||
// PatternsFilter defines filters based on shell like patterns.
|
||||
// These restrictions do not apply to files listing for performance reasons, so
|
||||
// a denied file cannot be downloaded/overwritten/renamed but will still be
|
||||
// in the list of files.
|
||||
// System commands such as Git and rsync interacts with the filesystem directly
|
||||
// and they are not aware about these restrictions so they are not allowed
|
||||
// inside paths with extensions filters
|
||||
type PatternsFilter struct {
|
||||
// Virtual path, if no other specific filter is defined, the filter apply for
|
||||
// sub directories too.
|
||||
// For example if filters are defined for the paths "/" and "/sub" then the
|
||||
// filters for "/" are applied for any file outside the "/sub" directory
|
||||
Path string `json:"path"`
|
||||
// files with these, case insensitive, patterns are allowed.
|
||||
// Denied file patterns are evaluated before the allowed ones
|
||||
AllowedPatterns []string `json:"allowed_patterns,omitempty"`
|
||||
// files with these, case insensitive, patterns are not allowed.
|
||||
// Denied file patterns are evaluated before the allowed ones
|
||||
DeniedPatterns []string `json:"denied_patterns,omitempty"`
|
||||
}
|
||||
|
||||
// GetCommaSeparatedPatterns returns the first non empty patterns list comma separated
|
||||
func (p *PatternsFilter) GetCommaSeparatedPatterns() string {
|
||||
if len(p.DeniedPatterns) > 0 {
|
||||
return strings.Join(p.DeniedPatterns, ",")
|
||||
}
|
||||
return strings.Join(p.AllowedPatterns, ",")
|
||||
}
|
||||
|
||||
// IsDenied returns true if the patterns has one or more denied patterns
|
||||
func (p *PatternsFilter) IsDenied() bool {
|
||||
return len(p.DeniedPatterns) > 0
|
||||
}
|
||||
|
||||
// IsAllowed returns true if the patterns has one or more allowed patterns
|
||||
func (p *PatternsFilter) IsAllowed() bool {
|
||||
return len(p.AllowedPatterns) > 0
|
||||
}
|
||||

// HooksFilter defines user specific overrides for global hooks
type HooksFilter struct {
	ExternalAuthDisabled  bool `json:"external_auth_disabled"`
	PreLoginDisabled      bool `json:"pre_login_disabled"`
	CheckPasswordDisabled bool `json:"check_password_disabled"`
}

// UserFilters defines additional restrictions for a user
// TODO: rename to UserOptions in v3
type UserFilters struct {
	// only clients connecting from these IP/Mask are allowed.
	// IP/Mask must be in CIDR notation as defined in RFC 4632 and RFC 4291
	// for example "192.0.2.0/24" or "2001:db8::/32"
	AllowedIP []string `json:"allowed_ip,omitempty"`
	// clients connecting from these IP/Mask are not allowed.
	// Denied rules will be evaluated before allowed ones
	DeniedIP []string `json:"denied_ip,omitempty"`
	// these login methods are not allowed.
	// If null or empty any available login method is allowed
	DeniedLoginMethods []string `json:"denied_login_methods,omitempty"`
	// these protocols are not allowed.
	// If null or empty any available protocol is allowed
	DeniedProtocols []string `json:"denied_protocols,omitempty"`
	// filter based on shell patterns.
	// Please note that these restrictions can be easily bypassed.
	FilePatterns []PatternsFilter `json:"file_patterns,omitempty"`
	// max size allowed for a single upload, 0 means unlimited
	MaxUploadFileSize int64 `json:"max_upload_file_size,omitempty"`
	// TLS certificate attribute to use as username.
	// For FTP clients it must match the name provided using the
	// "USER" command
	TLSUsername TLSUsername `json:"tls_username,omitempty"`
	// user specific hook overrides
	Hooks HooksFilter `json:"hooks,omitempty"`
	// Disable checks for existence and automatic creation of home directory
	// and virtual folders.
	// SFTPGo requires that the user's home directory, virtual folder root,
	// and intermediate paths to virtual folders exist to work properly.
	// If you already know that the required directories exist, disabling
	// these checks will speed up login.
	// You could, for example, disable these checks after the first login
	DisableFsChecks bool `json:"disable_fs_checks,omitempty"`
	// WebClient related configuration options
	WebClient []string `json:"web_client,omitempty"`
}

// User defines a SFTPGo user
type User struct {
	// Database unique identifier
	ID int64 `json:"id"`
	// 1 enabled, 0 disabled (login is not allowed)
	Status int `json:"status"`
	// Username
	Username string `json:"username"`
	// Account expiration date as unix timestamp in milliseconds. An expired account cannot login.
	// 0 means no expiration
	ExpirationDate int64 `json:"expiration_date"`
	// Password used for password authentication.
	// For users created using SFTPGo REST API the password is be stored using bcrypt or argon2id hashing algo.
	// Checking passwords stored with pbkdf2, md5crypt and sha512crypt is supported too.
	Password string `json:"password,omitempty"`
	// PublicKeys used for public key authentication. At least one between password and a public key is mandatory
	PublicKeys []string `json:"public_keys,omitempty"`
	// The user cannot upload or download files outside this directory. Must be an absolute path
	HomeDir string `json:"home_dir"`
	sdk.BaseUser
	// Mapping between virtual paths and virtual folders
	VirtualFolders []vfs.VirtualFolder `json:"virtual_folders,omitempty"`
	// If sftpgo runs as root system user then the created files and directories will be assigned to this system UID
	UID int `json:"uid"`
	// If sftpgo runs as root system user then the created files and directories will be assigned to this system GID
	GID int `json:"gid"`
	// Maximum concurrent sessions. 0 means unlimited
	MaxSessions int `json:"max_sessions"`
	// Maximum size allowed as bytes. 0 means unlimited
	QuotaSize int64 `json:"quota_size"`
	// Maximum number of files allowed. 0 means unlimited
	QuotaFiles int `json:"quota_files"`
	// List of the granted permissions
	Permissions map[string][]string `json:"permissions"`
	// Used quota as bytes
	UsedQuotaSize int64 `json:"used_quota_size"`
	// Used quota as number of files
	UsedQuotaFiles int `json:"used_quota_files"`
	// Last quota update as unix timestamp in milliseconds
	LastQuotaUpdate int64 `json:"last_quota_update"`
	// Maximum upload bandwidth as KB/s, 0 means unlimited
	UploadBandwidth int64 `json:"upload_bandwidth"`
	// Maximum download bandwidth as KB/s, 0 means unlimited
	DownloadBandwidth int64 `json:"download_bandwidth"`
	// Last login as unix timestamp in milliseconds
	LastLogin int64 `json:"last_login"`
	// Additional restrictions
	Filters UserFilters `json:"filters"`
	// Filesystem configuration details
	FsConfig vfs.Filesystem `json:"filesystem"`
	// optional description, for example full name
	Description string `json:"description,omitempty"`
	// free form text field for external systems
	AdditionalInfo string `json:"additional_info,omitempty"`
	// we store the filesystem here using the base path as key.
	fsCache map[string]vfs.Fs `json:"-"`
}
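Serialized through the json tags above, a minimal user record would look roughly like the following; all values are illustrative placeholders, not defaults defined by this commit.

{
  "id": 1,
  "status": 1,
  "username": "alice",
  "home_dir": "/srv/sftpgo/data/alice",
  "permissions": {
    "/": ["*"]
  },
  "quota_size": 0,
  "filters": {
    "denied_protocols": ["FTP"],
    "file_patterns": [
      {"path": "/", "denied_patterns": ["*.exe"]}
    ]
  }
}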
@ -251,17 +91,17 @@ func (u *User) GetFilesystem(connectionID string) (fs vfs.Fs, err error) {

func (u *User) getRootFs(connectionID string) (fs vfs.Fs, err error) {
	switch u.FsConfig.Provider {
	case vfs.S3FilesystemProvider:
	case sdk.S3FilesystemProvider:
		return vfs.NewS3Fs(connectionID, u.GetHomeDir(), "", u.FsConfig.S3Config)
	case vfs.GCSFilesystemProvider:
	case sdk.GCSFilesystemProvider:
		config := u.FsConfig.GCSConfig
		config.CredentialFile = u.GetGCSCredentialsFilePath()
		return vfs.NewGCSFs(connectionID, u.GetHomeDir(), "", config)
	case vfs.AzureBlobFilesystemProvider:
	case sdk.AzureBlobFilesystemProvider:
		return vfs.NewAzBlobFs(connectionID, u.GetHomeDir(), "", u.FsConfig.AzBlobConfig)
	case vfs.CryptedFilesystemProvider:
	case sdk.CryptedFilesystemProvider:
		return vfs.NewCryptFs(connectionID, u.GetHomeDir(), "", u.FsConfig.CryptConfig)
	case vfs.SFTPFilesystemProvider:
	case sdk.SFTPFilesystemProvider:
		forbiddenSelfUsers, err := u.getForbiddenSFTPSelfUsers(u.FsConfig.SFTPConfig.Username)
		if err != nil {
			return nil, err
@ -308,7 +148,7 @@ func (u *User) CheckFsRoot(connectionID string) error {

// isFsEqual returns true if the fs has the same configuration
func (u *User) isFsEqual(other *User) bool {
	if u.FsConfig.Provider == vfs.LocalFilesystemProvider && u.GetHomeDir() != other.GetHomeDir() {
	if u.FsConfig.Provider == sdk.LocalFilesystemProvider && u.GetHomeDir() != other.GetHomeDir() {
		return false
	}
	if !u.FsConfig.IsEqual(&other.FsConfig) {

@ -324,7 +164,7 @@ func (u *User) isFsEqual(other *User) bool {
		f1 := &other.VirtualFolders[idx1]
		if f.VirtualPath == f1.VirtualPath {
			found = true
			if f.FsConfig.Provider == vfs.LocalFilesystemProvider && f.MappedPath != f1.MappedPath {
			if f.FsConfig.Provider == sdk.LocalFilesystemProvider && f.MappedPath != f1.MappedPath {
				return false
			}
			if !f.FsConfig.IsEqual(&f1.FsConfig) {

@ -346,13 +186,13 @@ func (u *User) hideConfidentialData() {
}

// GetSubDirPermissions returns permissions for sub directories
func (u *User) GetSubDirPermissions() []DirectoryPermissions {
	var result []DirectoryPermissions
func (u *User) GetSubDirPermissions() []sdk.DirectoryPermissions {
	var result []sdk.DirectoryPermissions
	for k, v := range u.Permissions {
		if k == "/" {
			continue
		}
		dirPerms := DirectoryPermissions{
		dirPerms := sdk.DirectoryPermissions{
			Path:        k,
			Permissions: v,
		}

@ -361,6 +201,21 @@ func (u *User) GetSubDirPermissions() []DirectoryPermissions {
	return result
}

// RenderAsJSON implements the renderer interface used within plugins
func (u *User) RenderAsJSON(reload bool) ([]byte, error) {
	if reload {
		user, err := provider.userExists(u.Username)
		if err != nil {
			providerLog(logger.LevelWarn, "unable to reload user before rendering as json: %v", err)
			return nil, err
		}
		user.PrepareForRendering()
		return json.Marshal(user)
	}
	u.PrepareForRendering()
	return json.Marshal(u)
}

// PrepareForRendering prepares a user for rendering.
// It hides confidential data and set to nil the empty secrets
// so they are not serialized
@ -406,14 +261,14 @@ func (u *User) CloseFs() error {

// IsPasswordHashed returns true if the password is hashed
func (u *User) IsPasswordHashed() bool {
	return utils.IsStringPrefixInSlice(u.Password, hashPwdPrefixes)
	return util.IsStringPrefixInSlice(u.Password, hashPwdPrefixes)
}

// IsTLSUsernameVerificationEnabled returns true if we need to extract the username
// from the client TLS certificate
func (u *User) IsTLSUsernameVerificationEnabled() bool {
	if u.Filters.TLSUsername != "" {
		return u.Filters.TLSUsername != TLSUsernameNone
		return u.Filters.TLSUsername != sdk.TLSUsernameNone
	}
	return false
}

@ -445,7 +300,7 @@ func (u *User) GetPermissionsForPath(p string) []string {
		// fallback permissions
		permissions = perms
	}
	dirsForPath := utils.GetDirsForVirtualPath(p)
	dirsForPath := util.GetDirsForVirtualPath(p)
	// dirsForPath contains all the dirs for a given path in reverse order
	// for example if the path is: /1/2/3/4 it contains:
	// [ "/1/2/3/4", "/1/2/3", "/1/2", "/1", "/" ]
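The reverse-order walk described in the comment above can be reproduced with a small helper. The function below is a hypothetical stand-in for util.GetDirsForVirtualPath, shown only to illustrate the expected output, not the library's actual implementation.

package main

import (
	"fmt"
	"path"
)

// getDirsForVirtualPath returns the given virtual path and all of its parents,
// from the deepest directory up to "/", matching the order shown in the comment above.
func getDirsForVirtualPath(virtualPath string) []string {
	dirs := []string{}
	p := path.Clean("/" + virtualPath)
	for {
		dirs = append(dirs, p)
		if p == "/" {
			break
		}
		p = path.Dir(p)
	}
	return dirs
}

func main() {
	fmt.Println(getDirsForVirtualPath("/1/2/3/4"))
	// prints: [/1/2/3/4 /1/2/3 /1/2 /1 /]
}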
@ -464,20 +319,20 @@ func (u *User) getForbiddenSFTPSelfUsers(username string) ([]string, error) {
	if err == nil {
		// we don't allow local nested SFTP folders
		var forbiddens []string
		if sftpUser.FsConfig.Provider == vfs.SFTPFilesystemProvider {
		if sftpUser.FsConfig.Provider == sdk.SFTPFilesystemProvider {
			forbiddens = append(forbiddens, sftpUser.Username)
			return forbiddens, nil
		}
		for idx := range sftpUser.VirtualFolders {
			v := &sftpUser.VirtualFolders[idx]
			if v.FsConfig.Provider == vfs.SFTPFilesystemProvider {
			if v.FsConfig.Provider == sdk.SFTPFilesystemProvider {
				forbiddens = append(forbiddens, sftpUser.Username)
				return forbiddens, nil
			}
		}
		return forbiddens, nil
	}
	if _, ok := err.(*utils.RecordNotFoundError); !ok {
	if _, ok := err.(*util.RecordNotFoundError); !ok {
		return nil, err
	}

@ -508,7 +363,7 @@ func (u *User) GetFilesystemForPath(virtualPath, connectionID string) (vfs.Fs, e
		return fs, nil
	}
	forbiddenSelfUsers := []string{u.Username}
	if folder.FsConfig.Provider == vfs.SFTPFilesystemProvider {
	if folder.FsConfig.Provider == sdk.SFTPFilesystemProvider {
		forbiddens, err := u.getForbiddenSFTPSelfUsers(folder.FsConfig.SFTPConfig.Username)
		if err != nil {
			return nil, err

@ -537,7 +392,7 @@ func (u *User) GetVirtualFolderForPath(virtualPath string) (vfs.VirtualFolder, e
	if len(u.VirtualFolders) == 0 {
		return folder, errNoMatchingVirtualFolder
	}
	dirsForPath := utils.GetDirsForVirtualPath(virtualPath)
	dirsForPath := util.GetDirsForVirtualPath(virtualPath)
	for index := range dirsForPath {
		for idx := range u.VirtualFolders {
			v := &u.VirtualFolders[idx]

@ -584,7 +439,7 @@ func (u *User) GetVirtualFoldersInPath(virtualPath string) map[string]bool {

	for idx := range u.VirtualFolders {
		v := &u.VirtualFolders[idx]
		dirsForPath := utils.GetDirsForVirtualPath(v.VirtualPath)
		dirsForPath := util.GetDirsForVirtualPath(v.VirtualPath)
		for index := range dirsForPath {
			d := dirsForPath[index]
			if d == "/" {
@ -680,20 +535,20 @@ func (u *User) HasPermissionsInside(virtualPath string) bool {
// HasPerm returns true if the user has the given permission or any permission
func (u *User) HasPerm(permission, path string) bool {
	perms := u.GetPermissionsForPath(path)
	if utils.IsStringInSlice(PermAny, perms) {
	if util.IsStringInSlice(PermAny, perms) {
		return true
	}
	return utils.IsStringInSlice(permission, perms)
	return util.IsStringInSlice(permission, perms)
}

// HasPerms return true if the user has all the given permissions
func (u *User) HasPerms(permissions []string, path string) bool {
	perms := u.GetPermissionsForPath(path)
	if utils.IsStringInSlice(PermAny, perms) {
	if util.IsStringInSlice(PermAny, perms) {
		return true
	}
	for _, permission := range permissions {
		if !utils.IsStringInSlice(permission, perms) {
		if !util.IsStringInSlice(permission, perms) {
			return false
		}
	}

@ -720,7 +575,7 @@ func (u *User) IsLoginMethodAllowed(loginMethod string, partialSuccessMethods []
			}
		}
	}
	if utils.IsStringInSlice(loginMethod, u.Filters.DeniedLoginMethods) {
	if util.IsStringInSlice(loginMethod, u.Filters.DeniedLoginMethods) {
		return false
	}
	return true

@ -760,7 +615,7 @@ func (u *User) IsPartialAuth(loginMethod string) bool {
		if method == LoginMethodTLSCertificate || method == LoginMethodTLSCertificateAndPwd {
			continue
		}
		if !utils.IsStringInSlice(method, SSHMultiStepsLoginMethods) {
		if !util.IsStringInSlice(method, SSHMultiStepsLoginMethods) {
			return false
		}
	}

@ -771,7 +626,7 @@ func (u *User) IsPartialAuth(loginMethod string) bool {
func (u *User) GetAllowedLoginMethods() []string {
	var allowedMethods []string
	for _, method := range ValidLoginMethods {
		if !utils.IsStringInSlice(method, u.Filters.DeniedLoginMethods) {
		if !util.IsStringInSlice(method, u.Filters.DeniedLoginMethods) {
			allowedMethods = append(allowedMethods, method)
		}
	}
@ -780,18 +635,18 @@ func (u *User) GetAllowedLoginMethods() []string {

// GetFlatFilePatterns returns file patterns as flat list
// duplicating a path if it has both allowed and denied patterns
func (u *User) GetFlatFilePatterns() []PatternsFilter {
	var result []PatternsFilter
func (u *User) GetFlatFilePatterns() []sdk.PatternsFilter {
	var result []sdk.PatternsFilter

	for _, pattern := range u.Filters.FilePatterns {
		if len(pattern.AllowedPatterns) > 0 {
			result = append(result, PatternsFilter{
			result = append(result, sdk.PatternsFilter{
				Path:            pattern.Path,
				AllowedPatterns: pattern.AllowedPatterns,
			})
		}
		if len(pattern.DeniedPatterns) > 0 {
			result = append(result, PatternsFilter{
			result = append(result, sdk.PatternsFilter{
				Path:           pattern.Path,
				DeniedPatterns: pattern.DeniedPatterns,
			})

@ -809,8 +664,8 @@ func (u *User) isFilePatternAllowed(virtualPath string) bool {
	if len(u.Filters.FilePatterns) == 0 {
		return true
	}
	dirsForPath := utils.GetDirsForVirtualPath(path.Dir(virtualPath))
	var filter PatternsFilter
	dirsForPath := util.GetDirsForVirtualPath(path.Dir(virtualPath))
	var filter sdk.PatternsFilter
	for _, dir := range dirsForPath {
		for _, f := range u.Filters.FilePatterns {
			if f.Path == dir {

@ -844,7 +699,7 @@ func (u *User) isFilePatternAllowed(virtualPath string) bool {
// CanManagePublicKeys return true if this user is allowed to manage public keys
// from the web client
func (u *User) CanManagePublicKeys() bool {
	return !utils.IsStringInSlice(WebClientPubKeyChangeDisabled, u.Filters.WebClient)
	return !util.IsStringInSlice(sdk.WebClientPubKeyChangeDisabled, u.Filters.WebClient)
}

// GetSignature returns a signature for this admin.
@ -864,7 +719,7 @@ func (u *User) IsLoginFromAddrAllowed(remoteAddr string) bool {
	if len(u.Filters.AllowedIP) == 0 && len(u.Filters.DeniedIP) == 0 {
		return true
	}
	remoteIP := net.ParseIP(utils.GetIPFromRemoteAddress(remoteAddr))
	remoteIP := net.ParseIP(util.GetIPFromRemoteAddress(remoteAddr))
	// if remoteIP is invalid we allow login, this should never happen
	if remoteIP == nil {
		logger.Warn(logSender, "", "login allowed for invalid IP. remote address: %#v", remoteAddr)
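The CIDR based allow/deny check referenced here (AllowedIP and DeniedIP, with denied rules evaluated first per the UserFilters documentation) can be sketched as follows. This is an illustrative, standalone example, not the function body from this commit.

package main

import (
	"fmt"
	"net"
)

// isIPAllowed evaluates denied networks before allowed ones; an empty allow
// list means "allow everything not explicitly denied". An unparsable IP is
// allowed, mirroring the permissive behaviour logged in the code above.
func isIPAllowed(remoteIP string, allowed, denied []string) bool {
	ip := net.ParseIP(remoteIP)
	if ip == nil {
		return true
	}
	for _, cidr := range denied {
		if _, ipNet, err := net.ParseCIDR(cidr); err == nil && ipNet.Contains(ip) {
			return false
		}
	}
	if len(allowed) == 0 {
		return true
	}
	for _, cidr := range allowed {
		if _, ipNet, err := net.ParseCIDR(cidr); err == nil && ipNet.Contains(ip) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isIPAllowed("192.0.2.10", []string{"192.0.2.0/24"}, nil))   // true
	fmt.Println(isIPAllowed("198.51.100.7", []string{"192.0.2.0/24"}, nil)) // false
}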
@ -945,13 +800,13 @@ func (u *User) GetQuotaSummary() string {
		result += "/" + strconv.Itoa(u.QuotaFiles)
	}
	if u.UsedQuotaSize > 0 || u.QuotaSize > 0 {
		result += ". Size: " + utils.ByteCountIEC(u.UsedQuotaSize)
		result += ". Size: " + util.ByteCountIEC(u.UsedQuotaSize)
		if u.QuotaSize > 0 {
			result += "/" + utils.ByteCountIEC(u.QuotaSize)
			result += "/" + util.ByteCountIEC(u.QuotaSize)
		}
	}
	if u.LastQuotaUpdate > 0 {
		t := utils.GetTimeFromMsecSinceEpoch(u.LastQuotaUpdate)
		t := util.GetTimeFromMsecSinceEpoch(u.LastQuotaUpdate)
		result += fmt.Sprintf(". Last update: %v ", t.Format("2006-01-02 15:04")) // YYYY-MM-DD HH:MM
	}
	return result

@ -983,13 +838,13 @@ func (u *User) GetPermissionsAsString() string {
func (u *User) GetBandwidthAsString() string {
	result := "DL: "
	if u.DownloadBandwidth > 0 {
		result += utils.ByteCountIEC(u.DownloadBandwidth*1000) + "/s."
		result += util.ByteCountIEC(u.DownloadBandwidth*1000) + "/s."
	} else {
		result += "unlimited."
	}
	result += " UL: "
	if u.UploadBandwidth > 0 {
		result += utils.ByteCountIEC(u.UploadBandwidth*1000) + "/s."
		result += util.ByteCountIEC(u.UploadBandwidth*1000) + "/s."
	} else {
		result += "unlimited."
	}

@ -1002,10 +857,10 @@ func (u *User) GetBandwidthAsString() string {
func (u *User) GetInfoString() string {
	var result string
	if u.LastLogin > 0 {
		t := utils.GetTimeFromMsecSinceEpoch(u.LastLogin)
		t := util.GetTimeFromMsecSinceEpoch(u.LastLogin)
		result += fmt.Sprintf("Last login: %v ", t.Format("2006-01-02 15:04")) // YYYY-MM-DD HH:MM
	}
	if u.FsConfig.Provider != vfs.LocalFilesystemProvider {
	if u.FsConfig.Provider != sdk.LocalFilesystemProvider {
		result += fmt.Sprintf("Storage: %s ", u.FsConfig.Provider.ShortInfo())
	}
	if len(u.PublicKeys) > 0 {

@ -1031,7 +886,7 @@ func (u *User) GetInfoString() string {

// GetStatusAsString returns the user status as a string
func (u *User) GetStatusAsString() string {
	if u.ExpirationDate > 0 && u.ExpirationDate < utils.GetTimeAsMsSinceEpoch(time.Now()) {
	if u.ExpirationDate > 0 && u.ExpirationDate < util.GetTimeAsMsSinceEpoch(time.Now()) {
		return "Expired"
	}
	if u.Status == 1 {

@ -1043,7 +898,7 @@ func (u *User) GetStatusAsString() string {
// GetExpirationDateAsString returns expiration date formatted as YYYY-MM-DD
func (u *User) GetExpirationDateAsString() string {
	if u.ExpirationDate > 0 {
		t := utils.GetTimeFromMsecSinceEpoch(u.ExpirationDate)
		t := util.GetTimeFromMsecSinceEpoch(u.ExpirationDate)
		return t.Format("2006-01-02")
	}
	return ""

@ -1083,7 +938,7 @@ func (u *User) getACopy() User {
		copy(perms, v)
		permissions[k] = perms
	}
	filters := UserFilters{}
	filters := sdk.UserFilters{}
	filters.MaxUploadFileSize = u.Filters.MaxUploadFileSize
	filters.TLSUsername = u.Filters.TLSUsername
	filters.AllowedIP = make([]string, len(u.Filters.AllowedIP))

@ -1092,7 +947,7 @@ func (u *User) getACopy() User {
	copy(filters.DeniedIP, u.Filters.DeniedIP)
	filters.DeniedLoginMethods = make([]string, len(u.Filters.DeniedLoginMethods))
	copy(filters.DeniedLoginMethods, u.Filters.DeniedLoginMethods)
	filters.FilePatterns = make([]PatternsFilter, len(u.Filters.FilePatterns))
	filters.FilePatterns = make([]sdk.PatternsFilter, len(u.Filters.FilePatterns))
	copy(filters.FilePatterns, u.Filters.FilePatterns)
	filters.DeniedProtocols = make([]string, len(u.Filters.DeniedProtocols))
	copy(filters.DeniedProtocols, u.Filters.DeniedProtocols)

@ -1104,12 +959,12 @@ func (u *User) getACopy() User {
	copy(filters.WebClient, u.Filters.WebClient)

	return User{
		BaseUser: sdk.BaseUser{
			ID:             u.ID,
			Username:       u.Username,
			Password:       u.Password,
			PublicKeys:     pubKeys,
			HomeDir:        u.HomeDir,
			VirtualFolders: virtualFolders,
			UID:            u.UID,
			GID:            u.GID,
			MaxSessions:    u.MaxSessions,

@ -1125,9 +980,11 @@ func (u *User) getACopy() User {
			ExpirationDate: u.ExpirationDate,
			LastLogin:      u.LastLogin,
			Filters:        filters,
			FsConfig:       u.FsConfig.GetACopy(),
			AdditionalInfo: u.AdditionalInfo,
			Description:    u.Description,
		},
		VirtualFolders: virtualFolders,
		FsConfig:       u.FsConfig.GetACopy(),
	}
}
@ -50,7 +50,7 @@ The configuration file contains the following sections:

- **"common"**, configuration parameters shared among all the supported protocols
  - `idle_timeout`, integer. Time in minutes after which an idle client will be disconnected. 0 means disabled. Default: 15
  - `upload_mode` integer. 0 means standard: the files are uploaded directly to the requested path. 1 means atomic: files are uploaded to a temporary path and renamed to the requested path when the client ends the upload. Atomic mode avoids problems such as a web server that serves partial files when the files are being uploaded. In atomic mode, if there is an upload error, the temporary file is deleted and so the requested upload path will not contain a partial file. 2 means atomic with resume support: same as atomic but if there is an upload error, the temporary file is renamed to the requested path and not deleted. This way, a client can reconnect and resume the upload.
  - `upload_mode` integer. 0 means standard: the files are uploaded directly to the requested path. 1 means atomic: files are uploaded to a temporary path and renamed to the requested path when the client ends the upload. Atomic mode avoids problems such as a web server that serves partial files when the files are being uploaded. In atomic mode, if there is an upload error, the temporary file is deleted and so the requested upload path will not contain a partial file. 2 means atomic with resume support: same as atomic but if there is an upload error, the temporary file is renamed to the requested path and not deleted. This way, a client can reconnect and resume the upload. Default: 0
  - `actions`, struct. It contains the command to execute and/or the HTTP URL to notify and the trigger conditions. See [Custom Actions](./custom-actions.md) for more details
    - `execute_on`, list of strings. Valid values are `pre-download`, `download`, `pre-upload`, `upload`, `pre-delete`, `delete`, `rename`, `ssh_cmd`. Leave empty to disable actions.
    - `execute_sync`, list of strings. Actions to be performed synchronously. The `pre-delete` action is always executed synchronously while the other ones are asynchronous. Executing an action synchronously means that SFTPGo will not return a result code to the client (which is waiting for it) until your hook has completed its execution. Leave empty to execute only the `pre-delete` hook synchronously

@ -238,6 +238,17 @@ The configuration file contains the following sections:
  - `secrets`
    - `url`
    - `master_key_path`
- **plugins**, list of external plugins. Each plugin is configured using a struct with the following fields:
  - `type`, string. Defines the plugin type. Supported types: `notifier`.
  - `notifier_options`, struct. Defines the options for notifier plugins.
    - `fs_events`, list of strings. Defines the filesystem events that will be notified to this plugin.
    - `user_events`, list of strings. Defines the user events that will be notified to this plugin.
  - `cmd`, string. Path to the plugin executable.
  - `args`, list of strings. Optional arguments to pass to the plugin executable.
  - `sha256sum`, string. SHA256 checksum for the plugin executable. If not empty it will be used to verify the integrity of the executable.
  - `auto_mtls`, boolean. If enabled the client and the server automatically negotiate mTLS for transport authentication. This ensures that only the original client will be allowed to connect to the server, and all other connections will be rejected. The client will also refuse to connect to any server that isn't the original instance started by the client.

Please note that the plugin system is experimental; the exposed configuration parameters and interfaces may change in a backward incompatible way in the future.

A full example showing the default config (in JSON format) can be found [here](../sftpgo.json).
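As an illustration of the fields listed above, a single notifier plugin entry in the configuration could look roughly like the following excerpt. The executable path, arguments and event names are placeholders, not values defined by this commit.

"plugins": [
  {
    "type": "notifier",
    "notifier_options": {
      "fs_events": ["upload", "delete", "rename"],
      "user_events": ["add", "update", "delete"]
    },
    "cmd": "/usr/local/libexec/sftpgo-plugin-notifier",
    "args": ["--log-level", "debug"],
    "sha256sum": "",
    "auto_mtls": true
  }
]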

@ -19,7 +19,7 @@ import (
	"github.com/drakkan/sftpgo/v2/dataprovider"
	"github.com/drakkan/sftpgo/v2/httpdtest"
	"github.com/drakkan/sftpgo/v2/kms"
	"github.com/drakkan/sftpgo/v2/vfs"
	"github.com/drakkan/sftpgo/v2/sdk"
)

func TestBasicFTPHandlingCryptFs(t *testing.T) {

@ -254,7 +254,7 @@ func TestResumeCryptFs(t *testing.T) {

func getTestUserWithCryptFs() dataprovider.User {
	user := getTestUser()
	user.FsConfig.Provider = vfs.CryptedFilesystemProvider
	user.FsConfig.Provider = sdk.CryptedFilesystemProvider
	user.FsConfig.CryptConfig.Passphrase = kms.NewPlainSecret("testPassphrase")
	return user
}
@ -10,7 +10,7 @@ import (

	"github.com/drakkan/sftpgo/v2/common"
	"github.com/drakkan/sftpgo/v2/logger"
	"github.com/drakkan/sftpgo/v2/utils"
	"github.com/drakkan/sftpgo/v2/util"
)

const (

@ -55,7 +55,7 @@ type Binding struct {
}

func (b *Binding) setCiphers() {
	b.ciphers = utils.GetTLSCiphersFromNames(b.TLSCipherSuites)
	b.ciphers = util.GetTLSCiphersFromNames(b.TLSCipherSuites)
	if len(b.ciphers) == 0 {
		b.ciphers = nil
	}

@ -219,7 +219,7 @@ func (c *Configuration) Initialize(configDir string) error {
		go func(s *Server) {
			ftpServer := ftpserver.NewFtpServer(s)
			logger.Info(logSender, "", "starting FTP serving, binding: %v", s.binding.GetAddress())
			utils.CheckTCP4Port(s.binding.Port)
			util.CheckTCP4Port(s.binding.Port)
			exitChannel <- ftpServer.ListenAndServe()
		}(server)

@ -245,7 +245,7 @@ func GetStatus() ServiceStatus {
}

func getConfigPath(name, configDir string) string {
	if !utils.IsFileInputValid(name) {
	if !util.IsFileInputValid(name) {
		return ""
	}
	if name != "" && !filepath.IsAbs(name) {
@ -32,6 +32,7 @@ import (
|
|||
"github.com/drakkan/sftpgo/v2/httpdtest"
|
||||
"github.com/drakkan/sftpgo/v2/kms"
|
||||
"github.com/drakkan/sftpgo/v2/logger"
|
||||
"github.com/drakkan/sftpgo/v2/sdk"
|
||||
"github.com/drakkan/sftpgo/v2/sftpd"
|
||||
"github.com/drakkan/sftpgo/v2/vfs"
|
||||
)
|
||||
|
@ -1117,7 +1118,7 @@ func TestDownloadErrors(t *testing.T) {
|
|||
u.Permissions[path.Join("/", subDir1)] = []string{dataprovider.PermListItems}
|
||||
u.Permissions[path.Join("/", subDir2)] = []string{dataprovider.PermListItems, dataprovider.PermUpload,
|
||||
dataprovider.PermDelete, dataprovider.PermDownload}
|
||||
u.Filters.FilePatterns = []dataprovider.PatternsFilter{
|
||||
u.Filters.FilePatterns = []sdk.PatternsFilter{
|
||||
{
|
||||
Path: "/sub2",
|
||||
AllowedPatterns: []string{},
|
||||
|
@ -1169,7 +1170,7 @@ func TestUploadErrors(t *testing.T) {
|
|||
u.Permissions[path.Join("/", subDir1)] = []string{dataprovider.PermListItems}
|
||||
u.Permissions[path.Join("/", subDir2)] = []string{dataprovider.PermListItems, dataprovider.PermUpload,
|
||||
dataprovider.PermDelete}
|
||||
u.Filters.FilePatterns = []dataprovider.PatternsFilter{
|
||||
u.Filters.FilePatterns = []sdk.PatternsFilter{
|
||||
{
|
||||
Path: "/sub2",
|
||||
AllowedPatterns: []string{},
|
||||
|
@ -1595,7 +1596,7 @@ func TestLoginWithIPilters(t *testing.T) {
|
|||
|
||||
func TestLoginWithDatabaseCredentials(t *testing.T) {
|
||||
u := getTestUser()
|
||||
u.FsConfig.Provider = vfs.GCSFilesystemProvider
|
||||
u.FsConfig.Provider = sdk.GCSFilesystemProvider
|
||||
u.FsConfig.GCSConfig.Bucket = "test"
|
||||
u.FsConfig.GCSConfig.Credentials = kms.NewPlainSecret(`{ "type": "service_account" }`)
|
||||
|
||||
|
@ -1644,7 +1645,7 @@ func TestLoginWithDatabaseCredentials(t *testing.T) {
|
|||
|
||||
func TestLoginInvalidFs(t *testing.T) {
|
||||
u := getTestUser()
|
||||
u.FsConfig.Provider = vfs.GCSFilesystemProvider
|
||||
u.FsConfig.Provider = sdk.GCSFilesystemProvider
|
||||
u.FsConfig.GCSConfig.Bucket = "test"
|
||||
u.FsConfig.GCSConfig.Credentials = kms.NewPlainSecret("invalid JSON for credentials")
|
||||
user, _, err := httpdtest.AddUser(u, http.StatusCreated)
|
||||
|
@ -2435,7 +2436,7 @@ func TestCombine(t *testing.T) {
|
|||
func TestClientCertificateAuthRevokedCert(t *testing.T) {
|
||||
u := getTestUser()
|
||||
u.Username = tlsClient2Username
|
||||
u.Filters.TLSUsername = dataprovider.TLSUsernameCN
|
||||
u.Filters.TLSUsername = sdk.TLSUsernameCN
|
||||
user, _, err := httpdtest.AddUser(u, http.StatusCreated)
|
||||
assert.NoError(t, err)
|
||||
tlsConfig := &tls.Config{
|
||||
|
@ -2477,7 +2478,7 @@ func TestClientCertificateAuth(t *testing.T) {
|
|||
assert.Contains(t, err.Error(), "login method password is not allowed")
|
||||
}
|
||||
|
||||
user.Filters.TLSUsername = dataprovider.TLSUsernameCN
|
||||
user.Filters.TLSUsername = sdk.TLSUsernameCN
|
||||
user, _, err = httpdtest.UpdateUser(user, http.StatusOK, "")
|
||||
assert.NoError(t, err)
|
||||
client, err := getFTPClient(user, true, tlsConfig)
|
||||
|
@ -2491,7 +2492,7 @@ func TestClientCertificateAuth(t *testing.T) {
|
|||
// now use a valid certificate with a CN different from username
|
||||
u = getTestUser()
|
||||
u.Username = tlsClient2Username
|
||||
u.Filters.TLSUsername = dataprovider.TLSUsernameCN
|
||||
u.Filters.TLSUsername = sdk.TLSUsernameCN
|
||||
u.Filters.DeniedLoginMethods = []string{dataprovider.LoginMethodPassword}
|
||||
user2, _, err := httpdtest.AddUser(u, http.StatusCreated)
|
||||
assert.NoError(t, err)
|
||||
|
@ -2537,7 +2538,7 @@ func TestClientCertificateAuth(t *testing.T) {
|
|||
func TestClientCertificateAndPwdAuth(t *testing.T) {
|
||||
u := getTestUser()
|
||||
u.Username = tlsClient1Username
|
||||
u.Filters.TLSUsername = dataprovider.TLSUsernameCN
|
||||
u.Filters.TLSUsername = sdk.TLSUsernameCN
|
||||
u.Filters.DeniedLoginMethods = []string{dataprovider.LoginMethodPassword, dataprovider.LoginMethodTLSCertificate}
|
||||
user, _, err := httpdtest.AddUser(u, http.StatusCreated)
|
||||
assert.NoError(t, err)
|
||||
|
@ -2588,7 +2589,7 @@ func TestExternatAuthWithClientCert(t *testing.T) {
|
|||
u := getTestUser()
|
||||
u.Username = tlsClient1Username
|
||||
u.Filters.DeniedLoginMethods = append(u.Filters.DeniedLoginMethods, dataprovider.LoginMethodPassword)
|
||||
u.Filters.TLSUsername = dataprovider.TLSUsernameCN
|
||||
u.Filters.TLSUsername = sdk.TLSUsernameCN
|
||||
err := dataprovider.Close()
|
||||
assert.NoError(t, err)
|
||||
err = config.LoadConfig(configDir, "")
|
||||
|
@ -2655,7 +2656,7 @@ func TestPreLoginHookWithClientCert(t *testing.T) {
|
|||
u := getTestUser()
|
||||
u.Username = tlsClient1Username
|
||||
u.Filters.DeniedLoginMethods = append(u.Filters.DeniedLoginMethods, dataprovider.LoginMethodPassword)
|
||||
u.Filters.TLSUsername = dataprovider.TLSUsernameCN
|
||||
u.Filters.TLSUsername = sdk.TLSUsernameCN
|
||||
err := dataprovider.Close()
|
||||
assert.NoError(t, err)
|
||||
err = config.LoadConfig(configDir, "")
|
||||
|
@ -2738,11 +2739,13 @@ func TestNestedVirtualFolders(t *testing.T) {
|
|||
BaseVirtualFolder: vfs.BaseVirtualFolder{
|
||||
Name: folderNameCrypt,
|
||||
FsConfig: vfs.Filesystem{
|
||||
Provider: vfs.CryptedFilesystemProvider,
|
||||
Provider: sdk.CryptedFilesystemProvider,
|
||||
CryptConfig: vfs.CryptFsConfig{
|
||||
CryptFsConfig: sdk.CryptFsConfig{
|
||||
Passphrase: kms.NewPlainSecret(defaultPassword),
|
||||
},
|
||||
},
|
||||
},
|
||||
MappedPath: mappedPathCrypt,
|
||||
},
|
||||
VirtualPath: vdirCryptPath,
|
||||
|
@ -2976,11 +2979,13 @@ func waitNoConnections() {
|
|||
|
||||
func getTestUser() dataprovider.User {
|
||||
user := dataprovider.User{
|
||||
BaseUser: sdk.BaseUser{
|
||||
Username: defaultUsername,
|
||||
Password: defaultPassword,
|
||||
HomeDir: filepath.Join(homeBasePath, defaultUsername),
|
||||
Status: 1,
|
||||
ExpirationDate: 0,
|
||||
},
|
||||
}
|
||||
user.Permissions = make(map[string][]string)
|
||||
user.Permissions["/"] = allPerms
|
||||
|
@ -2990,7 +2995,7 @@ func getTestUser() dataprovider.User {
|
|||
func getTestSFTPUser() dataprovider.User {
|
||||
u := getTestUser()
|
||||
u.Username = u.Username + "_sftp"
|
||||
u.FsConfig.Provider = vfs.SFTPFilesystemProvider
|
||||
u.FsConfig.Provider = sdk.SFTPFilesystemProvider
|
||||
u.FsConfig.SFTPConfig.Endpoint = sftpServerAddr
|
||||
u.FsConfig.SFTPConfig.Username = defaultUsername
|
||||
u.FsConfig.SFTPConfig.Password = kms.NewPlainSecret(defaultPassword)
|
||||
|
|
|
@ -17,6 +17,7 @@ import (
|
|||
|
||||
"github.com/drakkan/sftpgo/v2/common"
|
||||
"github.com/drakkan/sftpgo/v2/dataprovider"
|
||||
"github.com/drakkan/sftpgo/v2/sdk"
|
||||
"github.com/drakkan/sftpgo/v2/vfs"
|
||||
)
|
||||
|
||||
|
@ -466,7 +467,9 @@ func TestServerGetSettings(t *testing.T) {
|
|||
|
||||
func TestUserInvalidParams(t *testing.T) {
|
||||
u := dataprovider.User{
|
||||
BaseUser: sdk.BaseUser{
|
||||
HomeDir: "invalid",
|
||||
},
|
||||
}
|
||||
binding := Binding{
|
||||
Port: 2121,
|
||||
|
@ -548,7 +551,9 @@ func TestDriverMethodsNotImplemented(t *testing.T) {
|
|||
|
||||
func TestResolvePathErrors(t *testing.T) {
|
||||
user := dataprovider.User{
|
||||
BaseUser: sdk.BaseUser{
|
||||
HomeDir: "invalid",
|
||||
},
|
||||
}
|
||||
user.Permissions = make(map[string][]string)
|
||||
user.Permissions["/"] = []string{dataprovider.PermAny}
|
||||
|
@ -609,8 +614,10 @@ func TestUploadFileStatError(t *testing.T) {
|
|||
t.Skip("this test is not available on Windows")
|
||||
}
|
||||
user := dataprovider.User{
|
||||
BaseUser: sdk.BaseUser{
|
||||
Username: "user",
|
||||
HomeDir: filepath.Clean(os.TempDir()),
|
||||
},
|
||||
}
|
||||
user.Permissions = make(map[string][]string)
|
||||
user.Permissions["/"] = []string{dataprovider.PermAny}
|
||||
|
@ -638,8 +645,10 @@ func TestUploadFileStatError(t *testing.T) {
|
|||
|
||||
func TestAVBLErrors(t *testing.T) {
|
||||
user := dataprovider.User{
|
||||
BaseUser: sdk.BaseUser{
|
||||
Username: "user",
|
||||
HomeDir: filepath.Clean(os.TempDir()),
|
||||
},
|
||||
}
|
||||
user.Permissions = make(map[string][]string)
|
||||
user.Permissions["/"] = []string{dataprovider.PermAny}
|
||||
|
@ -658,8 +667,10 @@ func TestAVBLErrors(t *testing.T) {
|
|||
|
||||
func TestUploadOverwriteErrors(t *testing.T) {
|
||||
user := dataprovider.User{
|
||||
BaseUser: sdk.BaseUser{
|
||||
Username: "user",
|
||||
HomeDir: filepath.Clean(os.TempDir()),
|
||||
},
|
||||
}
|
||||
user.Permissions = make(map[string][]string)
|
||||
user.Permissions["/"] = []string{dataprovider.PermAny}
|
||||
|
@ -712,8 +723,10 @@ func TestTransferErrors(t *testing.T) {
|
|||
file, err := os.Create(testfile)
|
||||
assert.NoError(t, err)
|
||||
user := dataprovider.User{
|
||||
BaseUser: sdk.BaseUser{
|
||||
Username: "user",
|
||||
HomeDir: filepath.Clean(os.TempDir()),
|
||||
},
|
||||
}
|
||||
user.Permissions = make(map[string][]string)
|
||||
user.Permissions["/"] = []string{dataprovider.PermAny}
|
||||
|
|
|
@ -15,8 +15,8 @@ import (
|
|||
"github.com/drakkan/sftpgo/v2/common"
|
||||
"github.com/drakkan/sftpgo/v2/dataprovider"
|
||||
"github.com/drakkan/sftpgo/v2/logger"
|
||||
"github.com/drakkan/sftpgo/v2/metrics"
|
||||
"github.com/drakkan/sftpgo/v2/utils"
|
||||
"github.com/drakkan/sftpgo/v2/metric"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
"github.com/drakkan/sftpgo/v2/version"
|
||||
)
|
||||
|
||||
|
@ -135,7 +135,7 @@ func (s *Server) GetSettings() (*ftpserver.Settings, error) {
|
|||
|
||||
// ClientConnected is called to send the very first welcome message
|
||||
func (s *Server) ClientConnected(cc ftpserver.ClientContext) (string, error) {
|
||||
ipAddr := utils.GetIPFromRemoteAddress(cc.RemoteAddr().String())
|
||||
ipAddr := util.GetIPFromRemoteAddress(cc.RemoteAddr().String())
|
||||
common.Connections.AddClientConnection(ipAddr)
|
||||
if common.IsBanned(ipAddr) {
|
||||
logger.Log(logger.LevelDebug, common.ProtocolFTP, "", "connection refused, ip %#v is banned", ipAddr)
|
||||
|
@ -167,7 +167,7 @@ func (s *Server) ClientDisconnected(cc ftpserver.ClientContext) {
|
|||
s.cleanTLSConnVerification(cc.ID())
|
||||
connID := fmt.Sprintf("%v_%v_%v", common.ProtocolFTP, s.ID, cc.ID())
|
||||
common.Connections.Remove(connID)
|
||||
common.Connections.RemoveClientConnection(utils.GetIPFromRemoteAddress(cc.RemoteAddr().String()))
|
||||
common.Connections.RemoveClientConnection(util.GetIPFromRemoteAddress(cc.RemoteAddr().String()))
|
||||
}
|
||||
|
||||
// AuthUser authenticates the user and selects an handling driver
|
||||
|
@ -176,7 +176,7 @@ func (s *Server) AuthUser(cc ftpserver.ClientContext, username, password string)
|
|||
if s.isTLSConnVerified(cc.ID()) {
|
||||
loginMethod = dataprovider.LoginMethodTLSCertificateAndPwd
|
||||
}
|
||||
ipAddr := utils.GetIPFromRemoteAddress(cc.RemoteAddr().String())
|
||||
ipAddr := util.GetIPFromRemoteAddress(cc.RemoteAddr().String())
|
||||
user, err := dataprovider.CheckUserAndPass(username, password, ipAddr, common.ProtocolFTP)
|
||||
if err != nil {
|
||||
user.Username = username
|
||||
|
@ -206,7 +206,7 @@ func (s *Server) VerifyConnection(cc ftpserver.ClientContext, user string, tlsCo
|
|||
if tlsConn != nil {
|
||||
state := tlsConn.ConnectionState()
|
||||
if len(state.PeerCertificates) > 0 {
|
||||
ipAddr := utils.GetIPFromRemoteAddress(cc.RemoteAddr().String())
|
||||
ipAddr := util.GetIPFromRemoteAddress(cc.RemoteAddr().String())
|
||||
dbUser, err := dataprovider.CheckUserBeforeTLSAuth(user, ipAddr, common.ProtocolFTP, state.PeerCertificates[0])
|
||||
if err != nil {
|
||||
dbUser.Username = user
|
||||
|
@ -307,7 +307,7 @@ func (s *Server) validateUser(user dataprovider.User, cc ftpserver.ClientContext
|
|||
user.Username, user.HomeDir)
|
||||
return nil, fmt.Errorf("cannot login user with invalid home dir: %#v", user.HomeDir)
|
||||
}
|
||||
if utils.IsStringInSlice(common.ProtocolFTP, user.Filters.DeniedProtocols) {
|
||||
if util.IsStringInSlice(common.ProtocolFTP, user.Filters.DeniedProtocols) {
|
||||
logger.Debug(logSender, connectionID, "cannot login user %#v, protocol FTP is not allowed", user.Username)
|
||||
return nil, fmt.Errorf("protocol FTP is not allowed for user %#v", user.Username)
|
||||
}
|
||||
|
@ -348,16 +348,16 @@ func (s *Server) validateUser(user dataprovider.User, cc ftpserver.ClientContext
|
|||
}
|
||||
|
||||
func updateLoginMetrics(user *dataprovider.User, ip, loginMethod string, err error) {
|
||||
metrics.AddLoginAttempt(loginMethod)
|
||||
metric.AddLoginAttempt(loginMethod)
|
||||
if err != nil && err != common.ErrInternalFailure {
|
||||
logger.ConnectionFailedLog(user.Username, ip, loginMethod,
|
||||
common.ProtocolFTP, err.Error())
|
||||
event := common.HostEventLoginFailed
|
||||
if _, ok := err.(*utils.RecordNotFoundError); ok {
|
||||
if _, ok := err.(*util.RecordNotFoundError); ok {
|
||||
event = common.HostEventUserNotFound
|
||||
}
|
||||
common.AddDefenderEvent(ip, event)
|
||||
}
|
||||
metrics.AddLoginResult(loginMethod, err)
|
||||
metric.AddLoginResult(loginMethod, err)
|
||||
dataprovider.ExecutePostLoginHook(user, loginMethod, ip, common.ProtocolFTP, err)
|
||||
}
|
||||
|
|
go.mod (23 changed lines)

@ -6,11 +6,12 @@ require (
	cloud.google.com/go/storage v1.16.0
	github.com/Azure/azure-storage-blob-go v0.14.0
	github.com/GehirnInc/crypt v0.0.0-20200316065508-bb7000b8a962
	github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 // indirect
	github.com/StackExchange/wmi v1.2.0 // indirect
	github.com/alexedwards/argon2id v0.0.0-20210511081203-7d35d68092b8
	github.com/aws/aws-sdk-go v1.39.0
	github.com/aws/aws-sdk-go v1.39.4
	github.com/cockroachdb/cockroach-go/v2 v2.1.1
	github.com/eikenb/pipeat v0.0.0-20210603033007-44fc3ffce52b
	github.com/fatih/color v1.12.0 // indirect
	github.com/fclairamb/ftpserverlib v0.14.0
	github.com/frankban/quicktest v1.13.0 // indirect
	github.com/go-chi/chi/v5 v5.0.3

@ -18,22 +19,28 @@ require (
	github.com/go-chi/render v1.0.1
	github.com/go-ole/go-ole v1.2.5 // indirect
	github.com/go-sql-driver/mysql v1.6.0
	github.com/goccy/go-json v0.7.3 // indirect
	github.com/goccy/go-json v0.7.4 // indirect
	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
	github.com/grandcat/zeroconf v1.0.0
	github.com/hashicorp/go-hclog v0.16.2
	github.com/hashicorp/go-plugin v1.4.2
	github.com/hashicorp/go-retryablehttp v0.7.0
	github.com/hashicorp/yamux v0.0.0-20210707203944-259a57b3608c // indirect
	github.com/jlaffaye/ftp v0.0.0-20201112195030-9aae4d151126
	github.com/klauspost/compress v1.13.1
	github.com/klauspost/cpuid/v2 v2.0.7 // indirect
	github.com/klauspost/cpuid/v2 v2.0.8 // indirect
	github.com/lestrrat-go/backoff/v2 v2.0.8 // indirect
	github.com/lestrrat-go/jwx v1.2.1
	github.com/lib/pq v1.10.2
	github.com/mattn/go-isatty v0.0.13 // indirect
	github.com/mattn/go-sqlite3 v1.14.7
	github.com/miekg/dns v1.1.43 // indirect
	github.com/minio/sio v0.3.0
	github.com/mitchellh/go-testing-interface v1.14.1 // indirect
	github.com/oklog/run v1.1.0 // indirect
	github.com/otiai10/copy v1.6.0
	github.com/pires/go-proxyproto v0.5.0
	github.com/pkg/sftp v1.13.1
	github.com/pires/go-proxyproto v0.6.0
	github.com/pkg/sftp v1.13.2
	github.com/prometheus/client_golang v1.11.0
	github.com/prometheus/common v0.29.0 // indirect
	github.com/rs/cors v1.8.0

@ -56,7 +63,9 @@ require (
	golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
	golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6
	google.golang.org/api v0.50.0
	google.golang.org/genproto v0.0.0-20210701191553-46259e63a0a9 // indirect
	google.golang.org/genproto v0.0.0-20210708141623-e76da96a951f // indirect
	google.golang.org/grpc v1.39.0
	google.golang.org/protobuf v1.27.1
	gopkg.in/natefinch/lumberjack.v2 v2.0.0
)

go.sum (60 changed lines)
@ -103,8 +103,8 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3
|
|||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
|
||||
github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 h1:5sXbqlSomvdjlRbWyNqkPsJ3Fg+tQZCbgeX1VGljbQY=
|
||||
github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
|
||||
github.com/StackExchange/wmi v1.2.0 h1:noJEYkMQVlFCEAc+2ma5YyRhlfjcWfZqk5sBRYozdyM=
|
||||
github.com/StackExchange/wmi v1.2.0/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
|
||||
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
|
||||
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
|
@ -131,8 +131,8 @@ github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi
|
|||
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
|
||||
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.39.0 h1:74BBwkEmiqBbi2CGflEh34l0YNtIibTjZsibGarkNjo=
|
||||
github.com/aws/aws-sdk-go v1.39.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.39.4 h1:nXBChUaG5cinrl3yg4/rUyssOOLH/ohk4S9K03kJirE=
|
||||
github.com/aws/aws-sdk-go v1.39.4/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
|
@ -157,6 +157,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
|
|||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||
github.com/cockroachdb/cockroach-go/v2 v2.1.1 h1:3XzfSMuUT0wBe1a3o5C0eOTcArhmmFAg2Jzh/7hhKqo=
|
||||
github.com/cockroachdb/cockroach-go/v2 v2.1.1/go.mod h1:7NtUnP6eK+l6k483WSYNrq3Kb23bWV10IRV1TyeSpwM=
|
||||
|
@ -223,9 +224,11 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
|
|||
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc=
|
||||
github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
|
||||
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
|
||||
github.com/fclairamb/ftpserverlib v0.14.0 h1:hF7cOVgihzmUwC4+i31iZ8MeCwK5IUipSZEDi4g6G4w=
|
||||
github.com/fclairamb/ftpserverlib v0.14.0/go.mod h1:ATLgn4bHgiM9+vfZbK+rMu/dqgkxO5nk94x/9f8ffDI=
|
||||
|
@ -288,8 +291,8 @@ github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22
|
|||
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
|
||||
github.com/goccy/go-json v0.4.8/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/goccy/go-json v0.7.3 h1:Pznres7bC8RRKT9yOn3EZ7fK+8Kle6K9rW2U33QlXZI=
|
||||
github.com/goccy/go-json v0.7.3/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/goccy/go-json v0.7.4 h1:B44qRUFwz/vxPKPISQ1KhvzRi9kZ28RAf6YtjriBZ5k=
|
||||
github.com/goccy/go-json v0.7.4/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
|
@ -419,8 +422,9 @@ github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/S
|
|||
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
|
||||
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
|
||||
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU=
|
||||
github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs=
|
||||
github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g=
|
||||
|
@ -430,6 +434,8 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+
|
|||
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
|
||||
github.com/hashicorp/go-plugin v1.4.2 h1:yFvG3ufXXpqiMiZx9HLcaK3XbIqQ1WJFR/F1a2CuVw0=
|
||||
github.com/hashicorp/go-plugin v1.4.2/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
||||
github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY=
|
||||
github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
|
||||
|
@ -466,6 +472,8 @@ github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267/go.mod h1:W
|
|||
github.com/hashicorp/vault/sdk v0.2.0 h1:hvVswvMA9LvXwLBFDJLIoDBXi8hj90Q+gSS7vRYmLvQ=
|
||||
github.com/hashicorp/vault/sdk v0.2.0/go.mod h1:cAGI4nVnEfAyMeqt9oB+Mase8DNn3qA/LDNHURiwssY=
|
||||
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
|
||||
github.com/hashicorp/yamux v0.0.0-20210707203944-259a57b3608c h1:nqkErwUGfpZZMqj29WZ9U/wz2OpJVDuiokLhE/3Y7IQ=
|
||||
github.com/hashicorp/yamux v0.0.0-20210707203944-259a57b3608c/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
|
@ -514,6 +522,8 @@ github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0f
|
|||
github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=
|
||||
github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
|
||||
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
|
||||
github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
|
@ -549,8 +559,8 @@ github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8
|
|||
github.com/klauspost/compress v1.13.1 h1:wXr2uRxZTJXHLly6qhJabee5JqIhTRoLBhDOA74hDEQ=
|
||||
github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.7 h1:U89pAFid7wpIWvTFJnMKgU+Sabb7DLEgHI7Xt8apo3Y=
|
||||
github.com/klauspost/cpuid/v2 v2.0.7/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.8 h1:bhR2mgIlno/Sfk4oUbH4sPlc83z1yGrN9bvqiq3C33I=
|
||||
github.com/klauspost/cpuid/v2 v2.0.8/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
|
@ -603,8 +613,9 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
|
|||
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
|
||||
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
|
||||
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
|
||||
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
|
@ -614,8 +625,9 @@ github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
|
|||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
|
||||
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA=
|
||||
github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||
github.com/mattn/go-sqlite3 v1.14.7 h1:fxWBnXkxfM6sRiuH3bqJ4CfzZojMOLVc0UTsTglEghA=
|
||||
|
@ -637,6 +649,8 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG
|
|||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
|
||||
github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
|
||||
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
||||
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||
|
@ -664,6 +678,8 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS
|
|||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
|
||||
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
||||
github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
|
||||
github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
|
||||
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
|
@ -705,8 +721,8 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi
|
|||
github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A=
|
||||
github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pires/go-proxyproto v0.5.0 h1:A4Jv4ZCaV3AFJeGh5mGwkz4iuWUYMlQ7IoO/GTuSuLo=
|
||||
github.com/pires/go-proxyproto v0.5.0/go.mod h1:Odh9VFOZJCf9G8cLW5o435Xf1J95Jw9Gw5rnCjcwzAY=
|
||||
github.com/pires/go-proxyproto v0.6.0 h1:cLJUPnuQdiNf7P/wbeOKmM1khVdaMgTFDLj8h9ZrVYk=
|
||||
github.com/pires/go-proxyproto v0.6.0/go.mod h1:Odh9VFOZJCf9G8cLW5o435Xf1J95Jw9Gw5rnCjcwzAY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
|
@ -714,8 +730,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
|||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
|
||||
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
|
||||
github.com/pkg/sftp v1.13.1 h1:I2qBYMChEhIjOgazfJmV3/mZM256btk6wkCDRmW7JYs=
|
||||
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
|
||||
github.com/pkg/sftp v1.13.2 h1:taJnKntsWgU+qae21Rx52lIwndAdKrj0mfUNQsz1z4Q=
|
||||
github.com/pkg/sftp v1.13.2/go.mod h1:LzqnAvaD5TWeNBsZpfKxSYn1MbjWwOsCIAFFJbpIsK8=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
|
@ -829,7 +845,9 @@ github.com/studio-b12/gowebdav v0.0.0-20210630100626-7ff61aa87be8 h1:ipNUBPHSUmH
|
|||
github.com/studio-b12/gowebdav v0.0.0-20210630100626-7ff61aa87be8/go.mod h1:gCcfDlA1Y7GqOaeEKw5l9dOGx1VLdc/HuQSlQAaZ30s=
|
||||
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/tklauser/go-sysconf v0.3.6 h1:oc1sJWvKkmvIxhDHeKWvZS4f6AW+YcoguSfRF2/Hmo4=
|
||||
github.com/tklauser/go-sysconf v0.3.6/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
|
||||
github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA=
|
||||
github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||
|
@ -865,6 +883,7 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
|
@ -1169,6 +1188,7 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
|
|||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
|
@ -1224,8 +1244,9 @@ google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxH
|
|||
google.golang.org/genproto v0.0.0-20210617175327-b9e0b3197ced/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
|
||||
google.golang.org/genproto v0.0.0-20210624174822-c5cf32407d0a/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
|
||||
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
|
||||
google.golang.org/genproto v0.0.0-20210701191553-46259e63a0a9 h1:HBPuvo39L0DgfVn9eHR3ki/RjZoUFWa+em77e7KFDfs=
|
||||
google.golang.org/genproto v0.0.0-20210701191553-46259e63a0a9/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U=
|
||||
google.golang.org/genproto v0.0.0-20210708141623-e76da96a951f h1:khwpF3oSk7GIab/7DDMDyE8cPQEO6FAfOcWHIRAhO20=
|
||||
google.golang.org/genproto v0.0.0-20210708141623-e76da96a951f/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
|
||||
google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
|
@ -1254,8 +1275,9 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
|
|||
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
|
||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI=
|
||||
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -14,7 +14,7 @@ import (
"github.com/hashicorp/go-retryablehttp"

"github.com/drakkan/sftpgo/v2/logger"
"github.com/drakkan/sftpgo/v2/utils"
"github.com/drakkan/sftpgo/v2/util"
)

// TLSKeyPair defines the paths for a TLS key pair
@@ -112,7 +112,7 @@ func (c *Config) loadCACerts(configDir string) (*x509.CertPool, error) {
}

for _, ca := range c.CACertificates {
if !utils.IsFileInputValid(ca) {
if !util.IsFileInputValid(ca) {
return nil, fmt.Errorf("unable to load invalid CA certificate: %#v", ca)
}
if !filepath.IsAbs(ca) {
@@ -139,10 +139,10 @@ func (c *Config) loadCertificates(configDir string) error {
for _, keyPair := range c.Certificates {
cert := keyPair.Cert
key := keyPair.Key
if !utils.IsFileInputValid(cert) {
if !util.IsFileInputValid(cert) {
return fmt.Errorf("unable to load invalid certificate: %#v", cert)
}
if !utils.IsFileInputValid(key) {
if !util.IsFileInputValid(key) {
return fmt.Errorf("unable to load invalid key: %#v", key)
}
if !filepath.IsAbs(cert) {
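The two hunks above only rename the helper package (utils to util); the certificate loading logic itself is unchanged. As a minimal sketch of that logic, assuming CA paths may be relative to the config directory (names below are illustrative and not part of this commit):

package example

import (
	"crypto/x509"
	"fmt"
	"os"
	"path/filepath"
)

// buildCAPool resolves each CA file against configDir and appends it to a pool.
func buildCAPool(configDir string, caFiles []string) (*x509.CertPool, error) {
	pool := x509.NewCertPool()
	for _, ca := range caFiles {
		if !filepath.IsAbs(ca) {
			ca = filepath.Join(configDir, ca)
		}
		pemData, err := os.ReadFile(ca)
		if err != nil {
			return nil, fmt.Errorf("unable to read CA certificate %#v: %v", ca, err)
		}
		if !pool.AppendCertsFromPEM(pemData) {
			return nil, fmt.Errorf("unable to parse CA certificate %#v", ca)
		}
	}
	return pool, nil
}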
@@ -9,7 +9,7 @@ import (
"github.com/go-chi/render"

"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/utils"
"github.com/drakkan/sftpgo/v2/util"
)

func getAdmins(w http.ResponseWriter, r *http.Request) {
@@ -141,13 +141,13 @@ func changeAdminPassword(w http.ResponseWriter, r *http.Request) {

func doChangeAdminPassword(r *http.Request, currentPassword, newPassword, confirmNewPassword string) error {
if currentPassword == "" || newPassword == "" || confirmNewPassword == "" {
return utils.NewValidationError("please provide the current password and the new one two times")
return util.NewValidationError("please provide the current password and the new one two times")
}
if newPassword != confirmNewPassword {
return utils.NewValidationError("the two password fields do not match")
return util.NewValidationError("the two password fields do not match")
}
if currentPassword == newPassword {
return utils.NewValidationError("the new password must be different from the current one")
return util.NewValidationError("the new password must be different from the current one")
}
claims, err := getTokenClaims(r)
if err != nil {
@@ -159,7 +159,7 @@ func doChangeAdminPassword(r *http.Request, currentPassword, newPassword, confir
}
match, err := admin.CheckPassword(currentPassword)
if !match || err != nil {
return utils.NewValidationError("current password does not match")
return util.NewValidationError("current password does not match")
}

admin.Password = newPassword
@@ -12,7 +12,7 @@ import (

"github.com/drakkan/sftpgo/v2/common"
"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/utils"
"github.com/drakkan/sftpgo/v2/util"
)

func readUserFolder(w http.ResponseWriter, r *http.Request) {
@@ -39,7 +39,7 @@ func readUserFolder(w http.ResponseWriter, r *http.Request) {
common.Connections.Add(connection)
defer common.Connections.Remove(connection.GetID())

name := utils.CleanPath(r.URL.Query().Get("path"))
name := util.CleanPath(r.URL.Query().Get("path"))
contents, err := connection.ReadDir(name)
if err != nil {
sendAPIResponse(w, r, err, "Unable to get directory contents", getMappedStatusCode(err))
@@ -84,7 +84,7 @@ func getUserFile(w http.ResponseWriter, r *http.Request) {
common.Connections.Add(connection)
defer common.Connections.Remove(connection.GetID())

name := utils.CleanPath(r.URL.Query().Get("path"))
name := util.CleanPath(r.URL.Query().Get("path"))
if name == "/" {
sendAPIResponse(w, r, nil, "Please set the path to a valid file", http.StatusBadRequest)
return
@@ -145,7 +145,7 @@ func getUserFilesAsZipStream(w http.ResponseWriter, r *http.Request) {

baseDir := "/"
for idx := range filesList {
filesList[idx] = utils.CleanPath(filesList[idx])
filesList[idx] = util.CleanPath(filesList[idx])
}

w.Header().Set("Content-Disposition", "attachment; filename=\"sftpgo-download.zip\"")
@@ -215,22 +215,22 @@ func changeUserPassword(w http.ResponseWriter, r *http.Request) {

func doChangeUserPassword(r *http.Request, currentPassword, newPassword, confirmNewPassword string) error {
if currentPassword == "" || newPassword == "" || confirmNewPassword == "" {
return utils.NewValidationError("please provide the current password and the new one two times")
return util.NewValidationError("please provide the current password and the new one two times")
}
if newPassword != confirmNewPassword {
return utils.NewValidationError("the two password fields do not match")
return util.NewValidationError("the two password fields do not match")
}
if currentPassword == newPassword {
return utils.NewValidationError("the new password must be different from the current one")
return util.NewValidationError("the new password must be different from the current one")
}
claims, err := getTokenClaims(r)
if err != nil || claims.Username == "" {
return errors.New("invalid token claims")
}
user, err := dataprovider.CheckUserAndPass(claims.Username, currentPassword, utils.GetIPFromRemoteAddress(r.RemoteAddr),
user, err := dataprovider.CheckUserAndPass(claims.Username, currentPassword, util.GetIPFromRemoteAddress(r.RemoteAddr),
common.ProtocolHTTP)
if err != nil {
return utils.NewValidationError("current password does not match")
return util.NewValidationError("current password does not match")
}
user.Password = newPassword
@@ -16,7 +16,7 @@ import (
"github.com/drakkan/sftpgo/v2/common"
"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/logger"
"github.com/drakkan/sftpgo/v2/utils"
"github.com/drakkan/sftpgo/v2/util"
"github.com/drakkan/sftpgo/v2/vfs"
)

@@ -105,7 +105,7 @@ func loadDataFromRequest(w http.ResponseWriter, r *http.Request) {
content, err := io.ReadAll(r.Body)
if err != nil || len(content) == 0 {
if len(content) == 0 {
err = utils.NewValidationError("request body is required")
err = util.NewValidationError("request body is required")
}
sendAPIResponse(w, r, err, "", getRespStatus(err))
return
@@ -151,7 +151,7 @@ func loadData(w http.ResponseWriter, r *http.Request) {
func restoreBackup(content []byte, inputFile string, scanQuota, mode int) error {
dump, err := dataprovider.ParseDumpData(content)
if err != nil {
return utils.NewValidationError(fmt.Sprintf("Unable to parse backup content: %v", err))
return util.NewValidationError(fmt.Sprintf("Unable to parse backup content: %v", err))
}

if err = RestoreFolders(dump.Folders, inputFile, mode, scanQuota); err != nil {
@@ -12,6 +12,7 @@ import (
"github.com/drakkan/sftpgo/v2/common"
"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/kms"
"github.com/drakkan/sftpgo/v2/sdk"
"github.com/drakkan/sftpgo/v2/vfs"
)

@@ -59,17 +60,17 @@ func addUser(w http.ResponseWriter, r *http.Request) {
}
user.SetEmptySecretsIfNil()
switch user.FsConfig.Provider {
case vfs.S3FilesystemProvider:
case sdk.S3FilesystemProvider:
if user.FsConfig.S3Config.AccessSecret.IsRedacted() {
sendAPIResponse(w, r, errors.New("invalid access_secret"), "", http.StatusBadRequest)
return
}
case vfs.GCSFilesystemProvider:
case sdk.GCSFilesystemProvider:
if user.FsConfig.GCSConfig.Credentials.IsRedacted() {
sendAPIResponse(w, r, errors.New("invalid credentials"), "", http.StatusBadRequest)
return
}
case vfs.AzureBlobFilesystemProvider:
case sdk.AzureBlobFilesystemProvider:
if user.FsConfig.AzBlobConfig.AccountKey.IsRedacted() {
sendAPIResponse(w, r, errors.New("invalid account_key"), "", http.StatusBadRequest)
return
@@ -78,12 +79,12 @@ func addUser(w http.ResponseWriter, r *http.Request) {
sendAPIResponse(w, r, errors.New("invalid sas_url"), "", http.StatusBadRequest)
return
}
case vfs.CryptedFilesystemProvider:
case sdk.CryptedFilesystemProvider:
if user.FsConfig.CryptConfig.Passphrase.IsRedacted() {
sendAPIResponse(w, r, errors.New("invalid passphrase"), "", http.StatusBadRequest)
return
}
case vfs.SFTPFilesystemProvider:
case sdk.SFTPFilesystemProvider:
if user.FsConfig.SFTPConfig.Password.IsRedacted() {
sendAPIResponse(w, r, errors.New("invalid SFTP password"), "", http.StatusBadRequest)
return
@@ -185,26 +186,26 @@ func updateEncryptedSecrets(fsConfig *vfs.Filesystem, currentS3AccessSecret, cur
currentGCSCredentials, currentCryptoPassphrase, currentSFTPPassword, currentSFTPKey *kms.Secret) {
// we use the new access secret if plain or empty, otherwise the old value
switch fsConfig.Provider {
case vfs.S3FilesystemProvider:
case sdk.S3FilesystemProvider:
if fsConfig.S3Config.AccessSecret.IsNotPlainAndNotEmpty() {
fsConfig.S3Config.AccessSecret = currentS3AccessSecret
}
case vfs.AzureBlobFilesystemProvider:
case sdk.AzureBlobFilesystemProvider:
if fsConfig.AzBlobConfig.AccountKey.IsNotPlainAndNotEmpty() {
fsConfig.AzBlobConfig.AccountKey = currentAzAccountKey
}
if fsConfig.AzBlobConfig.SASURL.IsNotPlainAndNotEmpty() {
fsConfig.AzBlobConfig.SASURL = currentAzSASUrl
}
case vfs.GCSFilesystemProvider:
case sdk.GCSFilesystemProvider:
if fsConfig.GCSConfig.Credentials.IsNotPlainAndNotEmpty() {
fsConfig.GCSConfig.Credentials = currentGCSCredentials
}
case vfs.CryptedFilesystemProvider:
case sdk.CryptedFilesystemProvider:
if fsConfig.CryptConfig.Passphrase.IsNotPlainAndNotEmpty() {
fsConfig.CryptConfig.Passphrase = currentCryptoPassphrase
}
case vfs.SFTPFilesystemProvider:
case sdk.SFTPFilesystemProvider:
if fsConfig.SFTPConfig.Password.IsNotPlainAndNotEmpty() {
fsConfig.SFTPConfig.Password = currentSFTPPassword
}
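The updateEncryptedSecrets hunk above keeps the stored secret whenever the submitted value is already encrypted or redacted rather than a new plain-text one. A reduced sketch of that rule, using a stand-in type instead of the project's kms.Secret (names are illustrative only):

package example

// secret is a stand-in for an encrypted credential; the real code uses kms.Secret.
type secret struct {
	status  string // for example "Plain", "AES-256-GCM", "Redacted"
	payload string
}

func (s *secret) isNotPlainAndNotEmpty() bool {
	return s.payload != "" && s.status != "Plain"
}

// keepCurrentIfNotUpdated mirrors the idea: if the client sent back an
// encrypted or redacted value instead of a new plain one, keep the stored secret.
func keepCurrentIfNotUpdated(submitted, current *secret) *secret {
	if submitted.isNotPlainAndNotEmpty() {
		return current
	}
	return submitted
}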
@@ -19,8 +19,8 @@ import (
"github.com/drakkan/sftpgo/v2/common"
"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/logger"
"github.com/drakkan/sftpgo/v2/metrics"
"github.com/drakkan/sftpgo/v2/utils"
"github.com/drakkan/sftpgo/v2/metric"
"github.com/drakkan/sftpgo/v2/util"
)

type pwdChange struct {
@@ -42,13 +42,13 @@ func sendAPIResponse(w http.ResponseWriter, r *http.Request, err error, message
}

func getRespStatus(err error) int {
if _, ok := err.(*utils.ValidationError); ok {
if _, ok := err.(*util.ValidationError); ok {
return http.StatusBadRequest
}
if _, ok := err.(*utils.MethodDisabledError); ok {
if _, ok := err.(*util.MethodDisabledError); ok {
return http.StatusForbidden
}
if _, ok := err.(*utils.RecordNotFoundError); ok {
if _, ok := err.(*util.RecordNotFoundError); ok {
return http.StatusNotFound
}
if os.IsNotExist(err) {
@@ -362,21 +362,21 @@ func parseRangeRequest(bytesRange string, size int64) (int64, int64, error) {
}

func updateLoginMetrics(user *dataprovider.User, ip string, err error) {
metrics.AddLoginAttempt(dataprovider.LoginMethodPassword)
metric.AddLoginAttempt(dataprovider.LoginMethodPassword)
if err != nil && err != common.ErrInternalFailure {
logger.ConnectionFailedLog(user.Username, ip, dataprovider.LoginMethodPassword, common.ProtocolHTTP, err.Error())
event := common.HostEventLoginFailed
if _, ok := err.(*utils.RecordNotFoundError); ok {
if _, ok := err.(*util.RecordNotFoundError); ok {
event = common.HostEventUserNotFound
}
common.AddDefenderEvent(ip, event)
}
metrics.AddLoginResult(dataprovider.LoginMethodPassword, err)
metric.AddLoginResult(dataprovider.LoginMethodPassword, err)
dataprovider.ExecutePostLoginHook(user, dataprovider.LoginMethodPassword, ip, common.ProtocolHTTP, err)
}

func checkHTTPClientUser(user *dataprovider.User, r *http.Request, connectionID string) error {
if utils.IsStringInSlice(common.ProtocolHTTP, user.Filters.DeniedProtocols) {
if util.IsStringInSlice(common.ProtocolHTTP, user.Filters.DeniedProtocols) {
logger.Debug(logSender, connectionID, "cannot login user %#v, protocol HTTP is not allowed", user.Username)
return fmt.Errorf("protocol HTTP is not allowed for user %#v", user.Username)
}
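getRespStatus above maps the renamed util error types to HTTP status codes by type assertion. A compact sketch of the same mapping with stand-in error types (not the real util API):

package example

import (
	"net/http"
	"os"
)

// validationError and notFoundError stand in for util.ValidationError and
// util.RecordNotFoundError; the mapping mirrors getRespStatus above.
type validationError struct{ msg string }

func (e *validationError) Error() string { return e.msg }

type notFoundError struct{ msg string }

func (e *notFoundError) Error() string { return e.msg }

func respStatus(err error) int {
	if _, ok := err.(*validationError); ok {
		return http.StatusBadRequest
	}
	if _, ok := err.(*notFoundError); ok {
		return http.StatusNotFound
	}
	if os.IsNotExist(err) {
		return http.StatusNotFound
	}
	return http.StatusInternalServerError
}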
@@ -12,7 +12,7 @@ import (

"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/logger"
"github.com/drakkan/sftpgo/v2/utils"
"github.com/drakkan/sftpgo/v2/util"
)

type tokenAudience = string
@@ -83,24 +83,24 @@ func (c *jwtTokenClaims) Decode(token map[string]interface{}) {
}

func (c *jwtTokenClaims) isCriticalPermRemoved(permissions []string) bool {
if utils.IsStringInSlice(dataprovider.PermAdminAny, permissions) {
if util.IsStringInSlice(dataprovider.PermAdminAny, permissions) {
return false
}
if (utils.IsStringInSlice(dataprovider.PermAdminManageAdmins, c.Permissions) ||
utils.IsStringInSlice(dataprovider.PermAdminAny, c.Permissions)) &&
!utils.IsStringInSlice(dataprovider.PermAdminManageAdmins, permissions) &&
!utils.IsStringInSlice(dataprovider.PermAdminAny, permissions) {
if (util.IsStringInSlice(dataprovider.PermAdminManageAdmins, c.Permissions) ||
util.IsStringInSlice(dataprovider.PermAdminAny, c.Permissions)) &&
!util.IsStringInSlice(dataprovider.PermAdminManageAdmins, permissions) &&
!util.IsStringInSlice(dataprovider.PermAdminAny, permissions) {
return true
}
return false
}

func (c *jwtTokenClaims) hasPerm(perm string) bool {
if utils.IsStringInSlice(dataprovider.PermAdminAny, c.Permissions) {
if util.IsStringInSlice(dataprovider.PermAdminAny, c.Permissions) {
return true
}

return utils.IsStringInSlice(perm, c.Permissions)
return util.IsStringInSlice(perm, c.Permissions)
}

func (c *jwtTokenClaims) createTokenResponse(tokenAuth *jwtauth.JWTAuth, audience tokenAudience) (map[string]interface{}, error) {
@@ -253,7 +253,7 @@ func verifyCSRFToken(tokenString string) error {
return fmt.Errorf("unable to verify form token: %v", err)
}

if !utils.IsStringInSlice(tokenAudienceCSRF, token.Audience()) {
if !util.IsStringInSlice(tokenAudienceCSRF, token.Audience()) {
logger.Debug(logSender, "", "error validating CSRF token audience")
return errors.New("the form token is not valid")
}
@@ -10,7 +10,7 @@ import (
"github.com/drakkan/sftpgo/v2/common"
"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/logger"
"github.com/drakkan/sftpgo/v2/utils"
"github.com/drakkan/sftpgo/v2/util"
)

// Connection details for a HTTP connection used to inteact with an SFTPGo filesystem
@@ -53,7 +53,7 @@ func (c *Connection) GetCommand() string {
func (c *Connection) Stat(name string, mode int) (os.FileInfo, error) {
c.UpdateLastActivity()

name = utils.CleanPath(name)
name = util.CleanPath(name)
if !c.User.HasPerm(dataprovider.PermListItems, path.Dir(name)) {
return nil, c.GetPermissionDeniedError()
}
@@ -70,14 +70,14 @@ func (c *Connection) Stat(name string, mode int) (os.FileInfo, error) {
func (c *Connection) ReadDir(name string) ([]os.FileInfo, error) {
c.UpdateLastActivity()

name = utils.CleanPath(name)
name = util.CleanPath(name)
return c.ListDir(name)
}

func (c *Connection) getFileReader(name string, offset int64, method string) (io.ReadCloser, error) {
c.UpdateLastActivity()

name = utils.CleanPath(name)
name = util.CleanPath(name)
if !c.User.HasPerm(dataprovider.PermDownload, path.Dir(name)) {
return nil, c.GetPermissionDeniedError()
}
@@ -25,7 +25,7 @@ import (
"github.com/drakkan/sftpgo/v2/ftpd"
"github.com/drakkan/sftpgo/v2/logger"
"github.com/drakkan/sftpgo/v2/sftpd"
"github.com/drakkan/sftpgo/v2/utils"
"github.com/drakkan/sftpgo/v2/util"
"github.com/drakkan/sftpgo/v2/webdavd"
)

@@ -184,7 +184,7 @@ type Binding struct {
}

func (b *Binding) parseAllowedProxy() error {
allowedFuncs, err := utils.ParseAllowedIPAndRanges(b.ProxyAllowed)
allowedFuncs, err := util.ParseAllowedIPAndRanges(b.ProxyAllowed)
if err != nil {
return err
}
@@ -382,7 +382,7 @@ func ReloadCertificateMgr() error {
}

func getConfigPath(name, configDir string) string {
if !utils.IsFileInputValid(name) {
if !util.IsFileInputValid(name) {
return ""
}
if name != "" && !filepath.IsAbs(name) {
@@ -530,5 +530,5 @@ func getSigningKey(signingPassphrase string) []byte {
sk := sha256.Sum256([]byte(signingPassphrase))
return sk[:]
}
return utils.GenerateRandomBytes(32)
return util.GenerateRandomBytes(32)
}
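getSigningKey above either derives a stable 32-byte key from the configured passphrase or falls back to random bytes, so issued tokens only survive restarts when a passphrase is set. A standard-library-only sketch of that choice (crypto/rand stands in for util.GenerateRandomBytes, which is not shown in this commit):

package example

import (
	"crypto/rand"
	"crypto/sha256"
)

// signingKey returns a 32 byte key: deterministic when a passphrase is set,
// random otherwise.
func signingKey(passphrase string) []byte {
	if passphrase != "" {
		sum := sha256.Sum256([]byte(passphrase))
		return sum[:]
	}
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		panic(err) // no usable entropy source, cannot continue safely
	}
	return key
}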
@ -40,8 +40,9 @@ import (
|
|||
"github.com/drakkan/sftpgo/v2/httpdtest"
|
||||
"github.com/drakkan/sftpgo/v2/kms"
|
||||
"github.com/drakkan/sftpgo/v2/logger"
|
||||
"github.com/drakkan/sftpgo/v2/sdk"
|
||||
"github.com/drakkan/sftpgo/v2/sftpd"
|
||||
"github.com/drakkan/sftpgo/v2/utils"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
"github.com/drakkan/sftpgo/v2/vfs"
|
||||
)
|
||||
|
||||
|
@ -369,10 +370,10 @@ func TestBasicUserHandling(t *testing.T) {
|
|||
user.QuotaFiles = 2
|
||||
user.UploadBandwidth = 128
|
||||
user.DownloadBandwidth = 64
|
||||
user.ExpirationDate = utils.GetTimeAsMsSinceEpoch(time.Now())
|
||||
user.ExpirationDate = util.GetTimeAsMsSinceEpoch(time.Now())
|
||||
user.AdditionalInfo = "some free text"
|
||||
user.Filters.TLSUsername = dataprovider.TLSUsernameCN
|
||||
user.Filters.WebClient = append(user.Filters.WebClient, dataprovider.WebClientPubKeyChangeDisabled)
|
||||
user.Filters.TLSUsername = sdk.TLSUsernameCN
|
||||
user.Filters.WebClient = append(user.Filters.WebClient, sdk.WebClientPubKeyChangeDisabled)
|
||||
originalUser := user
|
||||
user, _, err = httpdtest.UpdateUser(user, http.StatusOK, "")
|
||||
assert.NoError(t, err)
|
||||
|
@ -839,7 +840,7 @@ func TestAddUserInvalidFilters(t *testing.T) {
|
|||
_, _, err = httpdtest.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
u.Filters.DeniedLoginMethods = []string{}
|
||||
u.Filters.FilePatterns = []dataprovider.PatternsFilter{
|
||||
u.Filters.FilePatterns = []sdk.PatternsFilter{
|
||||
{
|
||||
Path: "relative",
|
||||
AllowedPatterns: []string{},
|
||||
|
@ -848,7 +849,7 @@ func TestAddUserInvalidFilters(t *testing.T) {
|
|||
}
|
||||
_, _, err = httpdtest.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
u.Filters.FilePatterns = []dataprovider.PatternsFilter{
|
||||
u.Filters.FilePatterns = []sdk.PatternsFilter{
|
||||
{
|
||||
Path: "/",
|
||||
AllowedPatterns: []string{},
|
||||
|
@ -857,7 +858,7 @@ func TestAddUserInvalidFilters(t *testing.T) {
|
|||
}
|
||||
_, _, err = httpdtest.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
u.Filters.FilePatterns = []dataprovider.PatternsFilter{
|
||||
u.Filters.FilePatterns = []sdk.PatternsFilter{
|
||||
{
|
||||
Path: "/subdir",
|
||||
AllowedPatterns: []string{"*.zip"},
|
||||
|
@ -871,7 +872,7 @@ func TestAddUserInvalidFilters(t *testing.T) {
|
|||
}
|
||||
_, _, err = httpdtest.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
u.Filters.FilePatterns = []dataprovider.PatternsFilter{
|
||||
u.Filters.FilePatterns = []sdk.PatternsFilter{
|
||||
{
|
||||
Path: "relative",
|
||||
AllowedPatterns: []string{},
|
||||
|
@ -880,7 +881,7 @@ func TestAddUserInvalidFilters(t *testing.T) {
|
|||
}
|
||||
_, _, err = httpdtest.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
u.Filters.FilePatterns = []dataprovider.PatternsFilter{
|
||||
u.Filters.FilePatterns = []sdk.PatternsFilter{
|
||||
{
|
||||
Path: "/",
|
||||
AllowedPatterns: []string{},
|
||||
|
@ -889,7 +890,7 @@ func TestAddUserInvalidFilters(t *testing.T) {
|
|||
}
|
||||
_, _, err = httpdtest.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
u.Filters.FilePatterns = []dataprovider.PatternsFilter{
|
||||
u.Filters.FilePatterns = []sdk.PatternsFilter{
|
||||
{
|
||||
Path: "/subdir",
|
||||
AllowedPatterns: []string{"*.zip"},
|
||||
|
@ -902,7 +903,7 @@ func TestAddUserInvalidFilters(t *testing.T) {
|
|||
}
|
||||
_, _, err = httpdtest.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
u.Filters.FilePatterns = []dataprovider.PatternsFilter{
|
||||
u.Filters.FilePatterns = []sdk.PatternsFilter{
|
||||
{
|
||||
Path: "/subdir",
|
||||
AllowedPatterns: []string{"a\\"},
|
||||
|
@ -928,7 +929,7 @@ func TestAddUserInvalidFilters(t *testing.T) {
|
|||
|
||||
func TestAddUserInvalidFsConfig(t *testing.T) {
|
||||
u := getTestUser()
|
||||
u.FsConfig.Provider = vfs.S3FilesystemProvider
|
||||
u.FsConfig.Provider = sdk.S3FilesystemProvider
|
||||
u.FsConfig.S3Config.Bucket = ""
|
||||
_, _, err := httpdtest.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
|
@ -960,7 +961,7 @@ func TestAddUserInvalidFsConfig(t *testing.T) {
|
|||
_, _, err = httpdtest.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
u = getTestUser()
|
||||
u.FsConfig.Provider = vfs.GCSFilesystemProvider
|
||||
u.FsConfig.Provider = sdk.GCSFilesystemProvider
|
||||
u.FsConfig.GCSConfig.Bucket = ""
|
||||
_, _, err = httpdtest.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
|
@ -983,7 +984,7 @@ func TestAddUserInvalidFsConfig(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
u = getTestUser()
|
||||
u.FsConfig.Provider = vfs.AzureBlobFilesystemProvider
|
||||
u.FsConfig.Provider = sdk.AzureBlobFilesystemProvider
|
||||
u.FsConfig.AzBlobConfig.SASURL = kms.NewPlainSecret("http://foo\x7f.com/")
|
||||
_, _, err = httpdtest.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
|
@ -1013,14 +1014,14 @@ func TestAddUserInvalidFsConfig(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
u = getTestUser()
|
||||
u.FsConfig.Provider = vfs.CryptedFilesystemProvider
|
||||
u.FsConfig.Provider = sdk.CryptedFilesystemProvider
|
||||
_, _, err = httpdtest.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
u.FsConfig.CryptConfig.Passphrase = kms.NewSecret(kms.SecretStatusRedacted, "akey", "", "")
|
||||
_, _, err = httpdtest.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
u = getTestUser()
|
||||
u.FsConfig.Provider = vfs.SFTPFilesystemProvider
|
||||
u.FsConfig.Provider = sdk.SFTPFilesystemProvider
|
||||
_, _, err = httpdtest.AddUser(u, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
u.FsConfig.SFTPConfig.Password = kms.NewSecret(kms.SecretStatusRedacted, "randompkey", "", "")
|
||||
|
@ -1047,7 +1048,7 @@ func TestAddUserInvalidFsConfig(t *testing.T) {
|
|||
|
||||
func TestUserRedactedPassword(t *testing.T) {
|
||||
u := getTestUser()
|
||||
u.FsConfig.Provider = vfs.S3FilesystemProvider
|
||||
u.FsConfig.Provider = sdk.S3FilesystemProvider
|
||||
u.FsConfig.S3Config.Bucket = "b"
|
||||
u.FsConfig.S3Config.Region = "eu-west-1"
|
||||
u.FsConfig.S3Config.AccessKey = "access-key"
|
||||
|
@ -1071,12 +1072,14 @@ func TestUserRedactedPassword(t *testing.T) {
|
|||
Name: folderName,
|
||||
MappedPath: filepath.Join(os.TempDir(), "crypted"),
|
||||
FsConfig: vfs.Filesystem{
|
||||
Provider: vfs.CryptedFilesystemProvider,
|
||||
Provider: sdk.CryptedFilesystemProvider,
|
||||
CryptConfig: vfs.CryptFsConfig{
|
||||
CryptFsConfig: sdk.CryptFsConfig{
|
||||
Passphrase: kms.NewSecret(kms.SecretStatusRedacted, "crypted-secret", "", ""),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
VirtualPath: "/avpath",
|
||||
}
|
||||
|
||||
|
@ -1303,7 +1306,7 @@ func TestUpdateUser(t *testing.T) {
|
|||
u := getTestUser()
|
||||
u.UsedQuotaFiles = 1
|
||||
u.UsedQuotaSize = 2
|
||||
u.Filters.TLSUsername = dataprovider.TLSUsernameCN
|
||||
u.Filters.TLSUsername = sdk.TLSUsernameCN
|
||||
u.Filters.Hooks.CheckPasswordDisabled = true
|
||||
user, _, err := httpdtest.AddUser(u, http.StatusCreated)
|
||||
assert.NoError(t, err)
|
||||
|
@ -1321,12 +1324,12 @@ func TestUpdateUser(t *testing.T) {
|
|||
user.Filters.DeniedIP = []string{"192.168.3.0/24", "192.168.4.0/24"}
|
||||
user.Filters.DeniedLoginMethods = []string{dataprovider.LoginMethodPassword}
|
||||
user.Filters.DeniedProtocols = []string{common.ProtocolWebDAV}
|
||||
user.Filters.TLSUsername = dataprovider.TLSUsernameNone
|
||||
user.Filters.TLSUsername = sdk.TLSUsernameNone
|
||||
user.Filters.Hooks.ExternalAuthDisabled = true
|
||||
user.Filters.Hooks.PreLoginDisabled = true
|
||||
user.Filters.Hooks.CheckPasswordDisabled = false
|
||||
user.Filters.DisableFsChecks = true
|
||||
user.Filters.FilePatterns = append(user.Filters.FilePatterns, dataprovider.PatternsFilter{
|
||||
user.Filters.FilePatterns = append(user.Filters.FilePatterns, sdk.PatternsFilter{
|
||||
Path: "/subdir",
|
||||
AllowedPatterns: []string{"*.zip", "*.rar"},
|
||||
DeniedPatterns: []string{"*.jpg", "*.png"},
|
||||
|
@ -1580,7 +1583,7 @@ func TestUserFolderMapping(t *testing.T) {
|
|||
func TestUserS3Config(t *testing.T) {
|
||||
user, _, err := httpdtest.AddUser(getTestUser(), http.StatusCreated)
|
||||
assert.NoError(t, err)
|
||||
user.FsConfig.Provider = vfs.S3FilesystemProvider
|
||||
user.FsConfig.Provider = sdk.S3FilesystemProvider
|
||||
user.FsConfig.S3Config.Bucket = "test" //nolint:goconst
|
||||
user.FsConfig.S3Config.Region = "us-east-1" //nolint:goconst
|
||||
user.FsConfig.S3Config.AccessKey = "Server-Access-Key"
|
||||
|
@ -1593,12 +1596,14 @@ func TestUserS3Config(t *testing.T) {
|
|||
Name: folderName,
|
||||
MappedPath: filepath.Join(os.TempDir(), "folderName"),
|
||||
FsConfig: vfs.Filesystem{
|
||||
Provider: vfs.CryptedFilesystemProvider,
|
||||
Provider: sdk.CryptedFilesystemProvider,
|
||||
CryptConfig: vfs.CryptFsConfig{
|
||||
CryptFsConfig: sdk.CryptFsConfig{
|
||||
Passphrase: kms.NewPlainSecret("Crypted-Secret"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
VirtualPath: "/folderPath",
|
||||
})
|
||||
user, body, err := httpdtest.UpdateUser(user, http.StatusOK, "")
|
||||
|
@ -1639,7 +1644,7 @@ func TestUserS3Config(t *testing.T) {
|
|||
assert.NotEmpty(t, initialSecretPayload)
|
||||
assert.Empty(t, user.FsConfig.S3Config.AccessSecret.GetAdditionalData())
|
||||
assert.Empty(t, user.FsConfig.S3Config.AccessSecret.GetKey())
|
||||
user.FsConfig.Provider = vfs.S3FilesystemProvider
|
||||
user.FsConfig.Provider = sdk.S3FilesystemProvider
|
||||
user.FsConfig.S3Config.Bucket = "test-bucket"
|
||||
user.FsConfig.S3Config.Region = "us-east-1" //nolint:goconst
|
||||
user.FsConfig.S3Config.AccessKey = "Server-Access-Key1"
|
||||
|
@ -1653,7 +1658,7 @@ func TestUserS3Config(t *testing.T) {
|
|||
assert.Empty(t, user.FsConfig.S3Config.AccessSecret.GetAdditionalData())
|
||||
assert.Empty(t, user.FsConfig.S3Config.AccessSecret.GetKey())
|
||||
// test user without access key and access secret (shared config state)
|
||||
user.FsConfig.Provider = vfs.S3FilesystemProvider
|
||||
user.FsConfig.Provider = sdk.S3FilesystemProvider
|
||||
user.FsConfig.S3Config.Bucket = "testbucket"
|
||||
user.FsConfig.S3Config.Region = "us-east-1"
|
||||
user.FsConfig.S3Config.AccessKey = ""
|
||||
|
@ -1684,7 +1689,7 @@ func TestUserGCSConfig(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
err = os.MkdirAll(credentialsPath, 0700)
|
||||
assert.NoError(t, err)
|
||||
user.FsConfig.Provider = vfs.GCSFilesystemProvider
|
||||
user.FsConfig.Provider = sdk.GCSFilesystemProvider
|
||||
user.FsConfig.GCSConfig.Bucket = "test"
|
||||
user.FsConfig.GCSConfig.Credentials = kms.NewPlainSecret("fake credentials") //nolint:goconst
|
||||
user, bb, err := httpdtest.UpdateUser(user, http.StatusOK, "")
|
||||
|
@ -1731,7 +1736,7 @@ func TestUserGCSConfig(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
assert.NoFileExists(t, credentialFile)
|
||||
user.FsConfig.GCSConfig = vfs.GCSFsConfig{}
|
||||
user.FsConfig.Provider = vfs.S3FilesystemProvider
|
||||
user.FsConfig.Provider = sdk.S3FilesystemProvider
|
||||
user.FsConfig.S3Config.Bucket = "test1"
|
||||
user.FsConfig.S3Config.Region = "us-east-1"
|
||||
user.FsConfig.S3Config.AccessKey = "Server-Access-Key1"
|
||||
|
@ -1741,7 +1746,7 @@ func TestUserGCSConfig(t *testing.T) {
|
|||
user, _, err = httpdtest.UpdateUser(user, http.StatusOK, "")
|
||||
assert.NoError(t, err)
|
||||
user.FsConfig.S3Config = vfs.S3FsConfig{}
|
||||
user.FsConfig.Provider = vfs.GCSFilesystemProvider
|
||||
user.FsConfig.Provider = sdk.GCSFilesystemProvider
|
||||
user.FsConfig.GCSConfig.Bucket = "test1"
|
||||
user.FsConfig.GCSConfig.Credentials = kms.NewPlainSecret("fake credentials")
|
||||
user, _, err = httpdtest.UpdateUser(user, http.StatusOK, "")
|
||||
|
@ -1754,7 +1759,7 @@ func TestUserGCSConfig(t *testing.T) {
|
|||
func TestUserAzureBlobConfig(t *testing.T) {
|
||||
user, _, err := httpdtest.AddUser(getTestUser(), http.StatusCreated)
|
||||
assert.NoError(t, err)
|
||||
user.FsConfig.Provider = vfs.AzureBlobFilesystemProvider
|
||||
user.FsConfig.Provider = sdk.AzureBlobFilesystemProvider
|
||||
user.FsConfig.AzBlobConfig.Container = "test"
|
||||
user.FsConfig.AzBlobConfig.AccountName = "Server-Account-Name"
|
||||
user.FsConfig.AzBlobConfig.AccountKey = kms.NewPlainSecret("Server-Account-Key")
|
||||
|
@ -1793,7 +1798,7 @@ func TestUserAzureBlobConfig(t *testing.T) {
|
|||
assert.NotEmpty(t, initialPayload)
|
||||
assert.Empty(t, user.FsConfig.AzBlobConfig.AccountKey.GetAdditionalData())
|
||||
assert.Empty(t, user.FsConfig.AzBlobConfig.AccountKey.GetKey())
|
||||
user.FsConfig.Provider = vfs.AzureBlobFilesystemProvider
|
||||
user.FsConfig.Provider = sdk.AzureBlobFilesystemProvider
|
||||
user.FsConfig.AzBlobConfig.Container = "test-container"
|
||||
user.FsConfig.AzBlobConfig.Endpoint = "http://localhost:9001"
|
||||
user.FsConfig.AzBlobConfig.KeyPrefix = "somedir/subdir"
|
||||
|
@ -1806,7 +1811,7 @@ func TestUserAzureBlobConfig(t *testing.T) {
|
|||
assert.Empty(t, user.FsConfig.AzBlobConfig.AccountKey.GetAdditionalData())
|
||||
assert.Empty(t, user.FsConfig.AzBlobConfig.AccountKey.GetKey())
|
||||
// test user without access key and access secret (SAS)
|
||||
user.FsConfig.Provider = vfs.AzureBlobFilesystemProvider
|
||||
user.FsConfig.Provider = sdk.AzureBlobFilesystemProvider
|
||||
user.FsConfig.AzBlobConfig.SASURL = kms.NewPlainSecret("https://myaccount.blob.core.windows.net/pictures/profile.jpg?sv=2012-02-12&st=2009-02-09&se=2009-02-10&sr=c&sp=r&si=YWJjZGVmZw%3d%3d&sig=dD80ihBh5jfNpymO5Hg1IdiJIEvHcJpCMiCMnN%2fRnbI%3d")
|
||||
user.FsConfig.AzBlobConfig.KeyPrefix = "somedir/subdir"
|
||||
user.FsConfig.AzBlobConfig.AccountName = ""
|
||||
|
@ -1823,8 +1828,10 @@ func TestUserAzureBlobConfig(t *testing.T) {
|
|||
user.ID = 0
|
||||
// sas test for add instead of update
|
||||
user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{
|
||||
AzBlobFsConfig: sdk.AzBlobFsConfig{
|
||||
Container: user.FsConfig.AzBlobConfig.Container,
|
||||
SASURL: kms.NewPlainSecret("http://127.0.0.1/fake/sass/url"),
|
||||
},
|
||||
}
|
||||
user, _, err = httpdtest.AddUser(user, http.StatusCreated)
|
||||
assert.NoError(t, err)
|
||||
|
@ -1851,7 +1858,7 @@ func TestUserAzureBlobConfig(t *testing.T) {
|
|||
func TestUserCryptFs(t *testing.T) {
|
||||
user, _, err := httpdtest.AddUser(getTestUser(), http.StatusCreated)
|
||||
assert.NoError(t, err)
|
||||
user.FsConfig.Provider = vfs.CryptedFilesystemProvider
|
||||
user.FsConfig.Provider = sdk.CryptedFilesystemProvider
|
||||
user.FsConfig.CryptConfig.Passphrase = kms.NewPlainSecret("crypt passphrase")
|
||||
user, _, err = httpdtest.UpdateUser(user, http.StatusOK, "")
|
||||
assert.NoError(t, err)
|
||||
|
@ -1886,7 +1893,7 @@ func TestUserCryptFs(t *testing.T) {
|
|||
assert.NotEmpty(t, initialPayload)
|
||||
assert.Empty(t, user.FsConfig.CryptConfig.Passphrase.GetAdditionalData())
|
||||
assert.Empty(t, user.FsConfig.CryptConfig.Passphrase.GetKey())
|
||||
user.FsConfig.Provider = vfs.CryptedFilesystemProvider
|
||||
user.FsConfig.Provider = sdk.CryptedFilesystemProvider
|
||||
user.FsConfig.CryptConfig.Passphrase.SetKey("pass")
|
||||
user, bb, err = httpdtest.UpdateUser(user, http.StatusOK, "")
|
||||
assert.NoError(t, err, string(bb))
|
||||
|
@ -1903,7 +1910,7 @@ func TestUserCryptFs(t *testing.T) {
|
|||
func TestUserSFTPFs(t *testing.T) {
|
||||
user, _, err := httpdtest.AddUser(getTestUser(), http.StatusCreated)
|
||||
assert.NoError(t, err)
|
||||
user.FsConfig.Provider = vfs.SFTPFilesystemProvider
|
||||
user.FsConfig.Provider = sdk.SFTPFilesystemProvider
|
||||
user.FsConfig.SFTPConfig.Endpoint = "127.0.0.1" // missing port
|
||||
user.FsConfig.SFTPConfig.Username = "sftp_user"
|
||||
user.FsConfig.SFTPConfig.Password = kms.NewPlainSecret("sftp_pwd")
|
||||
|
@ -1972,7 +1979,7 @@ func TestUserSFTPFs(t *testing.T) {
|
|||
assert.NotEmpty(t, initialPkeyPayload)
|
||||
assert.Empty(t, user.FsConfig.SFTPConfig.PrivateKey.GetAdditionalData())
|
||||
assert.Empty(t, user.FsConfig.SFTPConfig.PrivateKey.GetKey())
|
||||
user.FsConfig.Provider = vfs.SFTPFilesystemProvider
|
||||
user.FsConfig.Provider = sdk.SFTPFilesystemProvider
|
||||
user.FsConfig.SFTPConfig.PrivateKey.SetKey("k")
|
||||
user, bb, err = httpdtest.UpdateUser(user, http.StatusOK, "")
|
||||
assert.NoError(t, err, string(bb))
|
||||
|
@ -2000,7 +2007,7 @@ func TestUserHiddenFields(t *testing.T) {
|
|||
usernames := []string{"user1", "user2", "user3", "user4", "user5"}
|
||||
u1 := getTestUser()
|
||||
u1.Username = usernames[0]
|
||||
u1.FsConfig.Provider = vfs.S3FilesystemProvider
|
||||
u1.FsConfig.Provider = sdk.S3FilesystemProvider
|
||||
u1.FsConfig.S3Config.Bucket = "test"
|
||||
u1.FsConfig.S3Config.Region = "us-east-1"
|
||||
u1.FsConfig.S3Config.AccessKey = "S3-Access-Key"
|
||||
|
@ -2010,7 +2017,7 @@ func TestUserHiddenFields(t *testing.T) {
|
|||
|
||||
u2 := getTestUser()
|
||||
u2.Username = usernames[1]
|
||||
u2.FsConfig.Provider = vfs.GCSFilesystemProvider
|
||||
u2.FsConfig.Provider = sdk.GCSFilesystemProvider
|
||||
u2.FsConfig.GCSConfig.Bucket = "test"
|
||||
u2.FsConfig.GCSConfig.Credentials = kms.NewPlainSecret("fake credentials")
|
||||
user2, _, err := httpdtest.AddUser(u2, http.StatusCreated)
|
||||
|
@ -2018,7 +2025,7 @@ func TestUserHiddenFields(t *testing.T) {
|
|||
|
||||
u3 := getTestUser()
|
||||
u3.Username = usernames[2]
|
||||
u3.FsConfig.Provider = vfs.AzureBlobFilesystemProvider
|
||||
u3.FsConfig.Provider = sdk.AzureBlobFilesystemProvider
|
||||
u3.FsConfig.AzBlobConfig.Container = "test"
|
||||
u3.FsConfig.AzBlobConfig.AccountName = "Server-Account-Name"
|
||||
u3.FsConfig.AzBlobConfig.AccountKey = kms.NewPlainSecret("Server-Account-Key")
|
||||
|
@ -2027,14 +2034,14 @@ func TestUserHiddenFields(t *testing.T) {
|
|||
|
||||
u4 := getTestUser()
|
||||
u4.Username = usernames[3]
|
||||
u4.FsConfig.Provider = vfs.CryptedFilesystemProvider
|
||||
u4.FsConfig.Provider = sdk.CryptedFilesystemProvider
|
||||
u4.FsConfig.CryptConfig.Passphrase = kms.NewPlainSecret("test passphrase")
|
||||
user4, _, err := httpdtest.AddUser(u4, http.StatusCreated)
|
||||
assert.NoError(t, err)
|
||||
|
||||
u5 := getTestUser()
|
||||
u5.Username = usernames[4]
|
||||
u5.FsConfig.Provider = vfs.SFTPFilesystemProvider
|
||||
u5.FsConfig.Provider = sdk.SFTPFilesystemProvider
|
||||
u5.FsConfig.SFTPConfig.Endpoint = "127.0.0.1:2022"
|
||||
u5.FsConfig.SFTPConfig.Username = "sftp_user"
|
||||
u5.FsConfig.SFTPConfig.Password = kms.NewPlainSecret("apassword")
|
||||
|
@ -2555,7 +2562,7 @@ func TestEmbeddedFoldersUpdate(t *testing.T) {
|
|||
assert.Equal(t, int64(0), folder.UsedQuotaSize)
|
||||
assert.Equal(t, int64(0), folder.LastQuotaUpdate)
|
||||
assert.Empty(t, folder.Description)
|
||||
assert.Equal(t, vfs.LocalFilesystemProvider, folder.FsConfig.Provider)
|
||||
assert.Equal(t, sdk.LocalFilesystemProvider, folder.FsConfig.Provider)
|
||||
assert.Len(t, folder.Users, 1)
|
||||
assert.Contains(t, folder.Users, user.Username)
|
||||
// update a field on the folder
|
||||
|
@ -2569,7 +2576,7 @@ func TestEmbeddedFoldersUpdate(t *testing.T) {
|
|||
assert.Equal(t, int64(0), folder.UsedQuotaSize)
|
||||
assert.Equal(t, int64(0), folder.LastQuotaUpdate)
|
||||
assert.Equal(t, description, folder.Description)
|
||||
assert.Equal(t, vfs.LocalFilesystemProvider, folder.FsConfig.Provider)
|
||||
assert.Equal(t, sdk.LocalFilesystemProvider, folder.FsConfig.Provider)
|
||||
// check that the user gets the changes
|
||||
user, _, err = httpdtest.GetUserByUsername(user.Username, http.StatusOK)
|
||||
assert.NoError(t, err)
|
||||
|
@ -2579,7 +2586,7 @@ func TestEmbeddedFoldersUpdate(t *testing.T) {
|
|||
assert.Equal(t, int64(0), userFolder.UsedQuotaSize)
|
||||
assert.Equal(t, int64(0), userFolder.LastQuotaUpdate)
|
||||
assert.Equal(t, description, userFolder.Description)
|
||||
assert.Equal(t, vfs.LocalFilesystemProvider, userFolder.FsConfig.Provider)
|
||||
assert.Equal(t, sdk.LocalFilesystemProvider, userFolder.FsConfig.Provider)
|
||||
// now update the folder embedding it inside the user
|
||||
user.VirtualFolders = []vfs.VirtualFolder{
|
||||
{
|
||||
|
@ -2590,8 +2597,9 @@ func TestEmbeddedFoldersUpdate(t *testing.T) {
|
|||
UsedQuotaSize: 8192,
|
||||
LastQuotaUpdate: 123,
|
||||
FsConfig: vfs.Filesystem{
|
||||
Provider: vfs.S3FilesystemProvider,
|
||||
Provider: sdk.S3FilesystemProvider,
|
||||
S3Config: vfs.S3FsConfig{
|
||||
S3FsConfig: sdk.S3FsConfig{
|
||||
Bucket: "test",
|
||||
Region: "us-east-1",
|
||||
AccessKey: "akey",
|
||||
|
@ -2600,6 +2608,7 @@ func TestEmbeddedFoldersUpdate(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
VirtualPath: "/vdir1",
|
||||
QuotaSize: 4096,
|
||||
QuotaFiles: 1,
|
||||
|
@ -2612,7 +2621,7 @@ func TestEmbeddedFoldersUpdate(t *testing.T) {
|
|||
assert.Equal(t, int64(0), userFolder.UsedQuotaSize)
|
||||
assert.Equal(t, int64(0), userFolder.LastQuotaUpdate)
|
||||
assert.Empty(t, userFolder.Description)
|
||||
assert.Equal(t, vfs.S3FilesystemProvider, userFolder.FsConfig.Provider)
|
||||
assert.Equal(t, sdk.S3FilesystemProvider, userFolder.FsConfig.Provider)
|
||||
assert.Equal(t, "test", userFolder.FsConfig.S3Config.Bucket)
|
||||
assert.Equal(t, "us-east-1", userFolder.FsConfig.S3Config.Region)
|
||||
assert.Equal(t, "http://127.0.1.1:9090", userFolder.FsConfig.S3Config.Endpoint)
|
||||
|
@ -2627,7 +2636,7 @@ func TestEmbeddedFoldersUpdate(t *testing.T) {
|
|||
assert.Equal(t, int64(0), folder.UsedQuotaSize)
|
||||
assert.Equal(t, int64(0), folder.LastQuotaUpdate)
|
||||
assert.Empty(t, folder.Description)
|
||||
assert.Equal(t, vfs.S3FilesystemProvider, folder.FsConfig.Provider)
|
||||
assert.Equal(t, sdk.S3FilesystemProvider, folder.FsConfig.Provider)
|
||||
assert.Equal(t, "test", folder.FsConfig.S3Config.Bucket)
|
||||
assert.Equal(t, "us-east-1", folder.FsConfig.S3Config.Region)
|
||||
assert.Equal(t, "http://127.0.1.1:9090", folder.FsConfig.S3Config.Endpoint)
|
||||
|
@ -2645,7 +2654,7 @@ func TestEmbeddedFoldersUpdate(t *testing.T) {
|
|||
assert.Equal(t, 100, folder.UsedQuotaFiles)
|
||||
assert.Equal(t, int64(32768), folder.UsedQuotaSize)
|
||||
assert.Greater(t, folder.LastQuotaUpdate, int64(0))
|
||||
assert.Equal(t, vfs.S3FilesystemProvider, folder.FsConfig.Provider)
|
||||
assert.Equal(t, sdk.S3FilesystemProvider, folder.FsConfig.Provider)
|
||||
assert.Equal(t, "test", folder.FsConfig.S3Config.Bucket)
|
||||
assert.Equal(t, "us-east-1", folder.FsConfig.S3Config.Region)
|
||||
assert.Equal(t, "http://127.0.1.1:9090", folder.FsConfig.S3Config.Endpoint)
|
||||
|
@ -2662,7 +2671,7 @@ func TestEmbeddedFoldersUpdate(t *testing.T) {
|
|||
assert.Equal(t, int64(32768), userFolder.UsedQuotaSize)
|
||||
assert.Greater(t, userFolder.LastQuotaUpdate, int64(0))
|
||||
assert.Empty(t, userFolder.Description)
|
||||
assert.Equal(t, vfs.S3FilesystemProvider, userFolder.FsConfig.Provider)
|
||||
assert.Equal(t, sdk.S3FilesystemProvider, userFolder.FsConfig.Provider)
|
||||
assert.Equal(t, "test", userFolder.FsConfig.S3Config.Bucket)
|
||||
assert.Equal(t, "us-east-1", userFolder.FsConfig.S3Config.Region)
|
||||
assert.Equal(t, "http://127.0.1.1:9090", userFolder.FsConfig.S3Config.Endpoint)
|
||||
|
@ -2927,9 +2936,9 @@ func TestProviderErrors(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
_, _, err = httpdtest.GetAdmins(1, 0, http.StatusInternalServerError)
|
||||
assert.NoError(t, err)
|
||||
_, _, err = httpdtest.UpdateUser(dataprovider.User{Username: "auser"}, http.StatusInternalServerError, "")
|
||||
_, _, err = httpdtest.UpdateUser(dataprovider.User{BaseUser: sdk.BaseUser{Username: "auser"}}, http.StatusInternalServerError, "")
|
||||
assert.NoError(t, err)
|
||||
_, err = httpdtest.RemoveUser(dataprovider.User{Username: "auser"}, http.StatusInternalServerError)
|
||||
_, err = httpdtest.RemoveUser(dataprovider.User{BaseUser: sdk.BaseUser{Username: "auser"}}, http.StatusInternalServerError)
|
||||
assert.NoError(t, err)
|
||||
_, err = httpdtest.RemoveFolder(vfs.BaseVirtualFolder{Name: "aname"}, http.StatusInternalServerError)
|
||||
assert.NoError(t, err)
|
||||
|
@ -3007,11 +3016,13 @@ func TestFolders(t *testing.T) {
|
|||
MappedPath: "relative path",
|
||||
Users: []string{"1", "2", "3"},
|
||||
FsConfig: vfs.Filesystem{
|
||||
Provider: vfs.CryptedFilesystemProvider,
|
||||
Provider: sdk.CryptedFilesystemProvider,
|
||||
CryptConfig: vfs.CryptFsConfig{
|
||||
CryptFsConfig: sdk.CryptFsConfig{
|
||||
Passphrase: kms.NewPlainSecret("asecret"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
_, _, err := httpdtest.AddFolder(folder, http.StatusBadRequest)
|
||||
assert.NoError(t, err)
|
||||
|
@ -3630,7 +3641,7 @@ func TestBasicUserHandlingMock(t *testing.T) {
|
|||
assert.Equal(t, user.MaxSessions, updatedUser.MaxSessions)
|
||||
assert.Equal(t, user.UploadBandwidth, updatedUser.UploadBandwidth)
|
||||
assert.Equal(t, 1, len(updatedUser.Permissions["/"]))
|
||||
assert.True(t, utils.IsStringInSlice(dataprovider.PermAny, updatedUser.Permissions["/"]))
|
||||
assert.True(t, util.IsStringInSlice(dataprovider.PermAny, updatedUser.Permissions["/"]))
|
||||
req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+user.Username, nil)
|
||||
setBearerForReq(req, token)
|
||||
rr = executeRequest(req)
|
||||
|
@ -3961,7 +3972,7 @@ func TestUpdateUserMock(t *testing.T) {
|
|||
for dir, perms := range permissions {
|
||||
if actualPerms, ok := updatedUser.Permissions[dir]; ok {
|
||||
for _, v := range actualPerms {
|
||||
assert.True(t, utils.IsStringInSlice(v, perms))
|
||||
assert.True(t, util.IsStringInSlice(v, perms))
|
||||
}
|
||||
} else {
|
||||
assert.Fail(t, "Permissions directories mismatch")
|
||||
|
@ -4120,7 +4131,7 @@ func TestUserPermissionsMock(t *testing.T) {
|
|||
err = render.DecodeJSON(rr.Body, &updatedUser)
|
||||
assert.NoError(t, err)
|
||||
if val, ok := updatedUser.Permissions["/otherdir"]; ok {
|
||||
assert.True(t, utils.IsStringInSlice(dataprovider.PermListItems, val))
|
||||
assert.True(t, util.IsStringInSlice(dataprovider.PermListItems, val))
|
||||
assert.Equal(t, 1, len(val))
|
||||
} else {
|
||||
assert.Fail(t, "expected dir not found in permissions")
|
||||
|
@ -5176,7 +5187,7 @@ func TestMaxSessions(t *testing.T) {
|
|||
|
||||
func TestLoginInvalidFs(t *testing.T) {
|
||||
u := getTestUser()
|
||||
u.FsConfig.Provider = vfs.GCSFilesystemProvider
|
||||
u.FsConfig.Provider = sdk.GCSFilesystemProvider
|
||||
u.FsConfig.GCSConfig.Bucket = "test"
|
||||
u.FsConfig.GCSConfig.Credentials = kms.NewPlainSecret("invalid JSON for credentials")
|
||||
user, _, err := httpdtest.AddUser(u, http.StatusCreated)
|
||||
|
@ -5315,7 +5326,7 @@ func TestWebAPIPublicKeys(t *testing.T) {
|
|||
checkResponseCode(t, http.StatusBadRequest, rr)
|
||||
assert.Contains(t, rr.Body.String(), "could not parse key")
|
||||
|
||||
user.Filters.WebClient = append(user.Filters.WebClient, dataprovider.WebClientPubKeyChangeDisabled)
|
||||
user.Filters.WebClient = append(user.Filters.WebClient, sdk.WebClientPubKeyChangeDisabled)
|
||||
_, _, err = httpdtest.UpdateUser(user, http.StatusOK, "")
|
||||
assert.NoError(t, err)
|
||||
|
||||
|
@ -5371,7 +5382,7 @@ func TestWebClientChangePubKeys(t *testing.T) {
|
|||
checkResponseCode(t, http.StatusOK, rr)
|
||||
assert.Contains(t, rr.Body.String(), "Validation error: could not parse key")
|
||||
|
||||
user.Filters.WebClient = append(user.Filters.WebClient, dataprovider.WebClientPubKeyChangeDisabled)
|
||||
user.Filters.WebClient = append(user.Filters.WebClient, sdk.WebClientPubKeyChangeDisabled)
|
||||
_, _, err = httpdtest.UpdateUser(user, http.StatusOK, "")
|
||||
assert.NoError(t, err)
|
||||
webToken, err = getJWTWebClientTokenFromTestServer(defaultUsername, defaultPassword)
|
||||
|
@ -5766,7 +5777,7 @@ func TestGetFilesSFTPBackend(t *testing.T) {
|
|||
u.FsConfig.SFTPConfig.BufferSize = 2
|
||||
u.Permissions["/adir"] = nil
|
||||
u.Permissions["/adir1"] = []string{dataprovider.PermListItems}
|
||||
u.Filters.FilePatterns = []dataprovider.PatternsFilter{
|
||||
u.Filters.FilePatterns = []sdk.PatternsFilter{
|
||||
{
|
||||
Path: "/adir2",
|
||||
DeniedPatterns: []string{"*.txt"},
|
||||
|
@ -6807,7 +6818,7 @@ func TestWebUserAddMock(t *testing.T) {
|
|||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr)
|
||||
assert.Contains(t, rr.Body.String(), "Validation error: invalid TLS username")
|
||||
form.Set("tls_username", string(dataprovider.TLSUsernameNone))
|
||||
form.Set("tls_username", string(sdk.TLSUsernameNone))
|
||||
form.Set(csrfFormToken, "invalid form token")
|
||||
b, contentType, _ = getMultipartFormData(form, "", "")
|
||||
req, _ = http.NewRequest(http.MethodPost, webUserPath, &b)
|
||||
|
@ -6853,10 +6864,10 @@ func TestWebUserAddMock(t *testing.T) {
|
|||
assert.False(t, newUser.Filters.Hooks.PreLoginDisabled)
|
||||
assert.False(t, newUser.Filters.Hooks.CheckPasswordDisabled)
|
||||
assert.True(t, newUser.Filters.DisableFsChecks)
|
||||
assert.True(t, utils.IsStringInSlice(testPubKey, newUser.PublicKeys))
|
||||
assert.True(t, util.IsStringInSlice(testPubKey, newUser.PublicKeys))
|
||||
if val, ok := newUser.Permissions["/subdir"]; ok {
|
||||
assert.True(t, utils.IsStringInSlice(dataprovider.PermListItems, val))
|
||||
assert.True(t, utils.IsStringInSlice(dataprovider.PermDownload, val))
|
||||
assert.True(t, util.IsStringInSlice(dataprovider.PermListItems, val))
|
||||
assert.True(t, util.IsStringInSlice(dataprovider.PermDownload, val))
|
||||
} else {
|
||||
assert.Fail(t, "user permissions must contain /somedir", "actual: %v", newUser.Permissions)
|
||||
}
|
||||
|
@ -6874,23 +6885,23 @@ func TestWebUserAddMock(t *testing.T) {
|
|||
if filter.Path == "/dir1" {
|
||||
assert.Len(t, filter.DeniedPatterns, 1)
|
||||
assert.Len(t, filter.AllowedPatterns, 1)
|
||||
assert.True(t, utils.IsStringInSlice("*.png", filter.AllowedPatterns))
|
||||
assert.True(t, utils.IsStringInSlice("*.zip", filter.DeniedPatterns))
|
||||
assert.True(t, util.IsStringInSlice("*.png", filter.AllowedPatterns))
|
||||
assert.True(t, util.IsStringInSlice("*.zip", filter.DeniedPatterns))
|
||||
}
|
||||
if filter.Path == "/dir2" {
|
||||
assert.Len(t, filter.DeniedPatterns, 1)
|
||||
assert.Len(t, filter.AllowedPatterns, 2)
|
||||
assert.True(t, utils.IsStringInSlice("*.jpg", filter.AllowedPatterns))
|
||||
assert.True(t, utils.IsStringInSlice("*.png", filter.AllowedPatterns))
|
||||
assert.True(t, utils.IsStringInSlice("*.mkv", filter.DeniedPatterns))
|
||||
assert.True(t, util.IsStringInSlice("*.jpg", filter.AllowedPatterns))
|
||||
assert.True(t, util.IsStringInSlice("*.png", filter.AllowedPatterns))
|
||||
assert.True(t, util.IsStringInSlice("*.mkv", filter.DeniedPatterns))
|
||||
}
|
||||
if filter.Path == "/dir3" {
|
||||
assert.Len(t, filter.DeniedPatterns, 1)
|
||||
assert.Len(t, filter.AllowedPatterns, 0)
|
||||
assert.True(t, utils.IsStringInSlice("*.rar", filter.DeniedPatterns))
|
||||
assert.True(t, util.IsStringInSlice("*.rar", filter.DeniedPatterns))
|
||||
}
|
||||
}
|
||||
assert.Equal(t, dataprovider.TLSUsernameNone, newUser.Filters.TLSUsername)
|
||||
assert.Equal(t, sdk.TLSUsernameNone, newUser.Filters.TLSUsername)
|
||||
req, _ = http.NewRequest(http.MethodDelete, path.Join(userPath, newUser.Username), nil)
|
||||
setBearerForReq(req, apiToken)
|
||||
rr = executeRequest(req)
|
||||
|
@ -6954,7 +6965,7 @@ func TestWebUserUpdateMock(t *testing.T) {
|
|||
form.Set("disconnect", "1")
|
||||
form.Set("additional_info", user.AdditionalInfo)
|
||||
form.Set("description", user.Description)
|
||||
form.Set("tls_username", string(dataprovider.TLSUsernameCN))
|
||||
form.Set("tls_username", string(sdk.TLSUsernameCN))
|
||||
b, contentType, _ := getMultipartFormData(form, "", "")
|
||||
req, _ = http.NewRequest(http.MethodPost, path.Join(webUserPath, user.Username), &b)
|
||||
setJWTCookieForReq(req, webToken)
|
||||
|
@ -7017,19 +7028,19 @@ func TestWebUserUpdateMock(t *testing.T) {
|
|||
assert.Equal(t, user.AdditionalInfo, updateUser.AdditionalInfo)
|
||||
assert.Equal(t, user.Description, updateUser.Description)
|
||||
assert.Equal(t, int64(100), updateUser.Filters.MaxUploadFileSize)
|
||||
assert.Equal(t, dataprovider.TLSUsernameCN, updateUser.Filters.TLSUsername)
|
||||
assert.Equal(t, sdk.TLSUsernameCN, updateUser.Filters.TLSUsername)
|
||||
|
||||
if val, ok := updateUser.Permissions["/otherdir"]; ok {
|
||||
assert.True(t, utils.IsStringInSlice(dataprovider.PermListItems, val))
|
||||
assert.True(t, utils.IsStringInSlice(dataprovider.PermUpload, val))
|
||||
assert.True(t, util.IsStringInSlice(dataprovider.PermListItems, val))
|
||||
assert.True(t, util.IsStringInSlice(dataprovider.PermUpload, val))
|
||||
} else {
|
||||
assert.Fail(t, "user permissions must contains /otherdir", "actual: %v", updateUser.Permissions)
|
||||
}
|
||||
assert.True(t, utils.IsStringInSlice("192.168.1.3/32", updateUser.Filters.AllowedIP))
|
||||
assert.True(t, utils.IsStringInSlice("10.0.0.2/32", updateUser.Filters.DeniedIP))
|
||||
assert.True(t, utils.IsStringInSlice(dataprovider.SSHLoginMethodKeyboardInteractive, updateUser.Filters.DeniedLoginMethods))
|
||||
assert.True(t, utils.IsStringInSlice(common.ProtocolFTP, updateUser.Filters.DeniedProtocols))
|
||||
assert.True(t, utils.IsStringInSlice("*.zip", updateUser.Filters.FilePatterns[0].DeniedPatterns))
|
||||
assert.True(t, util.IsStringInSlice("192.168.1.3/32", updateUser.Filters.AllowedIP))
|
||||
assert.True(t, util.IsStringInSlice("10.0.0.2/32", updateUser.Filters.DeniedIP))
|
||||
assert.True(t, util.IsStringInSlice(dataprovider.SSHLoginMethodKeyboardInteractive, updateUser.Filters.DeniedLoginMethods))
|
||||
assert.True(t, util.IsStringInSlice(common.ProtocolFTP, updateUser.Filters.DeniedProtocols))
|
||||
assert.True(t, util.IsStringInSlice("*.zip", updateUser.Filters.FilePatterns[0].DeniedPatterns))
|
||||
req, err = http.NewRequest(http.MethodDelete, path.Join(userPath, user.Username), nil)
|
||||
assert.NoError(t, err)
|
||||
setBearerForReq(req, apiToken)
|
||||
|
@ -7220,7 +7231,7 @@ func TestUserTemplateMock(t *testing.T) {
|
|||
token, err := getJWTWebTokenFromTestServer(defaultTokenAuthUser, defaultTokenAuthPass)
|
||||
assert.NoError(t, err)
|
||||
user := getTestUser()
|
||||
user.FsConfig.Provider = vfs.S3FilesystemProvider
|
||||
user.FsConfig.Provider = sdk.S3FilesystemProvider
|
||||
user.FsConfig.S3Config.Bucket = "test"
|
||||
user.FsConfig.S3Config.Region = "eu-central-1"
|
||||
user.FsConfig.S3Config.AccessKey = "%username%"
|
||||
|
@ -7322,9 +7333,9 @@ func TestUserTemplateMock(t *testing.T) {
|
|||
user1 := dump.Users[0]
|
||||
user2 := dump.Users[1]
|
||||
require.Equal(t, "user1", user1.Username)
|
||||
require.Equal(t, vfs.S3FilesystemProvider, user1.FsConfig.Provider)
|
||||
require.Equal(t, sdk.S3FilesystemProvider, user1.FsConfig.Provider)
|
||||
require.Equal(t, "user2", user2.Username)
|
||||
require.Equal(t, vfs.S3FilesystemProvider, user2.FsConfig.Provider)
|
||||
require.Equal(t, sdk.S3FilesystemProvider, user2.FsConfig.Provider)
|
||||
require.Len(t, user2.PublicKeys, 1)
|
||||
require.Equal(t, filepath.Join(os.TempDir(), user1.Username), user1.HomeDir)
|
||||
require.Equal(t, filepath.Join(os.TempDir(), user2.Username), user2.HomeDir)
|
||||
|
@ -7493,7 +7504,7 @@ func TestWebUserS3Mock(t *testing.T) {
|
|||
checkResponseCode(t, http.StatusCreated, rr)
|
||||
err = render.DecodeJSON(rr.Body, &user)
|
||||
assert.NoError(t, err)
|
||||
user.FsConfig.Provider = vfs.S3FilesystemProvider
|
||||
user.FsConfig.Provider = sdk.S3FilesystemProvider
|
||||
user.FsConfig.S3Config.Bucket = "test"
|
||||
user.FsConfig.S3Config.Region = "eu-west-1"
|
||||
user.FsConfig.S3Config.AccessKey = "access-key"
|
||||
|
@ -7652,7 +7663,7 @@ func TestWebUserGCSMock(t *testing.T) {
|
|||
credentialsFilePath := filepath.Join(os.TempDir(), "gcs.json")
|
||||
err = createTestFile(credentialsFilePath, 0)
|
||||
assert.NoError(t, err)
|
||||
user.FsConfig.Provider = vfs.GCSFilesystemProvider
|
||||
user.FsConfig.Provider = sdk.GCSFilesystemProvider
|
||||
user.FsConfig.GCSConfig.Bucket = "test"
|
||||
user.FsConfig.GCSConfig.KeyPrefix = "somedir/subdir/"
|
||||
user.FsConfig.GCSConfig.StorageClass = "standard"
|
||||
|
@ -7757,7 +7768,7 @@ func TestWebUserAzureBlobMock(t *testing.T) {
|
|||
checkResponseCode(t, http.StatusCreated, rr)
|
||||
err = render.DecodeJSON(rr.Body, &user)
|
||||
assert.NoError(t, err)
|
||||
user.FsConfig.Provider = vfs.AzureBlobFilesystemProvider
|
||||
user.FsConfig.Provider = sdk.AzureBlobFilesystemProvider
|
||||
user.FsConfig.AzBlobConfig.Container = "container"
|
||||
user.FsConfig.AzBlobConfig.AccountName = "aname"
|
||||
user.FsConfig.AzBlobConfig.AccountKey = kms.NewPlainSecret("access-skey")
|
||||
|
@ -7924,7 +7935,7 @@ func TestWebUserCryptMock(t *testing.T) {
|
|||
checkResponseCode(t, http.StatusCreated, rr)
|
||||
err = render.DecodeJSON(rr.Body, &user)
|
||||
assert.NoError(t, err)
|
||||
user.FsConfig.Provider = vfs.CryptedFilesystemProvider
|
||||
user.FsConfig.Provider = sdk.CryptedFilesystemProvider
|
||||
user.FsConfig.CryptConfig.Passphrase = kms.NewPlainSecret("crypted passphrase")
|
||||
form := make(url.Values)
|
||||
form.Set(csrfFormToken, csrfToken)
|
||||
|
@ -8019,7 +8030,7 @@ func TestWebUserSFTPFsMock(t *testing.T) {
|
|||
checkResponseCode(t, http.StatusCreated, rr)
|
||||
err = render.DecodeJSON(rr.Body, &user)
|
||||
assert.NoError(t, err)
|
||||
user.FsConfig.Provider = vfs.SFTPFilesystemProvider
|
||||
user.FsConfig.Provider = sdk.SFTPFilesystemProvider
|
||||
user.FsConfig.SFTPConfig.Endpoint = "127.0.0.1:22"
|
||||
user.FsConfig.SFTPConfig.Username = "sftpuser"
|
||||
user.FsConfig.SFTPConfig.Password = kms.NewPlainSecret("pwd")
|
||||
|
@ -8266,7 +8277,7 @@ func TestS3WebFolderMock(t *testing.T) {
|
|||
assert.Equal(t, mappedPath, folder.MappedPath)
|
||||
assert.Equal(t, folderName, folder.Name)
|
||||
assert.Equal(t, folderDesc, folder.Description)
|
||||
assert.Equal(t, vfs.S3FilesystemProvider, folder.FsConfig.Provider)
|
||||
assert.Equal(t, sdk.S3FilesystemProvider, folder.FsConfig.Provider)
|
||||
assert.Equal(t, S3Bucket, folder.FsConfig.S3Config.Bucket)
|
||||
assert.Equal(t, S3Region, folder.FsConfig.S3Config.Region)
|
||||
assert.Equal(t, S3AccessKey, folder.FsConfig.S3Config.AccessKey)
|
||||
|
@ -8308,7 +8319,7 @@ func TestS3WebFolderMock(t *testing.T) {
|
|||
assert.Equal(t, mappedPath, folder.MappedPath)
|
||||
assert.Equal(t, folderName, folder.Name)
|
||||
assert.Equal(t, folderDesc, folder.Description)
|
||||
assert.Equal(t, vfs.S3FilesystemProvider, folder.FsConfig.Provider)
|
||||
assert.Equal(t, sdk.S3FilesystemProvider, folder.FsConfig.Provider)
|
||||
assert.Equal(t, S3Bucket, folder.FsConfig.S3Config.Bucket)
|
||||
assert.Equal(t, S3Region, folder.FsConfig.S3Config.Region)
|
||||
assert.Equal(t, S3AccessKey, folder.FsConfig.S3Config.AccessKey)
|
||||
|
@ -8690,11 +8701,13 @@ func getTestAdmin() dataprovider.Admin {
func getTestUser() dataprovider.User {
user := dataprovider.User{
BaseUser: sdk.BaseUser{
Username: defaultUsername,
Password: defaultPassword,
HomeDir: filepath.Join(homeBasePath, defaultUsername),
Status: 1,
Description: "test user",
},
}
user.Permissions = make(map[string][]string)
user.Permissions["/"] = defaultPerms

@ -8704,7 +8717,7 @@ func getTestUser() dataprovider.User {
func getTestSFTPUser() dataprovider.User {
u := getTestUser()
u.Username = u.Username + "_sftp"
u.FsConfig.Provider = vfs.SFTPFilesystemProvider
u.FsConfig.Provider = sdk.SFTPFilesystemProvider
u.FsConfig.SFTPConfig.Endpoint = sftpServerAddr
u.FsConfig.SFTPConfig.Username = defaultUsername
u.FsConfig.SFTPConfig.Password = kms.NewPlainSecret(defaultPassword)
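The change above moves the account fields behind an embedded sdk.BaseUser. Because the embedding is anonymous, those fields remain promoted, which is why call sites such as getTestSFTPUser can keep assigning u.Username directly. A minimal sketch of that equivalence, assuming only the types shown in this hunk:

    u := dataprovider.User{
        BaseUser: sdk.BaseUser{Username: defaultUsername, Status: 1},
    }
    // Both assignments touch the same promoted field of the embedded struct.
    u.Username = defaultUsername + "_sftp"
    u.BaseUser.Username = defaultUsername + "_sftp"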
@ -32,7 +32,8 @@ import (
"github.com/drakkan/sftpgo/v2/common"
"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/kms"
"github.com/drakkan/sftpgo/v2/utils"
"github.com/drakkan/sftpgo/v2/sdk"
"github.com/drakkan/sftpgo/v2/util"
"github.com/drakkan/sftpgo/v2/vfs"
)
@ -298,7 +299,7 @@ func TestShouldBind(t *testing.T) {
|
|||
|
||||
func TestGetRespStatus(t *testing.T) {
|
||||
var err error
|
||||
err = utils.NewMethodDisabledError("")
|
||||
err = util.NewMethodDisabledError("")
|
||||
respStatus := getRespStatus(err)
|
||||
assert.Equal(t, http.StatusForbidden, respStatus)
|
||||
err = fmt.Errorf("generic error")
|
||||
|
@ -457,16 +458,16 @@ func TestCSRFToken(t *testing.T) {
|
|||
assert.Equal(t, http.StatusForbidden, rr.Code)
|
||||
assert.Contains(t, rr.Body.String(), "the token is not valid")
|
||||
|
||||
csrfTokenAuth = jwtauth.New("PS256", utils.GenerateRandomBytes(32), nil)
|
||||
csrfTokenAuth = jwtauth.New("PS256", util.GenerateRandomBytes(32), nil)
|
||||
tokenString = createCSRFToken()
|
||||
assert.Empty(t, tokenString)
|
||||
|
||||
csrfTokenAuth = jwtauth.New(jwa.HS256.String(), utils.GenerateRandomBytes(32), nil)
|
||||
csrfTokenAuth = jwtauth.New(jwa.HS256.String(), util.GenerateRandomBytes(32), nil)
|
||||
}
|
||||
|
||||
func TestCreateTokenError(t *testing.T) {
|
||||
server := httpdServer{
|
||||
tokenAuth: jwtauth.New("PS256", utils.GenerateRandomBytes(32), nil),
|
||||
tokenAuth: jwtauth.New("PS256", util.GenerateRandomBytes(32), nil),
|
||||
}
|
||||
rr := httptest.NewRecorder()
|
||||
admin := dataprovider.Admin{
|
||||
|
@ -480,8 +481,10 @@ func TestCreateTokenError(t *testing.T) {
|
|||
|
||||
rr = httptest.NewRecorder()
|
||||
user := dataprovider.User{
|
||||
BaseUser: sdk.BaseUser{
|
||||
Username: "u",
|
||||
Password: "pwd",
|
||||
},
|
||||
}
|
||||
req, _ = http.NewRequest(http.MethodGet, userTokenPath, nil)
|
||||
|
||||
|
@ -540,11 +543,13 @@ func TestCreateTokenError(t *testing.T) {
|
|||
|
||||
username := "webclientuser"
|
||||
user = dataprovider.User{
|
||||
BaseUser: sdk.BaseUser{
|
||||
Username: username,
|
||||
Password: "clientpwd",
|
||||
HomeDir: filepath.Join(os.TempDir(), username),
|
||||
Status: 1,
|
||||
Description: "test user",
|
||||
},
|
||||
}
|
||||
user.Permissions = make(map[string][]string)
|
||||
user.Permissions["/"] = []string{"*"}
|
||||
|
@ -567,7 +572,7 @@ func TestCreateTokenError(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestJWTTokenValidation(t *testing.T) {
|
||||
tokenAuth := jwtauth.New(jwa.HS256.String(), utils.GenerateRandomBytes(32), nil)
|
||||
tokenAuth := jwtauth.New(jwa.HS256.String(), util.GenerateRandomBytes(32), nil)
|
||||
claims := make(map[string]interface{})
|
||||
claims["username"] = "admin"
|
||||
claims[jwt.ExpirationKey] = time.Now().UTC().Add(-1 * time.Hour)
|
||||
|
@ -616,7 +621,7 @@ func TestJWTTokenValidation(t *testing.T) {
|
|||
fn.ServeHTTP(rr, req.WithContext(ctx))
|
||||
assert.Equal(t, http.StatusBadRequest, rr.Code)
|
||||
|
||||
permClientFn := checkHTTPUserPerm(dataprovider.WebClientPubKeyChangeDisabled)
|
||||
permClientFn := checkHTTPUserPerm(sdk.WebClientPubKeyChangeDisabled)
|
||||
fn = permClientFn(r)
|
||||
rr = httptest.NewRecorder()
|
||||
req, _ = http.NewRequest(http.MethodPost, webChangeClientKeysPath, nil)
|
||||
|
@ -635,7 +640,7 @@ func TestJWTTokenValidation(t *testing.T) {
|
|||
|
||||
func TestUpdateContextFromCookie(t *testing.T) {
|
||||
server := httpdServer{
|
||||
tokenAuth: jwtauth.New(jwa.HS256.String(), utils.GenerateRandomBytes(32), nil),
|
||||
tokenAuth: jwtauth.New(jwa.HS256.String(), util.GenerateRandomBytes(32), nil),
|
||||
}
|
||||
req, _ := http.NewRequest(http.MethodGet, tokenPath, nil)
|
||||
claims := make(map[string]interface{})
|
||||
|
@ -649,7 +654,7 @@ func TestUpdateContextFromCookie(t *testing.T) {
|
|||
|
||||
func TestCookieExpiration(t *testing.T) {
|
||||
server := httpdServer{
|
||||
tokenAuth: jwtauth.New(jwa.HS256.String(), utils.GenerateRandomBytes(32), nil),
|
||||
tokenAuth: jwtauth.New(jwa.HS256.String(), util.GenerateRandomBytes(32), nil),
|
||||
}
|
||||
err := errors.New("test error")
|
||||
rr := httptest.NewRecorder()
|
||||
|
@ -736,11 +741,13 @@ func TestCookieExpiration(t *testing.T) {
|
|||
// now check client cookie expiration
|
||||
username := "client"
|
||||
user := dataprovider.User{
|
||||
BaseUser: sdk.BaseUser{
|
||||
Username: username,
|
||||
Password: "clientpwd",
|
||||
HomeDir: filepath.Join(os.TempDir(), username),
|
||||
Status: 1,
|
||||
Description: "test user",
|
||||
},
|
||||
}
|
||||
user.Permissions = make(map[string][]string)
|
||||
user.Permissions["/"] = []string{"*"}
|
||||
|
@ -862,10 +869,12 @@ func TestRenderInvalidTemplate(t *testing.T) {
|
|||
|
||||
func TestQuotaScanInvalidFs(t *testing.T) {
|
||||
user := dataprovider.User{
|
||||
BaseUser: sdk.BaseUser{
|
||||
Username: "test",
|
||||
HomeDir: os.TempDir(),
|
||||
},
|
||||
FsConfig: vfs.Filesystem{
|
||||
Provider: vfs.S3FilesystemProvider,
|
||||
Provider: sdk.S3FilesystemProvider,
|
||||
},
|
||||
}
|
||||
common.QuotaScans.AddUserQuotaScan(user.Username)
|
||||
|
@ -947,24 +956,24 @@ func TestGetFolderFromTemplate(t *testing.T) {
|
|||
require.Equal(t, fmt.Sprintf("Folder%v", folderName), folderTemplate.MappedPath)
|
||||
require.Equal(t, fmt.Sprintf("Folder %v desc", folderName), folderTemplate.Description)
|
||||
|
||||
folder.FsConfig.Provider = vfs.CryptedFilesystemProvider
|
||||
folder.FsConfig.Provider = sdk.CryptedFilesystemProvider
|
||||
folder.FsConfig.CryptConfig.Passphrase = kms.NewPlainSecret("%name%")
|
||||
folderTemplate = getFolderFromTemplate(folder, folderName)
|
||||
require.Equal(t, folderName, folderTemplate.FsConfig.CryptConfig.Passphrase.GetPayload())
|
||||
|
||||
folder.FsConfig.Provider = vfs.GCSFilesystemProvider
|
||||
folder.FsConfig.Provider = sdk.GCSFilesystemProvider
|
||||
folder.FsConfig.GCSConfig.KeyPrefix = "prefix%name%/"
|
||||
folderTemplate = getFolderFromTemplate(folder, folderName)
|
||||
require.Equal(t, fmt.Sprintf("prefix%v/", folderName), folderTemplate.FsConfig.GCSConfig.KeyPrefix)
|
||||
|
||||
folder.FsConfig.Provider = vfs.AzureBlobFilesystemProvider
|
||||
folder.FsConfig.Provider = sdk.AzureBlobFilesystemProvider
|
||||
folder.FsConfig.AzBlobConfig.KeyPrefix = "a%name%"
|
||||
folder.FsConfig.AzBlobConfig.AccountKey = kms.NewPlainSecret("pwd%name%")
|
||||
folderTemplate = getFolderFromTemplate(folder, folderName)
|
||||
require.Equal(t, "a"+folderName, folderTemplate.FsConfig.AzBlobConfig.KeyPrefix)
|
||||
require.Equal(t, "pwd"+folderName, folderTemplate.FsConfig.AzBlobConfig.AccountKey.GetPayload())
|
||||
|
||||
folder.FsConfig.Provider = vfs.SFTPFilesystemProvider
|
||||
folder.FsConfig.Provider = sdk.SFTPFilesystemProvider
|
||||
folder.FsConfig.SFTPConfig.Prefix = "%name%"
|
||||
folder.FsConfig.SFTPConfig.Username = "sftp_%name%"
|
||||
folder.FsConfig.SFTPConfig.Password = kms.NewPlainSecret("sftp%name%")
|
||||
|
@ -976,7 +985,9 @@ func TestGetFolderFromTemplate(t *testing.T) {
|
|||
|
||||
func TestGetUserFromTemplate(t *testing.T) {
|
||||
user := dataprovider.User{
|
||||
BaseUser: sdk.BaseUser{
|
||||
Status: 1,
|
||||
},
|
||||
}
|
||||
user.VirtualFolders = append(user.VirtualFolders, vfs.VirtualFolder{
|
||||
BaseVirtualFolder: vfs.BaseVirtualFolder{
|
||||
|
@ -995,24 +1006,24 @@ func TestGetUserFromTemplate(t *testing.T) {
|
|||
require.Len(t, userTemplate.VirtualFolders, 1)
|
||||
require.Equal(t, "Folder"+username, userTemplate.VirtualFolders[0].Name)
|
||||
|
||||
user.FsConfig.Provider = vfs.CryptedFilesystemProvider
|
||||
user.FsConfig.Provider = sdk.CryptedFilesystemProvider
|
||||
user.FsConfig.CryptConfig.Passphrase = kms.NewPlainSecret("%password%")
|
||||
userTemplate = getUserFromTemplate(user, templateFields)
|
||||
require.Equal(t, password, userTemplate.FsConfig.CryptConfig.Passphrase.GetPayload())
|
||||
|
||||
user.FsConfig.Provider = vfs.GCSFilesystemProvider
|
||||
user.FsConfig.Provider = sdk.GCSFilesystemProvider
|
||||
user.FsConfig.GCSConfig.KeyPrefix = "%username%%password%"
|
||||
userTemplate = getUserFromTemplate(user, templateFields)
|
||||
require.Equal(t, username+password, userTemplate.FsConfig.GCSConfig.KeyPrefix)
|
||||
|
||||
user.FsConfig.Provider = vfs.AzureBlobFilesystemProvider
|
||||
user.FsConfig.Provider = sdk.AzureBlobFilesystemProvider
|
||||
user.FsConfig.AzBlobConfig.KeyPrefix = "a%username%"
|
||||
user.FsConfig.AzBlobConfig.AccountKey = kms.NewPlainSecret("pwd%password%%username%")
|
||||
userTemplate = getUserFromTemplate(user, templateFields)
|
||||
require.Equal(t, "a"+username, userTemplate.FsConfig.AzBlobConfig.KeyPrefix)
|
||||
require.Equal(t, "pwd"+password+username, userTemplate.FsConfig.AzBlobConfig.AccountKey.GetPayload())
|
||||
|
||||
user.FsConfig.Provider = vfs.SFTPFilesystemProvider
|
||||
user.FsConfig.Provider = sdk.SFTPFilesystemProvider
|
||||
user.FsConfig.SFTPConfig.Prefix = "%username%"
|
||||
user.FsConfig.SFTPConfig.Username = "sftp_%username%"
|
||||
user.FsConfig.SFTPConfig.Password = kms.NewPlainSecret("sftp%password%")
|
||||
|
@ -1024,7 +1035,7 @@ func TestGetUserFromTemplate(t *testing.T) {
|
|||
|
||||
func TestJWTTokenCleanup(t *testing.T) {
|
||||
server := httpdServer{
|
||||
tokenAuth: jwtauth.New(jwa.HS256.String(), utils.GenerateRandomBytes(32), nil),
|
||||
tokenAuth: jwtauth.New(jwa.HS256.String(), util.GenerateRandomBytes(32), nil),
|
||||
}
|
||||
admin := dataprovider.Admin{
|
||||
Username: "newtestadmin",
|
||||
|
@ -1208,7 +1219,9 @@ func TestCompressorAbortHandler(t *testing.T) {
|
|||
|
||||
func TestZipErrors(t *testing.T) {
|
||||
user := dataprovider.User{
|
||||
BaseUser: sdk.BaseUser{
|
||||
HomeDir: filepath.Clean(os.TempDir()),
|
||||
},
|
||||
}
|
||||
user.Permissions = make(map[string][]string)
|
||||
user.Permissions["/"] = []string{dataprovider.PermAny}
|
||||
|
@ -1233,7 +1246,7 @@ func TestZipErrors(t *testing.T) {
|
|||
}
|
||||
|
||||
testFilePath := filepath.Join(testDir, "ziptest.zip")
|
||||
err = os.WriteFile(testFilePath, utils.GenerateRandomBytes(65535), os.ModePerm)
|
||||
err = os.WriteFile(testFilePath, util.GenerateRandomBytes(65535), os.ModePerm)
|
||||
assert.NoError(t, err)
|
||||
err = addZipEntry(wr, connection, path.Join("/", filepath.Base(testDir), filepath.Base(testFilePath)),
|
||||
"/"+filepath.Base(testDir))
|
||||
|
@ -1258,7 +1271,7 @@ func TestZipErrors(t *testing.T) {
|
|||
err = addZipEntry(wr, connection, user.VirtualFolders[0].VirtualPath, "/")
|
||||
assert.Error(t, err)
|
||||
|
||||
user.Filters.FilePatterns = append(user.Filters.FilePatterns, dataprovider.PatternsFilter{
|
||||
user.Filters.FilePatterns = append(user.Filters.FilePatterns, sdk.PatternsFilter{
|
||||
Path: "/",
|
||||
DeniedPatterns: []string{"*.zip"},
|
||||
})
|
||||
|
@ -1412,15 +1425,19 @@ func TestRequestHeaderErrors(t *testing.T) {
|
|||
|
||||
func TestConnection(t *testing.T) {
|
||||
user := dataprovider.User{
|
||||
BaseUser: sdk.BaseUser{
|
||||
Username: "test_httpd_user",
|
||||
HomeDir: filepath.Clean(os.TempDir()),
|
||||
},
|
||||
FsConfig: vfs.Filesystem{
|
||||
Provider: vfs.GCSFilesystemProvider,
|
||||
Provider: sdk.GCSFilesystemProvider,
|
||||
GCSConfig: vfs.GCSFsConfig{
|
||||
GCSFsConfig: sdk.GCSFsConfig{
|
||||
Bucket: "test_bucket_name",
|
||||
Credentials: kms.NewPlainSecret("invalid JSON payload"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
user.Permissions = make(map[string][]string)
|
||||
user.Permissions["/"] = []string{dataprovider.PermAny}
|
||||
|
@ -1434,15 +1451,17 @@ func TestConnection(t *testing.T) {
|
|||
name := "missing file name"
|
||||
_, err := connection.getFileReader(name, 0, http.MethodGet)
|
||||
assert.Error(t, err)
|
||||
connection.User.FsConfig.Provider = vfs.LocalFilesystemProvider
|
||||
connection.User.FsConfig.Provider = sdk.LocalFilesystemProvider
|
||||
_, err = connection.getFileReader(name, 0, http.MethodGet)
|
||||
assert.ErrorIs(t, err, os.ErrNotExist)
|
||||
}
|
||||
|
||||
func TestHTTPDFile(t *testing.T) {
|
||||
user := dataprovider.User{
|
||||
BaseUser: sdk.BaseUser{
|
||||
Username: "test_httpd_user",
|
||||
HomeDir: filepath.Clean(os.TempDir()),
|
||||
},
|
||||
}
|
||||
user.Permissions = make(map[string][]string)
|
||||
user.Permissions["/"] = []string{dataprovider.PermAny}
|
||||
|
@ -1500,8 +1519,10 @@ func TestGetFilesInvalidClaims(t *testing.T) {
|
|||
|
||||
rr := httptest.NewRecorder()
|
||||
user := dataprovider.User{
|
||||
BaseUser: sdk.BaseUser{
|
||||
Username: "",
|
||||
Password: "pwd",
|
||||
},
|
||||
}
|
||||
c := jwtTokenClaims{
|
||||
Username: user.Username,
|
||||
|
@ -1538,8 +1559,10 @@ func TestManageKeysInvalidClaims(t *testing.T) {
|
|||
|
||||
rr := httptest.NewRecorder()
|
||||
user := dataprovider.User{
|
||||
BaseUser: sdk.BaseUser{
|
||||
Username: "",
|
||||
Password: "pwd",
|
||||
},
|
||||
}
|
||||
c := jwtTokenClaims{
|
||||
Username: user.Username,
|
||||
|
@ -1585,8 +1608,10 @@ func TestSigningKey(t *testing.T) {
|
|||
server2.initializeRouter()
|
||||
|
||||
user := dataprovider.User{
|
||||
BaseUser: sdk.BaseUser{
|
||||
Username: "",
|
||||
Password: "pwd",
|
||||
},
|
||||
}
|
||||
c := jwtTokenClaims{
|
||||
Username: user.Username,
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
"github.com/lestrrat-go/jwx/jwt"
|
||||
|
||||
"github.com/drakkan/sftpgo/v2/logger"
|
||||
"github.com/drakkan/sftpgo/v2/utils"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -58,7 +58,7 @@ func validateJWTToken(w http.ResponseWriter, r *http.Request, audience tokenAudi
|
|||
}
|
||||
return errInvalidToken
|
||||
}
|
||||
if !utils.IsStringInSlice(audience, token.Audience()) {
|
||||
if !util.IsStringInSlice(audience, token.Audience()) {
|
||||
logger.Debug(logSender, "", "the token is not valid for audience %#v", audience)
|
||||
if isAPIToken {
|
||||
sendAPIResponse(w, r, nil, "Your token audience is not valid", http.StatusUnauthorized)
|
||||
|
@ -192,7 +192,7 @@ func verifyCSRFHeader(next http.Handler) http.Handler {
|
|||
return
|
||||
}
|
||||
|
||||
if !utils.IsStringInSlice(tokenAudienceCSRF, token.Audience()) {
|
||||
if !util.IsStringInSlice(tokenAudienceCSRF, token.Audience()) {
|
||||
logger.Debug(logSender, "", "error validating CSRF header audience")
|
||||
sendAPIResponse(w, r, errors.New("the token is not valid"), "", http.StatusForbidden)
|
||||
return
|
||||
|
|
|
@ -21,7 +21,8 @@ import (
|
|||
"github.com/drakkan/sftpgo/v2/common"
|
||||
"github.com/drakkan/sftpgo/v2/dataprovider"
|
||||
"github.com/drakkan/sftpgo/v2/logger"
|
||||
"github.com/drakkan/sftpgo/v2/utils"
|
||||
"github.com/drakkan/sftpgo/v2/sdk"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
"github.com/drakkan/sftpgo/v2/version"
|
||||
)
|
||||
|
||||
|
@ -65,7 +66,7 @@ func (s *httpdServer) listenAndServe() error {
|
|||
config := &tls.Config{
|
||||
GetCertificate: certMgr.GetCertificateFunc(),
|
||||
MinVersion: tls.VersionTLS12,
|
||||
CipherSuites: utils.GetTLSCiphersFromNames(s.binding.TLSCipherSuites),
|
||||
CipherSuites: util.GetTLSCiphersFromNames(s.binding.TLSCipherSuites),
|
||||
PreferServerCipherSuites: true,
|
||||
}
|
||||
logger.Debug(logSender, "", "configured TLS cipher suites for binding %#v: %v", s.binding.GetAddress(),
|
||||
|
@ -76,9 +77,9 @@ func (s *httpdServer) listenAndServe() error {
|
|||
httpServer.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
|
||||
httpServer.TLSConfig.VerifyConnection = s.verifyTLSConnection
|
||||
}
|
||||
return utils.HTTPListenAndServe(httpServer, s.binding.Address, s.binding.Port, true, logSender)
|
||||
return util.HTTPListenAndServe(httpServer, s.binding.Address, s.binding.Port, true, logSender)
|
||||
}
|
||||
return utils.HTTPListenAndServe(httpServer, s.binding.Address, s.binding.Port, false, logSender)
|
||||
return util.HTTPListenAndServe(httpServer, s.binding.Address, s.binding.Port, false, logSender)
|
||||
}
|
||||
|
||||
func (s *httpdServer) verifyTLSConnection(state tls.ConnectionState) error {
|
||||
|
@ -122,16 +123,16 @@ func (s *httpdServer) handleWebClientLoginPost(w http.ResponseWriter, r *http.Re
|
|||
renderClientLoginPage(w, err.Error())
|
||||
return
|
||||
}
|
||||
ipAddr := utils.GetIPFromRemoteAddress(r.RemoteAddr)
|
||||
ipAddr := util.GetIPFromRemoteAddress(r.RemoteAddr)
|
||||
username := r.Form.Get("username")
|
||||
password := r.Form.Get("password")
|
||||
if username == "" || password == "" {
|
||||
updateLoginMetrics(&dataprovider.User{Username: username}, ipAddr, common.ErrNoCredentials)
|
||||
updateLoginMetrics(&dataprovider.User{BaseUser: sdk.BaseUser{Username: username}}, ipAddr, common.ErrNoCredentials)
|
||||
renderClientLoginPage(w, "Invalid credentials")
|
||||
return
|
||||
}
|
||||
if err := verifyCSRFToken(r.Form.Get(csrfFormToken)); err != nil {
|
||||
updateLoginMetrics(&dataprovider.User{Username: username}, ipAddr, err)
|
||||
updateLoginMetrics(&dataprovider.User{BaseUser: sdk.BaseUser{Username: username}}, ipAddr, err)
|
||||
renderClientLoginPage(w, err.Error())
|
||||
return
|
||||
}
|
||||
|
@ -197,7 +198,7 @@ func (s *httpdServer) handleWebAdminLoginPost(w http.ResponseWriter, r *http.Req
|
|||
renderLoginPage(w, err.Error())
|
||||
return
|
||||
}
|
||||
admin, err := dataprovider.CheckAdminAndPass(username, password, utils.GetIPFromRemoteAddress(r.RemoteAddr))
|
||||
admin, err := dataprovider.CheckAdminAndPass(username, password, util.GetIPFromRemoteAddress(r.RemoteAddr))
|
||||
if err != nil {
|
||||
renderLoginPage(w, err.Error())
|
||||
return
|
||||
|
@ -272,16 +273,16 @@ func (s *httpdServer) logout(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
func (s *httpdServer) getUserToken(w http.ResponseWriter, r *http.Request) {
|
||||
ipAddr := utils.GetIPFromRemoteAddress(r.RemoteAddr)
|
||||
ipAddr := util.GetIPFromRemoteAddress(r.RemoteAddr)
|
||||
username, password, ok := r.BasicAuth()
|
||||
if !ok {
|
||||
updateLoginMetrics(&dataprovider.User{Username: username}, ipAddr, common.ErrNoCredentials)
|
||||
updateLoginMetrics(&dataprovider.User{BaseUser: sdk.BaseUser{Username: username}}, ipAddr, common.ErrNoCredentials)
|
||||
w.Header().Set(common.HTTPAuthenticationHeader, basicRealm)
|
||||
sendAPIResponse(w, r, nil, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
if username == "" || password == "" {
|
||||
updateLoginMetrics(&dataprovider.User{Username: username}, ipAddr, common.ErrNoCredentials)
|
||||
updateLoginMetrics(&dataprovider.User{BaseUser: sdk.BaseUser{Username: username}}, ipAddr, common.ErrNoCredentials)
|
||||
w.Header().Set(common.HTTPAuthenticationHeader, basicRealm)
|
||||
sendAPIResponse(w, r, nil, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
|
||||
return
|
||||
|
@ -344,7 +345,7 @@ func (s *httpdServer) getToken(w http.ResponseWriter, r *http.Request) {
|
|||
sendAPIResponse(w, r, nil, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
admin, err := dataprovider.CheckAdminAndPass(username, password, utils.GetIPFromRemoteAddress(r.RemoteAddr))
|
||||
admin, err := dataprovider.CheckAdminAndPass(username, password, util.GetIPFromRemoteAddress(r.RemoteAddr))
|
||||
if err != nil {
|
||||
w.Header().Set(common.HTTPAuthenticationHeader, basicRealm)
|
||||
sendAPIResponse(w, r, err, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
|
||||
|
@ -384,7 +385,7 @@ func (s *httpdServer) checkCookieExpiration(w http.ResponseWriter, r *http.Reque
|
|||
if time.Until(token.Expiration()) > tokenRefreshMin {
|
||||
return
|
||||
}
|
||||
if utils.IsStringInSlice(tokenAudienceWebClient, token.Audience()) {
|
||||
if util.IsStringInSlice(tokenAudienceWebClient, token.Audience()) {
|
||||
s.refreshClientToken(w, r, tokenClaims)
|
||||
} else {
|
||||
s.refreshAdminToken(w, r, tokenClaims)
|
||||
|
@ -422,7 +423,7 @@ func (s *httpdServer) refreshAdminToken(w http.ResponseWriter, r *http.Request,
|
|||
logger.Debug(logSender, "", "signature mismatch for admin %#v, unable to refresh cookie", admin.Username)
|
||||
return
|
||||
}
|
||||
if !admin.CanLoginFromIP(utils.GetIPFromRemoteAddress(r.RemoteAddr)) {
|
||||
if !admin.CanLoginFromIP(util.GetIPFromRemoteAddress(r.RemoteAddr)) {
|
||||
logger.Debug(logSender, "", "admin %#v cannot login from %v, unable to refresh cookie", admin.Username, r.RemoteAddr)
|
||||
return
|
||||
}
|
||||
|
@ -446,12 +447,12 @@ func (s *httpdServer) updateContextFromCookie(r *http.Request) *http.Request {
|
|||
|
||||
func (s *httpdServer) checkConnection(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ipAddr := utils.GetIPFromRemoteAddress(r.RemoteAddr)
|
||||
ipAddr := util.GetIPFromRemoteAddress(r.RemoteAddr)
|
||||
ip := net.ParseIP(ipAddr)
|
||||
if ip != nil {
|
||||
for _, allow := range s.binding.allowHeadersFrom {
|
||||
if allow(ip) {
|
||||
parsedIP := utils.GetRealIP(r)
|
||||
parsedIP := util.GetRealIP(r)
|
||||
if parsedIP != "" {
|
||||
ipAddr = parsedIP
|
||||
r.RemoteAddr = ipAddr
|
||||
|
@ -628,8 +629,8 @@ func (s *httpdServer) initializeRouter() {
|
|||
|
||||
router.Get(userLogoutPath, s.logout)
|
||||
router.Put(userPwdPath, changeUserPassword)
|
||||
router.With(checkHTTPUserPerm(dataprovider.WebClientPubKeyChangeDisabled)).Get(userPublicKeysPath, getUserPublicKeys)
|
||||
router.With(checkHTTPUserPerm(dataprovider.WebClientPubKeyChangeDisabled)).Put(userPublicKeysPath, setUserPublicKeys)
|
||||
router.With(checkHTTPUserPerm(sdk.WebClientPubKeyChangeDisabled)).Get(userPublicKeysPath, getUserPublicKeys)
|
||||
router.With(checkHTTPUserPerm(sdk.WebClientPubKeyChangeDisabled)).Put(userPublicKeysPath, setUserPublicKeys)
|
||||
router.Get(userReadFolderPath, readUserFolder)
|
||||
router.Get(userGetFilePath, getUserFile)
|
||||
router.Post(userStreamZipPath, getUserFilesAsZipStream)
|
||||
|
@ -674,7 +675,7 @@ func (s *httpdServer) initializeRouter() {
|
|||
router.With(s.refreshCookie).Get(webClientDownloadZipPath, handleWebClientDownloadZip)
|
||||
router.With(s.refreshCookie).Get(webClientCredentialsPath, handleClientGetCredentials)
|
||||
router.Post(webChangeClientPwdPath, handleWebClientChangePwdPost)
|
||||
router.With(checkHTTPUserPerm(dataprovider.WebClientPubKeyChangeDisabled)).
|
||||
router.With(checkHTTPUserPerm(sdk.WebClientPubKeyChangeDisabled)).
|
||||
Post(webChangeClientKeysPath, handleWebClientManageKeysPost)
|
||||
})
|
||||
}
|
||||
|
|
|
@ -17,7 +17,8 @@ import (
|
|||
"github.com/drakkan/sftpgo/v2/common"
|
||||
"github.com/drakkan/sftpgo/v2/dataprovider"
|
||||
"github.com/drakkan/sftpgo/v2/kms"
|
||||
"github.com/drakkan/sftpgo/v2/utils"
|
||||
"github.com/drakkan/sftpgo/v2/sdk"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
"github.com/drakkan/sftpgo/v2/version"
|
||||
"github.com/drakkan/sftpgo/v2/vfs"
|
||||
)
|
||||
|
@ -253,22 +254,22 @@ func loadAdminTemplates(templatesPath string) {
|
|||
}
|
||||
|
||||
rootTpl := template.New("").Funcs(template.FuncMap{
|
||||
"ListFSProviders": vfs.ListProviders,
|
||||
"ListFSProviders": sdk.ListProviders,
|
||||
})
|
||||
usersTmpl := utils.LoadTemplate(rootTpl, usersPaths...)
|
||||
userTmpl := utils.LoadTemplate(rootTpl, userPaths...)
|
||||
adminsTmpl := utils.LoadTemplate(rootTpl, adminsPaths...)
|
||||
adminTmpl := utils.LoadTemplate(rootTpl, adminPaths...)
|
||||
connectionsTmpl := utils.LoadTemplate(rootTpl, connectionsPaths...)
|
||||
messageTmpl := utils.LoadTemplate(rootTpl, messagePath...)
|
||||
foldersTmpl := utils.LoadTemplate(rootTpl, foldersPath...)
|
||||
folderTmpl := utils.LoadTemplate(rootTpl, folderPath...)
|
||||
statusTmpl := utils.LoadTemplate(rootTpl, statusPath...)
|
||||
loginTmpl := utils.LoadTemplate(rootTpl, loginPath...)
|
||||
changePwdTmpl := utils.LoadTemplate(rootTpl, changePwdPaths...)
|
||||
maintenanceTmpl := utils.LoadTemplate(rootTpl, maintenancePath...)
|
||||
defenderTmpl := utils.LoadTemplate(rootTpl, defenderPath...)
|
||||
setupTmpl := utils.LoadTemplate(rootTpl, setupPath...)
|
||||
usersTmpl := util.LoadTemplate(rootTpl, usersPaths...)
|
||||
userTmpl := util.LoadTemplate(rootTpl, userPaths...)
|
||||
adminsTmpl := util.LoadTemplate(rootTpl, adminsPaths...)
|
||||
adminTmpl := util.LoadTemplate(rootTpl, adminPaths...)
|
||||
connectionsTmpl := util.LoadTemplate(rootTpl, connectionsPaths...)
|
||||
messageTmpl := util.LoadTemplate(rootTpl, messagePath...)
|
||||
foldersTmpl := util.LoadTemplate(rootTpl, foldersPath...)
|
||||
folderTmpl := util.LoadTemplate(rootTpl, folderPath...)
|
||||
statusTmpl := util.LoadTemplate(rootTpl, statusPath...)
|
||||
loginTmpl := util.LoadTemplate(rootTpl, loginPath...)
|
||||
changePwdTmpl := util.LoadTemplate(rootTpl, changePwdPaths...)
|
||||
maintenanceTmpl := util.LoadTemplate(rootTpl, maintenancePath...)
|
||||
defenderTmpl := util.LoadTemplate(rootTpl, defenderPath...)
|
||||
setupTmpl := util.LoadTemplate(rootTpl, setupPath...)
|
||||
|
||||
adminTemplates[templateUsers] = usersTmpl
|
||||
adminTemplates[templateUser] = userTmpl
|
||||
|
@ -441,7 +442,7 @@ func renderUserPage(w http.ResponseWriter, r *http.Request, user *dataprovider.U
|
|||
ValidPerms: dataprovider.ValidPerms,
|
||||
ValidLoginMethods: dataprovider.ValidLoginMethods,
|
||||
ValidProtocols: dataprovider.ValidProtocols,
|
||||
WebClientOptions: dataprovider.WebClientOptions,
|
||||
WebClientOptions: sdk.WebClientOptions,
|
||||
RootDirPerms: user.GetPermissionsForPath("/"),
|
||||
VirtualFolders: folders,
|
||||
}
|
||||
|
@ -583,8 +584,8 @@ func getUserPermissionsFromPostFields(r *http.Request) map[string][]string {
|
|||
return permissions
|
||||
}
|
||||
|
||||
func getFilePatternsFromPostField(r *http.Request) []dataprovider.PatternsFilter {
|
||||
var result []dataprovider.PatternsFilter
|
||||
func getFilePatternsFromPostField(r *http.Request) []sdk.PatternsFilter {
|
||||
var result []sdk.PatternsFilter
|
||||
|
||||
allowedPatterns := make(map[string][]string)
|
||||
deniedPatterns := make(map[string][]string)
|
||||
|
@ -607,13 +608,13 @@ func getFilePatternsFromPostField(r *http.Request) []dataprovider.PatternsFilter
|
|||
}
|
||||
|
||||
for dirAllowed, allowPatterns := range allowedPatterns {
|
||||
filter := dataprovider.PatternsFilter{
|
||||
filter := sdk.PatternsFilter{
|
||||
Path: dirAllowed,
|
||||
AllowedPatterns: utils.RemoveDuplicates(allowPatterns),
|
||||
AllowedPatterns: util.RemoveDuplicates(allowPatterns),
|
||||
}
|
||||
for dirDenied, denPatterns := range deniedPatterns {
|
||||
if dirAllowed == dirDenied {
|
||||
filter.DeniedPatterns = utils.RemoveDuplicates(denPatterns)
|
||||
filter.DeniedPatterns = util.RemoveDuplicates(denPatterns)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
@ -628,7 +629,7 @@ func getFilePatternsFromPostField(r *http.Request) []dataprovider.PatternsFilter
|
|||
}
|
||||
}
|
||||
if !found {
|
||||
result = append(result, dataprovider.PatternsFilter{
|
||||
result = append(result, sdk.PatternsFilter{
|
||||
Path: dirDenied,
|
||||
DeniedPatterns: denPatterns,
|
||||
})
|
||||
|
@ -637,23 +638,23 @@ func getFilePatternsFromPostField(r *http.Request) []dataprovider.PatternsFilter
|
|||
return result
|
||||
}
|
||||
|
||||
func getFiltersFromUserPostFields(r *http.Request) dataprovider.UserFilters {
|
||||
var filters dataprovider.UserFilters
|
||||
func getFiltersFromUserPostFields(r *http.Request) sdk.UserFilters {
|
||||
var filters sdk.UserFilters
|
||||
filters.AllowedIP = getSliceFromDelimitedValues(r.Form.Get("allowed_ip"), ",")
|
||||
filters.DeniedIP = getSliceFromDelimitedValues(r.Form.Get("denied_ip"), ",")
|
||||
filters.DeniedLoginMethods = r.Form["ssh_login_methods"]
|
||||
filters.DeniedProtocols = r.Form["denied_protocols"]
|
||||
filters.FilePatterns = getFilePatternsFromPostField(r)
|
||||
filters.TLSUsername = dataprovider.TLSUsername(r.Form.Get("tls_username"))
|
||||
filters.TLSUsername = sdk.TLSUsername(r.Form.Get("tls_username"))
|
||||
filters.WebClient = r.Form["web_client_options"]
|
||||
hooks := r.Form["hooks"]
|
||||
if utils.IsStringInSlice("external_auth_disabled", hooks) {
|
||||
if util.IsStringInSlice("external_auth_disabled", hooks) {
|
||||
filters.Hooks.ExternalAuthDisabled = true
|
||||
}
|
||||
if utils.IsStringInSlice("pre_login_disabled", hooks) {
|
||||
if util.IsStringInSlice("pre_login_disabled", hooks) {
|
||||
filters.Hooks.PreLoginDisabled = true
|
||||
}
|
||||
if utils.IsStringInSlice("check_password_disabled", hooks) {
|
||||
if util.IsStringInSlice("check_password_disabled", hooks) {
|
||||
filters.Hooks.CheckPasswordDisabled = true
|
||||
}
|
||||
filters.DisableFsChecks = len(r.Form.Get("disable_fs_checks")) > 0
|
||||
|
@ -758,29 +759,29 @@ func getAzureConfig(r *http.Request) (vfs.AzBlobFsConfig, error) {
func getFsConfigFromPostFields(r *http.Request) (vfs.Filesystem, error) {
var fs vfs.Filesystem
fs.Provider = vfs.GetProviderByName(r.Form.Get("fs_provider"))
fs.Provider = sdk.GetProviderByName(r.Form.Get("fs_provider"))
switch fs.Provider {
case vfs.S3FilesystemProvider:
case sdk.S3FilesystemProvider:
config, err := getS3Config(r)
if err != nil {
return fs, err
}
fs.S3Config = config
case vfs.AzureBlobFilesystemProvider:
case sdk.AzureBlobFilesystemProvider:
config, err := getAzureConfig(r)
if err != nil {
return fs, err
}
fs.AzBlobConfig = config
case vfs.GCSFilesystemProvider:
case sdk.GCSFilesystemProvider:
config, err := getGCSConfig(r)
if err != nil {
return fs, err
}
fs.GCSConfig = config
case vfs.CryptedFilesystemProvider:
case sdk.CryptedFilesystemProvider:
fs.CryptConfig.Passphrase = getSecretFromFormField(r, "crypt_passphrase")
case vfs.SFTPFilesystemProvider:
case sdk.SFTPFilesystemProvider:
config, err := getSFTPConfig(r)
if err != nil {
return fs, err
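The dispatch above relies on sdk.GetProviderByName to turn the submitted fs_provider form value into one of the provider constants before the per-backend parsing runs. A hedged sketch of the same pattern in isolation; the grouping and the fallback comment are assumptions, not behaviour guaranteed by this diff:

    provider := sdk.GetProviderByName(r.Form.Get("fs_provider"))
    switch provider {
    case sdk.S3FilesystemProvider, sdk.GCSFilesystemProvider, sdk.AzureBlobFilesystemProvider:
        // object storage backends: read the matching credentials section of the form
    case sdk.SFTPFilesystemProvider, sdk.CryptedFilesystemProvider:
        // remote SFTP backend or locally encrypted storage
    default:
        // assumed fallback: anything unrecognized is treated as sdk.LocalFilesystemProvider
    }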
@ -826,15 +827,15 @@ func getFolderFromTemplate(folder vfs.BaseVirtualFolder, name string) vfs.BaseVi
|
|||
folder.MappedPath = replacePlaceholders(folder.MappedPath, replacements)
|
||||
folder.Description = replacePlaceholders(folder.Description, replacements)
|
||||
switch folder.FsConfig.Provider {
|
||||
case vfs.CryptedFilesystemProvider:
|
||||
case sdk.CryptedFilesystemProvider:
|
||||
folder.FsConfig.CryptConfig = getCryptFsFromTemplate(folder.FsConfig.CryptConfig, replacements)
|
||||
case vfs.S3FilesystemProvider:
|
||||
case sdk.S3FilesystemProvider:
|
||||
folder.FsConfig.S3Config = getS3FsFromTemplate(folder.FsConfig.S3Config, replacements)
|
||||
case vfs.GCSFilesystemProvider:
|
||||
case sdk.GCSFilesystemProvider:
|
||||
folder.FsConfig.GCSConfig = getGCSFsFromTemplate(folder.FsConfig.GCSConfig, replacements)
|
||||
case vfs.AzureBlobFilesystemProvider:
|
||||
case sdk.AzureBlobFilesystemProvider:
|
||||
folder.FsConfig.AzBlobConfig = getAzBlobFsFromTemplate(folder.FsConfig.AzBlobConfig, replacements)
|
||||
case vfs.SFTPFilesystemProvider:
|
||||
case sdk.SFTPFilesystemProvider:
|
||||
folder.FsConfig.SFTPConfig = getSFTPFsFromTemplate(folder.FsConfig.SFTPConfig, replacements)
|
||||
}
|
||||
|
||||
|
@ -910,15 +911,15 @@ func getUserFromTemplate(user dataprovider.User, template userTemplateFields) da
|
|||
user.AdditionalInfo = replacePlaceholders(user.AdditionalInfo, replacements)
|
||||
|
||||
switch user.FsConfig.Provider {
|
||||
case vfs.CryptedFilesystemProvider:
|
||||
case sdk.CryptedFilesystemProvider:
|
||||
user.FsConfig.CryptConfig = getCryptFsFromTemplate(user.FsConfig.CryptConfig, replacements)
|
||||
case vfs.S3FilesystemProvider:
|
||||
case sdk.S3FilesystemProvider:
|
||||
user.FsConfig.S3Config = getS3FsFromTemplate(user.FsConfig.S3Config, replacements)
|
||||
case vfs.GCSFilesystemProvider:
|
||||
case sdk.GCSFilesystemProvider:
|
||||
user.FsConfig.GCSConfig = getGCSFsFromTemplate(user.FsConfig.GCSConfig, replacements)
|
||||
case vfs.AzureBlobFilesystemProvider:
|
||||
case sdk.AzureBlobFilesystemProvider:
|
||||
user.FsConfig.AzBlobConfig = getAzBlobFsFromTemplate(user.FsConfig.AzBlobConfig, replacements)
|
||||
case vfs.SFTPFilesystemProvider:
|
||||
case sdk.SFTPFilesystemProvider:
|
||||
user.FsConfig.SFTPConfig = getSFTPFsFromTemplate(user.FsConfig.SFTPConfig, replacements)
|
||||
}
|
||||
|
||||
|
@ -970,18 +971,18 @@ func getUserFromPostFields(r *http.Request) (dataprovider.User, error) {
|
|||
if err != nil {
|
||||
return user, err
|
||||
}
|
||||
expirationDateMillis = utils.GetTimeAsMsSinceEpoch(expirationDate)
|
||||
expirationDateMillis = util.GetTimeAsMsSinceEpoch(expirationDate)
|
||||
}
|
||||
fsConfig, err := getFsConfigFromPostFields(r)
|
||||
if err != nil {
|
||||
return user, err
|
||||
}
|
||||
user = dataprovider.User{
|
||||
BaseUser: sdk.BaseUser{
|
||||
Username: r.Form.Get("username"),
|
||||
Password: r.Form.Get("password"),
|
||||
PublicKeys: r.Form["public_keys"],
|
||||
HomeDir: r.Form.Get("home_dir"),
|
||||
VirtualFolders: getVirtualFoldersFromPostFields(r),
|
||||
UID: uid,
|
||||
GID: gid,
|
||||
Permissions: getUserPermissionsFromPostFields(r),
|
||||
|
@ -993,9 +994,11 @@ func getUserFromPostFields(r *http.Request) (dataprovider.User, error) {
|
|||
Status: status,
|
||||
ExpirationDate: expirationDateMillis,
|
||||
Filters: getFiltersFromUserPostFields(r),
|
||||
FsConfig: fsConfig,
|
||||
AdditionalInfo: r.Form.Get("additional_info"),
|
||||
Description: r.Form.Get("description"),
|
||||
},
|
||||
VirtualFolders: getVirtualFoldersFromPostFields(r),
|
||||
FsConfig: fsConfig,
|
||||
}
|
||||
maxFileSize, err := strconv.ParseInt(r.Form.Get("max_upload_file_size"), 10, 64)
|
||||
user.Filters.MaxUploadFileSize = maxFileSize
|
||||
|
@ -1146,7 +1149,7 @@ func handleWebUpdateAdminGet(w http.ResponseWriter, r *http.Request) {
|
|||
admin, err := dataprovider.AdminExists(username)
|
||||
if err == nil {
|
||||
renderAddUpdateAdminPage(w, r, &admin, "", false)
|
||||
} else if _, ok := err.(*utils.RecordNotFoundError); ok {
|
||||
} else if _, ok := err.(*util.RecordNotFoundError); ok {
|
||||
renderNotFoundPage(w, r, err)
|
||||
} else {
|
||||
renderInternalServerErrorPage(w, r, err)
|
||||
|
@ -1177,7 +1180,7 @@ func handleWebUpdateAdminPost(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
username := getURLParam(r, "username")
|
||||
admin, err := dataprovider.AdminExists(username)
|
||||
if _, ok := err.(*utils.RecordNotFoundError); ok {
|
||||
if _, ok := err.(*util.RecordNotFoundError); ok {
|
||||
renderNotFoundPage(w, r, err)
|
||||
return
|
||||
} else if err != nil {
|
||||
|
@ -1265,7 +1268,7 @@ func handleWebTemplateFolderGet(w http.ResponseWriter, r *http.Request) {
|
|||
folder, err := dataprovider.GetFolderByName(name)
|
||||
if err == nil {
|
||||
renderFolderPage(w, r, folder, folderPageModeTemplate, "")
|
||||
} else if _, ok := err.(*utils.RecordNotFoundError); ok {
|
||||
} else if _, ok := err.(*util.RecordNotFoundError); ok {
|
||||
renderNotFoundPage(w, r, err)
|
||||
} else {
|
||||
renderInternalServerErrorPage(w, r, err)
|
||||
|
@ -1328,13 +1331,13 @@ func handleWebTemplateUserGet(w http.ResponseWriter, r *http.Request) {
|
|||
if err == nil {
|
||||
user.SetEmptySecrets()
|
||||
renderUserPage(w, r, &user, userPageModeTemplate, "")
|
||||
} else if _, ok := err.(*utils.RecordNotFoundError); ok {
|
||||
} else if _, ok := err.(*util.RecordNotFoundError); ok {
|
||||
renderNotFoundPage(w, r, err)
|
||||
} else {
|
||||
renderInternalServerErrorPage(w, r, err)
|
||||
}
|
||||
} else {
|
||||
user := dataprovider.User{Status: 1}
|
||||
user := dataprovider.User{BaseUser: sdk.BaseUser{Status: 1}}
|
||||
renderUserPage(w, r, &user, userPageModeTemplate, "")
|
||||
}
|
||||
}
|
||||
|
@ -1388,13 +1391,13 @@ func handleWebAddUserGet(w http.ResponseWriter, r *http.Request) {
|
|||
user.Password = ""
|
||||
user.SetEmptySecrets()
|
||||
renderUserPage(w, r, &user, userPageModeAdd, "")
|
||||
} else if _, ok := err.(*utils.RecordNotFoundError); ok {
|
||||
} else if _, ok := err.(*util.RecordNotFoundError); ok {
|
||||
renderNotFoundPage(w, r, err)
|
||||
} else {
|
||||
renderInternalServerErrorPage(w, r, err)
|
||||
}
|
||||
} else {
|
||||
user := dataprovider.User{Status: 1}
|
||||
user := dataprovider.User{BaseUser: sdk.BaseUser{Status: 1}}
|
||||
renderUserPage(w, r, &user, userPageModeAdd, "")
|
||||
}
|
||||
}
|
||||
|
@ -1404,7 +1407,7 @@ func handleWebUpdateUserGet(w http.ResponseWriter, r *http.Request) {
|
|||
user, err := dataprovider.UserExists(username)
|
||||
if err == nil {
|
||||
renderUserPage(w, r, &user, userPageModeUpdate, "")
|
||||
} else if _, ok := err.(*utils.RecordNotFoundError); ok {
|
||||
} else if _, ok := err.(*util.RecordNotFoundError); ok {
|
||||
renderNotFoundPage(w, r, err)
|
||||
} else {
|
||||
renderInternalServerErrorPage(w, r, err)
|
||||
|
@ -1434,7 +1437,7 @@ func handleWebUpdateUserPost(w http.ResponseWriter, r *http.Request) {
|
|||
r.Body = http.MaxBytesReader(w, r.Body, maxRequestSize)
|
||||
username := getURLParam(r, "username")
|
||||
user, err := dataprovider.UserExists(username)
|
||||
if _, ok := err.(*utils.RecordNotFoundError); ok {
|
||||
if _, ok := err.(*util.RecordNotFoundError); ok {
|
||||
renderNotFoundPage(w, r, err)
|
||||
return
|
||||
} else if err != nil {
|
||||
|
@ -1527,7 +1530,7 @@ func handleWebUpdateFolderGet(w http.ResponseWriter, r *http.Request) {
|
|||
folder, err := dataprovider.GetFolderByName(name)
|
||||
if err == nil {
|
||||
renderFolderPage(w, r, folder, folderPageModeUpdate, "")
|
||||
} else if _, ok := err.(*utils.RecordNotFoundError); ok {
|
||||
} else if _, ok := err.(*util.RecordNotFoundError); ok {
|
||||
renderNotFoundPage(w, r, err)
|
||||
} else {
|
||||
renderInternalServerErrorPage(w, r, err)
|
||||
|
@ -1538,7 +1541,7 @@ func handleWebUpdateFolderPost(w http.ResponseWriter, r *http.Request) {
|
|||
r.Body = http.MaxBytesReader(w, r.Body, maxRequestSize)
|
||||
name := getURLParam(r, "name")
|
||||
folder, err := dataprovider.GetFolderByName(name)
|
||||
if _, ok := err.(*utils.RecordNotFoundError); ok {
|
||||
if _, ok := err.(*util.RecordNotFoundError); ok {
|
||||
renderNotFoundPage(w, r, err)
|
||||
return
|
||||
} else if err != nil {
|
||||
|
|
|
@ -16,7 +16,7 @@ import (
|
|||
|
||||
"github.com/drakkan/sftpgo/v2/common"
|
||||
"github.com/drakkan/sftpgo/v2/dataprovider"
|
||||
"github.com/drakkan/sftpgo/v2/utils"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
"github.com/drakkan/sftpgo/v2/version"
|
||||
"github.com/drakkan/sftpgo/v2/vfs"
|
||||
)
|
||||
|
@ -123,10 +123,10 @@ func loadClientTemplates(templatesPath string) {
|
|||
filepath.Join(templatesPath, templateClientDir, templateClientMessage),
|
||||
}
|
||||
|
||||
filesTmpl := utils.LoadTemplate(nil, filesPaths...)
|
||||
credentialsTmpl := utils.LoadTemplate(nil, credentialsPaths...)
|
||||
loginTmpl := utils.LoadTemplate(nil, loginPath...)
|
||||
messageTmpl := utils.LoadTemplate(nil, messagePath...)
|
||||
filesTmpl := util.LoadTemplate(nil, filesPaths...)
|
||||
credentialsTmpl := util.LoadTemplate(nil, credentialsPaths...)
|
||||
loginTmpl := util.LoadTemplate(nil, loginPath...)
|
||||
messageTmpl := util.LoadTemplate(nil, messagePath...)
|
||||
|
||||
clientTemplates[templateClientFiles] = filesTmpl
|
||||
clientTemplates[templateClientCredentials] = credentialsTmpl
|
||||
|
@ -291,7 +291,7 @@ func handleWebClientDownloadZip(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
name := "/"
|
||||
if _, ok := r.URL.Query()["path"]; ok {
|
||||
name = utils.CleanPath(r.URL.Query().Get("path"))
|
||||
name = util.CleanPath(r.URL.Query().Get("path"))
|
||||
}
|
||||
|
||||
files := r.URL.Query().Get("files")
|
||||
|
@ -334,7 +334,7 @@ func handleClientGetDirContents(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
name := "/"
|
||||
if _, ok := r.URL.Query()["path"]; ok {
|
||||
name = utils.CleanPath(r.URL.Query().Get("path"))
|
||||
name = util.CleanPath(r.URL.Query().Get("path"))
|
||||
}
|
||||
|
||||
contents, err := connection.ReadDir(name)
|
||||
|
@ -354,7 +354,7 @@ func handleClientGetDirContents(w http.ResponseWriter, r *http.Request) {
|
|||
if info.Mode()&os.ModeSymlink != 0 {
|
||||
res["size"] = ""
|
||||
} else {
|
||||
res["size"] = utils.ByteCountIEC(info.Size())
|
||||
res["size"] = util.ByteCountIEC(info.Size())
|
||||
}
|
||||
}
|
||||
res["name"] = info.Name()
|
||||
|
@ -394,7 +394,7 @@ func handleClientGetFiles(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
name := "/"
|
||||
if _, ok := r.URL.Query()["path"]; ok {
|
||||
name = utils.CleanPath(r.URL.Query().Get("path"))
|
||||
name = util.CleanPath(r.URL.Query().Get("path"))
|
||||
}
|
||||
var info os.FileInfo
|
||||
if name == "/" {
|
||||
|
|
|
@ -21,7 +21,7 @@ import (
|
|||
"github.com/drakkan/sftpgo/v2/httpclient"
|
||||
"github.com/drakkan/sftpgo/v2/httpd"
|
||||
"github.com/drakkan/sftpgo/v2/kms"
|
||||
"github.com/drakkan/sftpgo/v2/utils"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
"github.com/drakkan/sftpgo/v2/version"
|
||||
"github.com/drakkan/sftpgo/v2/vfs"
|
||||
)
|
||||
|
@ -914,7 +914,7 @@ func checkAdmin(expected *dataprovider.Admin, actual *dataprovider.Admin) error
|
|||
return errors.New("permissions mismatch")
|
||||
}
|
||||
for _, p := range expected.Permissions {
|
||||
if !utils.IsStringInSlice(p, actual.Permissions) {
|
||||
if !util.IsStringInSlice(p, actual.Permissions) {
|
||||
return errors.New("permissions content mismatch")
|
||||
}
|
||||
}
|
||||
|
@ -922,7 +922,7 @@ func checkAdmin(expected *dataprovider.Admin, actual *dataprovider.Admin) error
|
|||
return errors.New("allow list mismatch")
|
||||
}
|
||||
for _, v := range expected.Filters.AllowList {
|
||||
if !utils.IsStringInSlice(v, actual.Filters.AllowList) {
|
||||
if !util.IsStringInSlice(v, actual.Filters.AllowList) {
|
||||
return errors.New("allow list content mismatch")
|
||||
}
|
||||
}
|
||||
|
@ -968,7 +968,7 @@ func checkUser(expected *dataprovider.User, actual *dataprovider.User) error {
|
|||
for dir, perms := range expected.Permissions {
|
||||
if actualPerms, ok := actual.Permissions[dir]; ok {
|
||||
for _, v := range actualPerms {
|
||||
if !utils.IsStringInSlice(v, perms) {
|
||||
if !util.IsStringInSlice(v, perms) {
|
||||
return errors.New("permissions contents mismatch")
|
||||
}
|
||||
}
|
||||
|
@ -1112,7 +1112,7 @@ func compareSFTPFsConfig(expected *vfs.Filesystem, actual *vfs.Filesystem) error
|
|||
return errors.New("SFTPFs fingerprints mismatch")
|
||||
}
|
||||
for _, value := range actual.SFTPConfig.Fingerprints {
|
||||
if !utils.IsStringInSlice(value, expected.SFTPConfig.Fingerprints) {
|
||||
if !util.IsStringInSlice(value, expected.SFTPConfig.Fingerprints) {
|
||||
return errors.New("SFTPFs fingerprints mismatch")
|
||||
}
|
||||
}
|
||||
|
@ -1197,27 +1197,27 @@ func checkEncryptedSecret(expected, actual *kms.Secret) error {
|
|||
|
||||
func compareUserFilterSubStructs(expected *dataprovider.User, actual *dataprovider.User) error {
|
||||
for _, IPMask := range expected.Filters.AllowedIP {
|
||||
if !utils.IsStringInSlice(IPMask, actual.Filters.AllowedIP) {
|
||||
if !util.IsStringInSlice(IPMask, actual.Filters.AllowedIP) {
|
||||
return errors.New("allowed IP contents mismatch")
|
||||
}
|
||||
}
|
||||
for _, IPMask := range expected.Filters.DeniedIP {
|
||||
if !utils.IsStringInSlice(IPMask, actual.Filters.DeniedIP) {
|
||||
if !util.IsStringInSlice(IPMask, actual.Filters.DeniedIP) {
|
||||
return errors.New("denied IP contents mismatch")
|
||||
}
|
||||
}
|
||||
for _, method := range expected.Filters.DeniedLoginMethods {
|
||||
if !utils.IsStringInSlice(method, actual.Filters.DeniedLoginMethods) {
|
||||
if !util.IsStringInSlice(method, actual.Filters.DeniedLoginMethods) {
|
||||
return errors.New("denied login methods contents mismatch")
|
||||
}
|
||||
}
|
||||
for _, protocol := range expected.Filters.DeniedProtocols {
|
||||
if !utils.IsStringInSlice(protocol, actual.Filters.DeniedProtocols) {
|
||||
if !util.IsStringInSlice(protocol, actual.Filters.DeniedProtocols) {
|
||||
return errors.New("denied protocols contents mismatch")
|
||||
}
|
||||
}
|
||||
for _, options := range expected.Filters.WebClient {
|
||||
if !utils.IsStringInSlice(options, actual.Filters.WebClient) {
|
||||
if !util.IsStringInSlice(options, actual.Filters.WebClient) {
|
||||
return errors.New("web client options contents mismatch")
|
||||
}
|
||||
}
|
||||
|
@ -1269,7 +1269,7 @@ func checkFilterMatch(expected []string, actual []string) bool {
|
|||
return false
|
||||
}
|
||||
for _, e := range expected {
|
||||
if !utils.IsStringInSlice(strings.ToLower(e), actual) {
|
||||
if !util.IsStringInSlice(strings.ToLower(e), actual) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
|
|
@ -9,7 +9,7 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/drakkan/sftpgo/v2/utils"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
)
|
||||
|
||||
// SecretProvider defines the interface for a KMS secrets provider
|
||||
|
@ -98,7 +98,7 @@ func NewPlainSecret(payload string) *Secret {
|
|||
|
||||
// GetSecretFromCompatString returns a secret from the previous format
|
||||
func GetSecretFromCompatString(secret string) (*Secret, error) {
|
||||
plain, err := utils.DecryptData(secret)
|
||||
plain, err := util.DecryptData(secret)
|
||||
if err != nil {
|
||||
return &Secret{}, errMalformedCiphertext
|
||||
}
|
||||
|
@ -401,7 +401,7 @@ func (s *Secret) IsValidInput() bool {
|
|||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
if !utils.IsStringInSlice(s.provider.GetStatus(), validSecretStatuses) {
|
||||
if !util.IsStringInSlice(s.provider.GetStatus(), validSecretStatuses) {
|
||||
return false
|
||||
}
|
||||
if s.provider.GetPayload() == "" {
|
||||
|
|
66
logger/hclog_adapter.go
Normal file
66
logger/hclog_adapter.go
Normal file
|
@ -0,0 +1,66 @@
|
|||
package logger
|
||||
|
||||
import (
|
||||
"io"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
type HCLogAdapter struct {
|
||||
hclog.Logger
|
||||
}
|
||||
|
||||
func (l *HCLogAdapter) Log(level hclog.Level, msg string, args ...interface{}) {
|
||||
var ev *zerolog.Event
|
||||
switch level {
|
||||
case hclog.Info:
|
||||
ev = logger.Info()
|
||||
case hclog.Warn:
|
||||
ev = logger.Warn()
|
||||
case hclog.Error:
|
||||
ev = logger.Error()
|
||||
default:
|
||||
ev = logger.Debug()
|
||||
}
|
||||
ev.Timestamp().Str("sender", l.Name())
|
||||
addKeysAndValues(ev, args...)
|
||||
ev.Msg(msg)
|
||||
}
|
||||
|
||||
func (l *HCLogAdapter) Trace(msg string, args ...interface{}) {
|
||||
l.Log(hclog.Debug, msg, args...)
|
||||
}
|
||||
|
||||
func (l *HCLogAdapter) Debug(msg string, args ...interface{}) {
|
||||
l.Log(hclog.Debug, msg, args...)
|
||||
}
|
||||
|
||||
func (l *HCLogAdapter) Info(msg string, args ...interface{}) {
|
||||
l.Log(hclog.Info, msg, args...)
|
||||
}
|
||||
|
||||
func (l *HCLogAdapter) Warn(msg string, args ...interface{}) {
|
||||
l.Log(hclog.Warn, msg, args...)
|
||||
}
|
||||
|
||||
func (l *HCLogAdapter) Error(msg string, args ...interface{}) {
|
||||
l.Log(hclog.Error, msg, args...)
|
||||
}
|
||||
|
||||
func (l *HCLogAdapter) With(args ...interface{}) hclog.Logger {
|
||||
return &HCLogAdapter{Logger: l.Logger.With(args...)}
|
||||
}
|
||||
|
||||
func (l *HCLogAdapter) Named(name string) hclog.Logger {
|
||||
return &HCLogAdapter{Logger: l.Logger.Named(name)}
|
||||
}
|
||||
|
||||
func (l *HCLogAdapter) StandardLogger(opts *hclog.StandardLoggerOptions) *log.Logger {
|
||||
return log.New(&StdLoggerWrapper{Sender: l.Name()}, "", 0)
|
||||
}
|
||||
|
||||
func (l *HCLogAdapter) StandardWriter(opts *hclog.StandardLoggerOptions) io.Writer {
|
||||
return &StdLoggerWrapper{Sender: l.Name()}
|
||||
}
|
|
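The adapter above routes log records from hashicorp go-plugin components into SFTPGo's zerolog-based logger. A minimal usage sketch, assuming the console logger has already been enabled; the sender name, command path and key/value pairs are illustrative:

package main

import (
	"github.com/hashicorp/go-hclog"
	"github.com/rs/zerolog"

	"github.com/drakkan/sftpgo/v2/logger"
)

func main() {
	logger.EnableConsoleLogger(zerolog.DebugLevel)

	// bridge hclog output from go-plugin components into SFTPGo's structured logger
	adapter := &logger.HCLogAdapter{
		Logger: hclog.New(&hclog.LoggerOptions{
			Name:        "sftpgo.myplugin", // hypothetical sender name
			Level:       hclog.Debug,
			DisableTime: true, // the adapter emits its own zerolog timestamp
		}),
	}
	adapter.Info("plugin started", "cmd", "/usr/local/bin/my-plugin")
	adapter.Named("notifier").Warn("slow response", "elapsed_ms", 1500)
}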
@ -63,7 +63,7 @@ type LeveledLogger struct {
|
|||
Sender string
|
||||
}
|
||||
|
||||
func (l *LeveledLogger) addKeysAndValues(ev *zerolog.Event, keysAndValues ...interface{}) {
|
||||
func addKeysAndValues(ev *zerolog.Event, keysAndValues ...interface{}) {
|
||||
kvLen := len(keysAndValues)
|
||||
if kvLen%2 != 0 {
|
||||
extra := keysAndValues[kvLen-1]
|
||||
|
@ -71,7 +71,7 @@ func (l *LeveledLogger) addKeysAndValues(ev *zerolog.Event, keysAndValues ...int
|
|||
}
|
||||
for i := 0; i < len(keysAndValues); i += 2 {
|
||||
key, val := keysAndValues[i], keysAndValues[i+1]
|
||||
if keyStr, ok := key.(string); ok {
|
||||
if keyStr, ok := key.(string); ok && keyStr != "timestamp" {
|
||||
ev.Str(keyStr, fmt.Sprintf("%v", val))
|
||||
}
|
||||
}
|
||||
|
@ -81,7 +81,7 @@ func (l *LeveledLogger) addKeysAndValues(ev *zerolog.Event, keysAndValues ...int
|
|||
func (l *LeveledLogger) Error(msg string, keysAndValues ...interface{}) {
|
||||
ev := logger.Error()
|
||||
ev.Timestamp().Str("sender", l.Sender)
|
||||
l.addKeysAndValues(ev, keysAndValues...)
|
||||
addKeysAndValues(ev, keysAndValues...)
|
||||
ev.Msg(msg)
|
||||
}
|
||||
|
||||
|
@ -89,7 +89,7 @@ func (l *LeveledLogger) Error(msg string, keysAndValues ...interface{}) {
|
|||
func (l *LeveledLogger) Info(msg string, keysAndValues ...interface{}) {
|
||||
ev := logger.Info()
|
||||
ev.Timestamp().Str("sender", l.Sender)
|
||||
l.addKeysAndValues(ev, keysAndValues...)
|
||||
addKeysAndValues(ev, keysAndValues...)
|
||||
ev.Msg(msg)
|
||||
}
|
||||
|
||||
|
@ -97,7 +97,7 @@ func (l *LeveledLogger) Info(msg string, keysAndValues ...interface{}) {
|
|||
func (l *LeveledLogger) Debug(msg string, keysAndValues ...interface{}) {
|
||||
ev := logger.Debug()
|
||||
ev.Timestamp().Str("sender", l.Sender)
|
||||
l.addKeysAndValues(ev, keysAndValues...)
|
||||
addKeysAndValues(ev, keysAndValues...)
|
||||
ev.Msg(msg)
|
||||
}
|
||||
|
||||
|
@ -105,7 +105,7 @@ func (l *LeveledLogger) Debug(msg string, keysAndValues ...interface{}) {
|
|||
func (l *LeveledLogger) Warn(msg string, keysAndValues ...interface{}) {
|
||||
ev := logger.Warn()
|
||||
ev.Timestamp().Str("sender", l.Sender)
|
||||
l.addKeysAndValues(ev, keysAndValues...)
|
||||
addKeysAndValues(ev, keysAndValues...)
|
||||
ev.Msg(msg)
|
||||
}
|
||||
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
"github.com/go-chi/chi/v5/middleware"
|
||||
"github.com/rs/zerolog"
|
||||
|
||||
"github.com/drakkan/sftpgo/v2/metrics"
|
||||
"github.com/drakkan/sftpgo/v2/metric"
|
||||
)
|
||||
|
||||
// StructuredLogger defines a simple wrapper around zerolog logger.
|
||||
|
@ -56,7 +56,7 @@ func (l *StructuredLogger) NewLogEntry(r *http.Request) middleware.LogEntry {
|
|||
|
||||
// Write logs a new entry at the end of the HTTP request
|
||||
func (l *StructuredLoggerEntry) Write(status, bytes int, header http.Header, elapsed time.Duration, extra interface{}) {
|
||||
metrics.HTTPRequestServed(status)
|
||||
metric.HTTPRequestServed(status)
|
||||
l.Logger.Info().
|
||||
Timestamp().
|
||||
Str("sender", "httpd").
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
// +build !nometrics
|
||||
|
||||
// Package metrics provides Prometheus metrics support
|
||||
package metrics
|
||||
package metric
|
||||
|
||||
import (
|
||||
"github.com/go-chi/chi/v5"
|
|
@ -1,6 +1,6 @@
|
|||
// +build nometrics
|
||||
|
||||
package metrics
|
||||
package metric
|
||||
|
||||
import (
|
||||
"github.com/go-chi/chi/v5"
|
|
@ -1,6 +1,6 @@
|
|||
#!/bin/bash
|
||||
|
||||
NFPM_VERSION=2.5.1
|
||||
NFPM_VERSION=2.6.0
|
||||
NFPM_ARCH=${NFPM_ARCH:-amd64}
|
||||
if [ -z ${SFTPGO_VERSION} ]
|
||||
then
|
||||
|
|
202
sdk/filesystem.go
Normal file
|
@ -0,0 +1,202 @@
|
|||
package sdk
|
||||
|
||||
import "github.com/drakkan/sftpgo/v2/kms"
|
||||
|
||||
// FilesystemProvider defines the supported storage filesystems
|
||||
type FilesystemProvider int
|
||||
|
||||
// supported values for FilesystemProvider
|
||||
const (
|
||||
LocalFilesystemProvider FilesystemProvider = iota // Local
|
||||
S3FilesystemProvider // AWS S3 compatible
|
||||
GCSFilesystemProvider // Google Cloud Storage
|
||||
AzureBlobFilesystemProvider // Azure Blob Storage
|
||||
CryptedFilesystemProvider // Local encrypted
|
||||
SFTPFilesystemProvider // SFTP
|
||||
)
|
||||
|
||||
// GetProviderByName returns the FilesystemProvider matching a given name
|
||||
// to provide backwards compatibility, numeric strings are accepted as well
|
||||
func GetProviderByName(name string) FilesystemProvider {
|
||||
switch name {
|
||||
case "0", "osfs":
|
||||
return LocalFilesystemProvider
|
||||
case "1", "s3fs":
|
||||
return S3FilesystemProvider
|
||||
case "2", "gcsfs":
|
||||
return GCSFilesystemProvider
|
||||
case "3", "azblobfs":
|
||||
return AzureBlobFilesystemProvider
|
||||
case "4", "cryptfs":
|
||||
return CryptedFilesystemProvider
|
||||
case "5", "sftpfs":
|
||||
return SFTPFilesystemProvider
|
||||
}
|
||||
|
||||
// TODO think about returning an error value instead of silently defaulting to LocalFilesystemProvider
|
||||
return LocalFilesystemProvider
|
||||
}
|
||||
|
||||
// Name returns the Provider's unique name
|
||||
func (p FilesystemProvider) Name() string {
|
||||
switch p {
|
||||
case LocalFilesystemProvider:
|
||||
return "osfs"
|
||||
case S3FilesystemProvider:
|
||||
return "s3fs"
|
||||
case GCSFilesystemProvider:
|
||||
return "gcsfs"
|
||||
case AzureBlobFilesystemProvider:
|
||||
return "azblobfs"
|
||||
case CryptedFilesystemProvider:
|
||||
return "cryptfs"
|
||||
case SFTPFilesystemProvider:
|
||||
return "sftpfs"
|
||||
}
|
||||
return "" // let's not claim to be
|
||||
}
|
||||
|
||||
// ShortInfo returns a human readable, short description for the given FilesystemProvider
|
||||
func (p FilesystemProvider) ShortInfo() string {
|
||||
switch p {
|
||||
case LocalFilesystemProvider:
|
||||
return "Local"
|
||||
case S3FilesystemProvider:
|
||||
return "AWS S3 (Compatible)"
|
||||
case GCSFilesystemProvider:
|
||||
return "Google Cloud Storage"
|
||||
case AzureBlobFilesystemProvider:
|
||||
return "Azure Blob Storage"
|
||||
case CryptedFilesystemProvider:
|
||||
return "Local encrypted"
|
||||
case SFTPFilesystemProvider:
|
||||
return "SFTP"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// ListProviders returns a list of available FilesystemProviders.
|
||||
func ListProviders() []FilesystemProvider {
|
||||
return []FilesystemProvider{
|
||||
LocalFilesystemProvider, S3FilesystemProvider,
|
||||
GCSFilesystemProvider, AzureBlobFilesystemProvider,
|
||||
CryptedFilesystemProvider, SFTPFilesystemProvider,
|
||||
}
|
||||
}
|
||||
|
||||
// S3FsConfig defines the configuration for S3 based filesystem
|
||||
type S3FsConfig struct {
|
||||
Bucket string `json:"bucket,omitempty"`
|
||||
// KeyPrefix is similar to a chroot directory for local filesystem.
|
||||
// If specified then the SFTP user will only see objects that start
|
||||
// with this prefix and so you can restrict access to a specific
|
||||
// folder. The prefix, if not empty, must not start with "/" and must
|
||||
// end with "/".
|
||||
// If empty the whole bucket contents will be available
|
||||
KeyPrefix string `json:"key_prefix,omitempty"`
|
||||
Region string `json:"region,omitempty"`
|
||||
AccessKey string `json:"access_key,omitempty"`
|
||||
AccessSecret *kms.Secret `json:"access_secret,omitempty"`
|
||||
Endpoint string `json:"endpoint,omitempty"`
|
||||
StorageClass string `json:"storage_class,omitempty"`
|
||||
// The buffer size (in MB) to use for multipart uploads. The minimum allowed part size is 5MB,
|
||||
// and if this value is set to zero, the default value (5MB) for the AWS SDK will be used.
|
||||
// The minimum allowed value is 5.
|
||||
// Please note that if the upload bandwidth between the SFTP client and SFTPGo is greater than
|
||||
// the upload bandwidth between SFTPGo and S3 then the SFTP client has to wait for the upload
|
||||
// of the last parts to S3 after it ends the file upload to SFTPGo, and it may time out.
|
||||
// Keep this in mind if you customize these parameters.
|
||||
UploadPartSize int64 `json:"upload_part_size,omitempty"`
|
||||
// How many parts are uploaded in parallel
|
||||
UploadConcurrency int `json:"upload_concurrency,omitempty"`
|
||||
}
|
||||
|
||||
// GCSFsConfig defines the configuration for Google Cloud Storage based filesystem
|
||||
type GCSFsConfig struct {
|
||||
Bucket string `json:"bucket,omitempty"`
|
||||
// KeyPrefix is similar to a chroot directory for local filesystem.
|
||||
// If specified then the SFTP user will only see objects that start
|
||||
// with this prefix and so you can restrict access to a specific
|
||||
// folder. The prefix, if not empty, must not start with "/" and must
|
||||
// end with "/".
|
||||
// If empty the whole bucket contents will be available
|
||||
KeyPrefix string `json:"key_prefix,omitempty"`
|
||||
CredentialFile string `json:"-"`
|
||||
Credentials *kms.Secret `json:"credentials,omitempty"`
|
||||
// 0 explicit, 1 automatic
|
||||
AutomaticCredentials int `json:"automatic_credentials,omitempty"`
|
||||
StorageClass string `json:"storage_class,omitempty"`
|
||||
}
|
||||
|
||||
// AzBlobFsConfig defines the configuration for Azure Blob Storage based filesystem
|
||||
type AzBlobFsConfig struct {
|
||||
Container string `json:"container,omitempty"`
|
||||
// Storage Account Name, leave blank to use SAS URL
|
||||
AccountName string `json:"account_name,omitempty"`
|
||||
// Storage Account Key, leave blank to use SAS URL.
|
||||
// The access key is stored encrypted based on the kms configuration
|
||||
AccountKey *kms.Secret `json:"account_key,omitempty"`
|
||||
// Optional endpoint. Default is "blob.core.windows.net".
|
||||
// If you use the emulator the endpoint must include the protocol,
|
||||
// for example "http://127.0.0.1:10000"
|
||||
Endpoint string `json:"endpoint,omitempty"`
|
||||
// Shared access signature URL, leave blank if using account/key
|
||||
SASURL *kms.Secret `json:"sas_url,omitempty"`
|
||||
// KeyPrefix is similar to a chroot directory for local filesystem.
|
||||
// If specified then the SFTPGo user will only see objects that start
|
||||
// with this prefix and so you can restrict access to a specific
|
||||
// folder. The prefix, if not empty, must not start with "/" and must
|
||||
// end with "/".
|
||||
// If empty the whole bucket contents will be available
|
||||
KeyPrefix string `json:"key_prefix,omitempty"`
|
||||
// The buffer size (in MB) to use for multipart uploads.
|
||||
// If this value is set to zero, the default value (1MB) for the Azure SDK will be used.
|
||||
// Please note that if the upload bandwidth between the SFTPGo client and SFTPGo server is
|
||||
// greater than the upload bandwidth between SFTPGo and Azure then the SFTP client has
|
||||
// to wait for the upload of the last parts to Azure after it ends the file upload to SFTPGo,
|
||||
// and it may time out.
|
||||
// Keep this in mind if you customize these parameters.
|
||||
UploadPartSize int64 `json:"upload_part_size,omitempty"`
|
||||
// How many parts are uploaded in parallel
|
||||
UploadConcurrency int `json:"upload_concurrency,omitempty"`
|
||||
// Set to true if you use an Azure emulator such as Azurite
|
||||
UseEmulator bool `json:"use_emulator,omitempty"`
|
||||
// Blob Access Tier
|
||||
AccessTier string `json:"access_tier,omitempty"`
|
||||
}
|
||||
|
||||
// CryptFsConfig defines the configuration to store local files as encrypted
|
||||
type CryptFsConfig struct {
|
||||
Passphrase *kms.Secret `json:"passphrase,omitempty"`
|
||||
}
|
||||
|
||||
// SFTPFsConfig defines the configuration for SFTP based filesystem
|
||||
type SFTPFsConfig struct {
|
||||
Endpoint string `json:"endpoint,omitempty"`
|
||||
Username string `json:"username,omitempty"`
|
||||
Password *kms.Secret `json:"password,omitempty"`
|
||||
PrivateKey *kms.Secret `json:"private_key,omitempty"`
|
||||
Fingerprints []string `json:"fingerprints,omitempty"`
|
||||
// Prefix is the path prefix to strip from SFTP resource paths.
|
||||
Prefix string `json:"prefix,omitempty"`
|
||||
// Concurrent reads are safe to use and disabling them will degrade performance.
|
||||
// Some servers automatically delete files once they are downloaded.
|
||||
// Using concurrent reads is problematic with such servers.
|
||||
DisableCouncurrentReads bool `json:"disable_concurrent_reads,omitempty"`
|
||||
// The buffer size (in MB) to use for transfers.
|
||||
// Buffering could improve performance for high latency networks.
|
||||
// With buffering enabled upload resume is not supported and a file
|
||||
// cannot be opened for both reading and writing at the same time
|
||||
// 0 means disabled.
|
||||
BufferSize int64 `json:"buffer_size,omitempty"`
|
||||
}
|
||||
|
||||
// Filesystem defines filesystem details
|
||||
type Filesystem struct {
|
||||
Provider FilesystemProvider `json:"provider"`
|
||||
S3Config S3FsConfig `json:"s3config,omitempty"`
|
||||
GCSConfig GCSFsConfig `json:"gcsconfig,omitempty"`
|
||||
AzBlobConfig AzBlobFsConfig `json:"azblobconfig,omitempty"`
|
||||
CryptConfig CryptFsConfig `json:"cryptconfig,omitempty"`
|
||||
SFTPConfig SFTPFsConfig `json:"sftpconfig,omitempty"`
|
||||
}
|
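GetProviderByName and the per-provider config structs are meant to be used together. A hedged sketch, with placeholder bucket and credentials, of resolving a provider from its configured name and filling the matching section of a Filesystem:

package main

import (
	"fmt"

	"github.com/drakkan/sftpgo/v2/kms"
	"github.com/drakkan/sftpgo/v2/sdk"
)

func main() {
	// "s3fs" and the legacy numeric string "1" both resolve to S3FilesystemProvider
	provider := sdk.GetProviderByName("s3fs")
	fmt.Println(provider.Name(), "-", provider.ShortInfo()) // s3fs - AWS S3 (Compatible)

	fs := sdk.Filesystem{
		Provider: provider,
		S3Config: sdk.S3FsConfig{
			Bucket:       "my-bucket",                     // placeholder
			Region:       "eu-west-1",                     // placeholder
			AccessKey:    "my-access-key",                 // placeholder
			AccessSecret: kms.NewPlainSecret("my-secret"), // placeholder plain-text secret
			KeyPrefix:    "users/user1/",                  // must not start with "/" and must end with "/"
		},
	}
	fmt.Println(fs.Provider.ShortInfo())
}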
35
sdk/folder.go
Normal file
|
@ -0,0 +1,35 @@
|
|||
package sdk
|
||||
|
||||
// BaseVirtualFolder defines the path for the virtual folder and the used quota limits.
|
||||
// The same folder can be shared among multiple users and each user can have different
|
||||
// quota limits or a different virtual path.
|
||||
type BaseVirtualFolder struct {
|
||||
ID int64 `json:"id"`
|
||||
Name string `json:"name"`
|
||||
MappedPath string `json:"mapped_path,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
UsedQuotaSize int64 `json:"used_quota_size"`
|
||||
// Used quota as number of files
|
||||
UsedQuotaFiles int `json:"used_quota_files"`
|
||||
// Last quota update as unix timestamp in milliseconds
|
||||
LastQuotaUpdate int64 `json:"last_quota_update"`
|
||||
// list of usernames associated with this virtual folder
|
||||
Users []string `json:"users,omitempty"`
|
||||
// Filesystem configuration details
|
||||
FsConfig Filesystem `json:"filesystem"`
|
||||
}
|
||||
|
||||
// VirtualFolder defines a mapping between an SFTPGo exposed virtual path and a
|
||||
// filesystem path outside the user home directory.
|
||||
// The specified paths must be absolute and the virtual path cannot be "/",
|
||||
// it must be a sub directory. The parent directory for the specified virtual
|
||||
// path must exist. SFTPGo will, by default, try to automatically create any missing
|
||||
// parent directory for the configured virtual folders at user login.
|
||||
type VirtualFolder struct {
|
||||
BaseVirtualFolder
|
||||
VirtualPath string `json:"virtual_path"`
|
||||
// Maximum size allowed as bytes. 0 means unlimited, -1 means included in the user quota
|
||||
QuotaSize int64 `json:"quota_size"`
|
||||
// Maximum number of files allowed. 0 means unlimited, -1 means included in the user quota
|
||||
QuotaFiles int `json:"quota_files"`
|
||||
}
|
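As an illustration of how these folder types nest, a hedged sketch that builds a virtual folder mapping and prints its JSON form; the name, paths and quota values are made up:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/drakkan/sftpgo/v2/sdk"
)

func main() {
	folder := sdk.VirtualFolder{
		BaseVirtualFolder: sdk.BaseVirtualFolder{
			Name:       "shared-docs",        // placeholder
			MappedPath: "/srv/sftpgo/shared", // absolute filesystem path
		},
		VirtualPath: "/docs", // cannot be "/", must be a sub directory
		QuotaSize:   -1,      // included in the user quota
		QuotaFiles:  -1,      // included in the user quota
	}
	data, err := json.MarshalIndent(folder, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}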
5
sdk/plugin/mkproto.sh
Executable file
|
@ -0,0 +1,5 @@
|
|||
#!/bin/bash
|
||||
|
||||
protoc notifier/proto/notifier.proto --go_out=plugins=grpc:../.. --go_out=../../..
|
||||
|
||||
|
135
sdk/plugin/notifier.go
Normal file
|
@ -0,0 +1,135 @@
|
|||
package plugin
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-plugin"
|
||||
|
||||
"github.com/drakkan/sftpgo/v2/logger"
|
||||
"github.com/drakkan/sftpgo/v2/sdk/plugin/notifier"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
)
|
||||
|
||||
// NotifierConfig defines configuration parameters for notifier plugins
|
||||
type NotifierConfig struct {
|
||||
FsEvents []string `json:"fs_events" mapstructure:"fs_events"`
|
||||
UserEvents []string `json:"user_events" mapstructure:"user_events"`
|
||||
}
|
||||
|
||||
func (c *NotifierConfig) hasActions() bool {
|
||||
if len(c.FsEvents) > 0 {
|
||||
return true
|
||||
}
|
||||
if len(c.UserEvents) > 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type notifierPlugin struct {
|
||||
config Config
|
||||
notifier notifier.Notifier
|
||||
client *plugin.Client
|
||||
}
|
||||
|
||||
func newNotifierPlugin(config Config) (*notifierPlugin, error) {
|
||||
p := ¬ifierPlugin{
|
||||
config: config,
|
||||
}
|
||||
if err := p.initialize(); err != nil {
|
||||
logger.Warn(logSender, "", "unable to create notifier plugin: %v, config %v", err, config)
|
||||
return nil, err
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (p *notifierPlugin) exited() bool {
|
||||
return p.client.Exited()
|
||||
}
|
||||
|
||||
func (p *notifierPlugin) cleanup() {
|
||||
p.client.Kill()
|
||||
}
|
||||
|
||||
func (p *notifierPlugin) initialize() error {
|
||||
killProcess(p.config.Cmd)
|
||||
logger.Debug(logSender, "", "create new plugin %v", p.config.Cmd)
|
||||
if !p.config.NotifierOptions.hasActions() {
|
||||
return fmt.Errorf("no actions defined for the notifier plugin %v", p.config.Cmd)
|
||||
}
|
||||
var secureConfig *plugin.SecureConfig
|
||||
if p.config.SHA256Sum != "" {
|
||||
// allocate the struct before setting fields, assigning to a nil pointer would panic
|
||||
secureConfig = &plugin.SecureConfig{
|
||||
Checksum: []byte(p.config.SHA256Sum),
|
||||
Hash:     sha256.New(),
|
||||
}
|
||||
}
|
||||
client := plugin.NewClient(&plugin.ClientConfig{
|
||||
HandshakeConfig: notifier.Handshake,
|
||||
Plugins: notifier.PluginMap,
|
||||
Cmd: exec.Command(p.config.Cmd, p.config.Args...),
|
||||
AllowedProtocols: []plugin.Protocol{
|
||||
plugin.ProtocolGRPC,
|
||||
},
|
||||
AutoMTLS: p.config.AutoMTLS,
|
||||
SecureConfig: secureConfig,
|
||||
Managed: false,
|
||||
Logger: &logger.HCLogAdapter{
|
||||
Logger: hclog.New(&hclog.LoggerOptions{
|
||||
Name: fmt.Sprintf("%v.%v", logSender, notifier.PluginName),
|
||||
Level: pluginsLogLevel,
|
||||
DisableTime: true,
|
||||
}),
|
||||
},
|
||||
})
|
||||
rpcClient, err := client.Client()
|
||||
if err != nil {
|
||||
logger.Debug(logSender, "", "unable to get rpc client for plugin %v: %v", p.config.Cmd, err)
|
||||
return err
|
||||
}
|
||||
raw, err := rpcClient.Dispense(notifier.PluginName)
|
||||
if err != nil {
|
||||
logger.Debug(logSender, "", "unable to get plugin %v from rpc client for plugin %v: %v",
|
||||
notifier.PluginName, p.config.Cmd, err)
|
||||
return err
|
||||
}
|
||||
|
||||
p.client = client
|
||||
p.notifier = raw.(notifier.Notifier)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *notifierPlugin) notifyFsAction(action, username, fsPath, fsTargetPath, sshCmd, protocol string, fileSize int64, errAction error) {
|
||||
if !util.IsStringInSlice(action, p.config.NotifierOptions.FsEvents) {
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
status := 1
|
||||
if errAction != nil {
|
||||
status = 0
|
||||
}
|
||||
if err := p.notifier.NotifyFsEvent(action, username, fsPath, fsTargetPath, sshCmd, protocol, fileSize, status); err != nil {
|
||||
logger.Warn(logSender, "", "unable to send fs action notification to plugin %v: %v", p.config.Cmd, err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (p *notifierPlugin) notifyUserAction(action string, user Renderer) {
|
||||
if !util.IsStringInSlice(action, p.config.NotifierOptions.UserEvents) {
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
userAsJSON, err := user.RenderAsJSON(action != "delete")
|
||||
if err != nil {
|
||||
logger.Warn(logSender, "", "unable to render user as json for action %v: %v", action, err)
|
||||
return
|
||||
}
|
||||
if err := p.notifier.NotifyUserEvent(action, userAsJSON); err != nil {
|
||||
logger.Warn(logSender, "", "unable to send user action notification to plugin %v: %v", p.config.Cmd, err)
|
||||
}
|
||||
}()
|
||||
}
|
72
sdk/plugin/notifier/grpc.go
Normal file
|
@ -0,0 +1,72 @@
|
|||
package notifier
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
|
||||
"github.com/drakkan/sftpgo/v2/sdk/plugin/notifier/proto"
|
||||
)
|
||||
|
||||
const (
|
||||
rpcTimeout = 20 * time.Second
|
||||
)
|
||||
|
||||
// GRPCClient is an implementation of Notifier interface that talks over RPC.
|
||||
type GRPCClient struct {
|
||||
client proto.NotifierClient
|
||||
}
|
||||
|
||||
// NotifyFsEvent implements the Notifier interface
|
||||
func (c *GRPCClient) NotifyFsEvent(action, username, fsPath, fsTargetPath, sshCmd, protocol string, fileSize int64, status int) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), rpcTimeout)
|
||||
defer cancel()
|
||||
|
||||
_, err := c.client.SendFsEvent(ctx, &proto.FsEvent{
|
||||
Timestamp: timestamppb.New(time.Now()),
|
||||
Action: action,
|
||||
Username: username,
|
||||
FsPath: fsPath,
|
||||
FsTargetPath: fsTargetPath,
|
||||
SshCmd: sshCmd,
|
||||
FileSize: fileSize,
|
||||
Protocol: protocol,
|
||||
Status: int32(status),
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// NotifyUserEvent implements the Notifier interface
|
||||
func (c *GRPCClient) NotifyUserEvent(action string, user []byte) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), rpcTimeout)
|
||||
defer cancel()
|
||||
|
||||
_, err := c.client.SendUserEvent(ctx, &proto.UserEvent{
|
||||
Timestamp: timestamppb.New(time.Now()),
|
||||
Action: action,
|
||||
User: user,
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// GRPCServer defines the gRPC server that GRPCClient talks to.
|
||||
type GRPCServer struct {
|
||||
Impl Notifier
|
||||
}
|
||||
|
||||
// SendFsEvent implements the server side fs notify method
|
||||
func (s *GRPCServer) SendFsEvent(ctx context.Context, req *proto.FsEvent) (*emptypb.Empty, error) {
|
||||
err := s.Impl.NotifyFsEvent(req.Action, req.Username, req.FsPath, req.FsTargetPath, req.SshCmd,
|
||||
req.Protocol, req.FileSize, int(req.Status))
|
||||
return &emptypb.Empty{}, err
|
||||
}
|
||||
|
||||
// SendUserEvent implements the server side user notify method
|
||||
func (s *GRPCServer) SendUserEvent(ctx context.Context, req *proto.UserEvent) (*emptypb.Empty, error) {
|
||||
err := s.Impl.NotifyUserEvent(req.Action, req.User)
|
||||
return &emptypb.Empty{}, err
|
||||
}
|
57
sdk/plugin/notifier/notifier.go
Normal file
|
@ -0,0 +1,57 @@
|
|||
// Package notifier defines the implementation for event notifier plugins.
|
||||
// Notifier plugins allow receiving filesystem events such as file uploads,
|
||||
// downloads etc. and user events such as add, update, delete.
|
||||
package notifier
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/hashicorp/go-plugin"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/drakkan/sftpgo/v2/sdk/plugin/notifier/proto"
|
||||
)
|
||||
|
||||
const (
|
||||
// PluginName defines the name for a notifier plugin
|
||||
PluginName = "notifier"
|
||||
)
|
||||
|
||||
// Handshake is a common handshake that is shared by plugin and host.
|
||||
var Handshake = plugin.HandshakeConfig{
|
||||
ProtocolVersion: 1,
|
||||
MagicCookieKey: "SFTPGO_NOTIFIER_PLUGIN",
|
||||
MagicCookieValue: "c499b98b-cd59-4df2-92b3-6268817f4d80",
|
||||
}
|
||||
|
||||
// PluginMap is the map of plugins we can dispense.
|
||||
var PluginMap = map[string]plugin.Plugin{
|
||||
PluginName: &Plugin{},
|
||||
}
|
||||
|
||||
// Notifier defines the interface for notifier plugins
|
||||
type Notifier interface {
|
||||
NotifyFsEvent(action, username, fsPath, fsTargetPath, sshCmd, protocol string, fileSize int64, status int) error
|
||||
NotifyUserEvent(action string, user []byte) error
|
||||
}
|
||||
|
||||
// Plugin defines the implementation to serve/connect to a notifier plugin
|
||||
type Plugin struct {
|
||||
plugin.Plugin
|
||||
Impl Notifier
|
||||
}
|
||||
|
||||
// GRPCServer defines the GRPC server implementation for this plugin
|
||||
func (p *Plugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
|
||||
proto.RegisterNotifierServer(s, &GRPCServer{
|
||||
Impl: p.Impl,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
// GRPCClient defines the GRPC client implementation for this plugin
|
||||
func (p *Plugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
|
||||
return &GRPCClient{
|
||||
client: proto.NewNotifierClient(c),
|
||||
}, nil
|
||||
}
|
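Handshake, PluginMap and the Notifier interface are everything an external notifier needs to expose itself to SFTPGo over gRPC. A hedged sketch of the plugin side, with a hypothetical myNotifier implementation, using go-plugin's standard Serve call:

package main

import (
	"log"

	"github.com/hashicorp/go-plugin"

	"github.com/drakkan/sftpgo/v2/sdk/plugin/notifier"
)

// myNotifier is a hypothetical Notifier implementation used only for this sketch.
type myNotifier struct{}

func (n *myNotifier) NotifyFsEvent(action, username, fsPath, fsTargetPath, sshCmd, protocol string, fileSize int64, status int) error {
	log.Printf("fs event %q for user %q on %q (%d bytes, status %d)", action, username, fsPath, fileSize, status)
	return nil
}

func (n *myNotifier) NotifyUserEvent(action string, user []byte) error {
	log.Printf("user event %q, payload %d bytes", action, len(user))
	return nil
}

func main() {
	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: notifier.Handshake,
		Plugins: map[string]plugin.Plugin{
			notifier.PluginName: &notifier.Plugin{Impl: &myNotifier{}},
		},
		// gRPC is the only protocol the SFTPGo side allows for notifier plugins
		GRPCServer: plugin.DefaultGRPCServer,
	})
}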
448
sdk/plugin/notifier/proto/notifier.pb.go
Normal file
|
@ -0,0 +1,448 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v3.17.3
|
||||
// source: notifier/proto/notifier.proto
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
context "context"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
emptypb "google.golang.org/protobuf/types/known/emptypb"
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type FsEvent struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
|
||||
Action string `protobuf:"bytes,2,opt,name=action,proto3" json:"action,omitempty"`
|
||||
Username string `protobuf:"bytes,3,opt,name=username,proto3" json:"username,omitempty"`
|
||||
FsPath string `protobuf:"bytes,4,opt,name=fs_path,json=fsPath,proto3" json:"fs_path,omitempty"`
|
||||
FsTargetPath string `protobuf:"bytes,5,opt,name=fs_target_path,json=fsTargetPath,proto3" json:"fs_target_path,omitempty"`
|
||||
SshCmd string `protobuf:"bytes,6,opt,name=ssh_cmd,json=sshCmd,proto3" json:"ssh_cmd,omitempty"`
|
||||
FileSize int64 `protobuf:"varint,7,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"`
|
||||
Protocol string `protobuf:"bytes,8,opt,name=protocol,proto3" json:"protocol,omitempty"`
|
||||
Status int32 `protobuf:"varint,9,opt,name=status,proto3" json:"status,omitempty"`
|
||||
}
|
||||
|
||||
func (x *FsEvent) Reset() {
|
||||
*x = FsEvent{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_notifier_proto_notifier_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *FsEvent) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*FsEvent) ProtoMessage() {}
|
||||
|
||||
func (x *FsEvent) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_notifier_proto_notifier_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use FsEvent.ProtoReflect.Descriptor instead.
|
||||
func (*FsEvent) Descriptor() ([]byte, []int) {
|
||||
return file_notifier_proto_notifier_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *FsEvent) GetTimestamp() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.Timestamp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *FsEvent) GetAction() string {
|
||||
if x != nil {
|
||||
return x.Action
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *FsEvent) GetUsername() string {
|
||||
if x != nil {
|
||||
return x.Username
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *FsEvent) GetFsPath() string {
|
||||
if x != nil {
|
||||
return x.FsPath
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *FsEvent) GetFsTargetPath() string {
|
||||
if x != nil {
|
||||
return x.FsTargetPath
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *FsEvent) GetSshCmd() string {
|
||||
if x != nil {
|
||||
return x.SshCmd
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *FsEvent) GetFileSize() int64 {
|
||||
if x != nil {
|
||||
return x.FileSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *FsEvent) GetProtocol() string {
|
||||
if x != nil {
|
||||
return x.Protocol
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *FsEvent) GetStatus() int32 {
|
||||
if x != nil {
|
||||
return x.Status
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type UserEvent struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
|
||||
Action string `protobuf:"bytes,2,opt,name=action,proto3" json:"action,omitempty"`
|
||||
User []byte `protobuf:"bytes,3,opt,name=user,proto3" json:"user,omitempty"` // SFTPGo user json serialized
|
||||
}
|
||||
|
||||
func (x *UserEvent) Reset() {
|
||||
*x = UserEvent{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_notifier_proto_notifier_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *UserEvent) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*UserEvent) ProtoMessage() {}
|
||||
|
||||
func (x *UserEvent) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_notifier_proto_notifier_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use UserEvent.ProtoReflect.Descriptor instead.
|
||||
func (*UserEvent) Descriptor() ([]byte, []int) {
|
||||
return file_notifier_proto_notifier_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *UserEvent) GetTimestamp() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.Timestamp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *UserEvent) GetAction() string {
|
||||
if x != nil {
|
||||
return x.Action
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *UserEvent) GetUser() []byte {
|
||||
if x != nil {
|
||||
return x.User
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_notifier_proto_notifier_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_notifier_proto_notifier_proto_rawDesc = []byte{
|
||||
0x0a, 0x1d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
|
||||
0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
|
||||
0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa0, 0x02, 0x0a, 0x07, 0x46, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74,
|
||||
0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
|
||||
0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17,
|
||||
0x0a, 0x07, 0x66, 0x73, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x06, 0x66, 0x73, 0x50, 0x61, 0x74, 0x68, 0x12, 0x24, 0x0a, 0x0e, 0x66, 0x73, 0x5f, 0x74, 0x61,
|
||||
0x72, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x0c, 0x66, 0x73, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x17, 0x0a,
|
||||
0x07, 0x73, 0x73, 0x68, 0x5f, 0x63, 0x6d, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
|
||||
0x73, 0x73, 0x68, 0x43, 0x6d, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73,
|
||||
0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x53,
|
||||
0x69, 0x7a, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18,
|
||||
0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12,
|
||||
0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52,
|
||||
0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x71, 0x0a, 0x09, 0x55, 0x73, 0x65, 0x72, 0x45,
|
||||
0x76, 0x65, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
|
||||
0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
|
||||
0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16,
|
||||
0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
|
||||
0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x03,
|
||||
0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x32, 0x7c, 0x0a, 0x08, 0x4e, 0x6f,
|
||||
0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, 0x46, 0x73,
|
||||
0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x46, 0x73,
|
||||
0x45, 0x76, 0x65, 0x6e, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x39, 0x0a,
|
||||
0x0d, 0x53, 0x65, 0x6e, 0x64, 0x55, 0x73, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x10,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74,
|
||||
0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
|
||||
0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x1b, 0x5a, 0x19, 0x73, 0x64, 0x6b, 0x2f,
|
||||
0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x2f,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_notifier_proto_notifier_proto_rawDescOnce sync.Once
|
||||
file_notifier_proto_notifier_proto_rawDescData = file_notifier_proto_notifier_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_notifier_proto_notifier_proto_rawDescGZIP() []byte {
|
||||
file_notifier_proto_notifier_proto_rawDescOnce.Do(func() {
|
||||
file_notifier_proto_notifier_proto_rawDescData = protoimpl.X.CompressGZIP(file_notifier_proto_notifier_proto_rawDescData)
|
||||
})
|
||||
return file_notifier_proto_notifier_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_notifier_proto_notifier_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_notifier_proto_notifier_proto_goTypes = []interface{}{
|
||||
(*FsEvent)(nil), // 0: proto.FsEvent
|
||||
(*UserEvent)(nil), // 1: proto.UserEvent
|
||||
(*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp
|
||||
(*emptypb.Empty)(nil), // 3: google.protobuf.Empty
|
||||
}
|
||||
var file_notifier_proto_notifier_proto_depIdxs = []int32{
|
||||
2, // 0: proto.FsEvent.timestamp:type_name -> google.protobuf.Timestamp
|
||||
2, // 1: proto.UserEvent.timestamp:type_name -> google.protobuf.Timestamp
|
||||
0, // 2: proto.Notifier.SendFsEvent:input_type -> proto.FsEvent
|
||||
1, // 3: proto.Notifier.SendUserEvent:input_type -> proto.UserEvent
|
||||
3, // 4: proto.Notifier.SendFsEvent:output_type -> google.protobuf.Empty
|
||||
3, // 5: proto.Notifier.SendUserEvent:output_type -> google.protobuf.Empty
|
||||
4, // [4:6] is the sub-list for method output_type
|
||||
2, // [2:4] is the sub-list for method input_type
|
||||
2, // [2:2] is the sub-list for extension type_name
|
||||
2, // [2:2] is the sub-list for extension extendee
|
||||
0, // [0:2] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_notifier_proto_notifier_proto_init() }
|
||||
func file_notifier_proto_notifier_proto_init() {
|
||||
if File_notifier_proto_notifier_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_notifier_proto_notifier_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*FsEvent); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_notifier_proto_notifier_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*UserEvent); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_notifier_proto_notifier_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_notifier_proto_notifier_proto_goTypes,
|
||||
DependencyIndexes: file_notifier_proto_notifier_proto_depIdxs,
|
||||
MessageInfos: file_notifier_proto_notifier_proto_msgTypes,
|
||||
}.Build()
|
||||
File_notifier_proto_notifier_proto = out.File
|
||||
file_notifier_proto_notifier_proto_rawDesc = nil
|
||||
file_notifier_proto_notifier_proto_goTypes = nil
|
||||
file_notifier_proto_notifier_proto_depIdxs = nil
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConnInterface
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion6
|
||||
|
||||
// NotifierClient is the client API for Notifier service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type NotifierClient interface {
|
||||
SendFsEvent(ctx context.Context, in *FsEvent, opts ...grpc.CallOption) (*emptypb.Empty, error)
|
||||
SendUserEvent(ctx context.Context, in *UserEvent, opts ...grpc.CallOption) (*emptypb.Empty, error)
|
||||
}
|
||||
|
||||
type notifierClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewNotifierClient(cc grpc.ClientConnInterface) NotifierClient {
|
||||
return ¬ifierClient{cc}
|
||||
}
|
||||
|
||||
func (c *notifierClient) SendFsEvent(ctx context.Context, in *FsEvent, opts ...grpc.CallOption) (*emptypb.Empty, error) {
|
||||
out := new(emptypb.Empty)
|
||||
err := c.cc.Invoke(ctx, "/proto.Notifier/SendFsEvent", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *notifierClient) SendUserEvent(ctx context.Context, in *UserEvent, opts ...grpc.CallOption) (*emptypb.Empty, error) {
|
||||
out := new(emptypb.Empty)
|
||||
err := c.cc.Invoke(ctx, "/proto.Notifier/SendUserEvent", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// NotifierServer is the server API for Notifier service.
|
||||
type NotifierServer interface {
|
||||
SendFsEvent(context.Context, *FsEvent) (*emptypb.Empty, error)
|
||||
SendUserEvent(context.Context, *UserEvent) (*emptypb.Empty, error)
|
||||
}
|
||||
|
||||
// UnimplementedNotifierServer can be embedded to have forward compatible implementations.
|
||||
type UnimplementedNotifierServer struct {
|
||||
}
|
||||
|
||||
func (*UnimplementedNotifierServer) SendFsEvent(context.Context, *FsEvent) (*emptypb.Empty, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method SendFsEvent not implemented")
|
||||
}
|
||||
func (*UnimplementedNotifierServer) SendUserEvent(context.Context, *UserEvent) (*emptypb.Empty, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method SendUserEvent not implemented")
|
||||
}
|
||||
|
||||
func RegisterNotifierServer(s *grpc.Server, srv NotifierServer) {
|
||||
s.RegisterService(&_Notifier_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _Notifier_SendFsEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(FsEvent)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(NotifierServer).SendFsEvent(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/proto.Notifier/SendFsEvent",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(NotifierServer).SendFsEvent(ctx, req.(*FsEvent))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Notifier_SendUserEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(UserEvent)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(NotifierServer).SendUserEvent(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/proto.Notifier/SendUserEvent",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(NotifierServer).SendUserEvent(ctx, req.(*UserEvent))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _Notifier_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "proto.Notifier",
|
||||
HandlerType: (*NotifierServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "SendFsEvent",
|
||||
Handler: _Notifier_SendFsEvent_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "SendUserEvent",
|
||||
Handler: _Notifier_SendUserEvent_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "notifier/proto/notifier.proto",
|
||||
}
|
30
sdk/plugin/notifier/proto/notifier.proto
Normal file
|
@ -0,0 +1,30 @@
|
|||
syntax = "proto3";
|
||||
package proto;
|
||||
|
||||
import "google/protobuf/timestamp.proto";
|
||||
import "google/protobuf/empty.proto";
|
||||
|
||||
option go_package = "sdk/plugin/notifier/proto";
|
||||
|
||||
message FsEvent {
|
||||
google.protobuf.Timestamp timestamp = 1;
|
||||
string action = 2;
|
||||
string username = 3;
|
||||
string fs_path = 4;
|
||||
string fs_target_path = 5;
|
||||
string ssh_cmd = 6;
|
||||
int64 file_size = 7;
|
||||
string protocol = 8;
|
||||
int32 status = 9;
|
||||
}
|
||||
|
||||
message UserEvent {
|
||||
google.protobuf.Timestamp timestamp = 1;
|
||||
string action = 2;
|
||||
bytes user = 3; // SFTPGo user json serialized
|
||||
}
|
||||
|
||||
service Notifier {
|
||||
rpc SendFsEvent(FsEvent) returns (google.protobuf.Empty);
|
||||
rpc SendUserEvent(UserEvent) returns (google.protobuf.Empty);
|
||||
}
|
166
sdk/plugin/plugin.go
Normal file
|
@ -0,0 +1,166 @@
|
|||
// Package plugin provides support for the SFTPGo plugin system
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
||||
"github.com/drakkan/sftpgo/v2/logger"
|
||||
"github.com/drakkan/sftpgo/v2/sdk/plugin/notifier"
|
||||
)
|
||||
|
||||
const (
|
||||
logSender = "plugins"
|
||||
)
|
||||
|
||||
var (
|
||||
// Handler defines the plugins manager
|
||||
Handler Manager
|
||||
pluginsLogLevel = hclog.Debug
|
||||
)
|
||||
|
||||
// Renderer defines the interface for generic objects rendering
|
||||
type Renderer interface {
|
||||
RenderAsJSON(reload bool) ([]byte, error)
|
||||
}
|
||||
|
||||
// Config defines a plugin configuration
|
||||
type Config struct {
|
||||
// Plugin type
|
||||
Type string `json:"type" mapstructure:"type"`
|
||||
// NotifierOptions defines additional options for notifier plugins
|
||||
NotifierOptions NotifierConfig `json:"notifier_options" mapstructure:"notifier_options"`
|
||||
// Path to the plugin executable
|
||||
Cmd string `json:"cmd" mapstructure:"cmd"`
|
||||
// Args to pass to the plugin executable
|
||||
Args []string `json:"args" mapstructure:"args"`
|
||||
// SHA256 checksum for the plugin executable.
|
||||
// If not empty it will be used to verify the integrity of the executable
|
||||
SHA256Sum string `json:"sha256sum" mapstructure:"sha256sum"`
|
||||
// If enabled the client and the server automatically negotiate mTLS for
|
||||
// transport authentication. This ensures that only the original client will
|
||||
// be allowed to connect to the server, and all other connections will be
|
||||
// rejected. The client will also refuse to connect to any server that isn't
|
||||
// the original instance started by the client.
|
||||
AutoMTLS bool `json:"auto_mtls" mapstructure:"auto_mtls"`
|
||||
}
|
||||
|
||||
// Manager handles enabled plugins
|
||||
type Manager struct {
|
||||
// List of configured plugins
|
||||
Configs []Config `json:"plugins" mapstructure:"plugins"`
|
||||
mu sync.RWMutex
|
||||
notifiers []*notifierPlugin
|
||||
}
|
||||
|
||||
// Initialize initializes the configured plugins
|
||||
func Initialize(configs []Config, logVerbose bool) error {
|
||||
Handler = Manager{
|
||||
Configs: configs,
|
||||
}
|
||||
|
||||
if logVerbose {
|
||||
pluginsLogLevel = hclog.Debug
|
||||
} else {
|
||||
pluginsLogLevel = hclog.Info
|
||||
}
|
||||
|
||||
for _, config := range configs {
|
||||
switch config.Type {
|
||||
case notifier.PluginName:
|
||||
plugin, err := newNotifierPlugin(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
Handler.notifiers = append(Handler.notifiers, plugin)
|
||||
default:
|
||||
return fmt.Errorf("unsupported plugin type: %v", config.Type)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NotifyFsEvent sends the fs event notifications using any defined notifier plugins
|
||||
func (m *Manager) NotifyFsEvent(action, username, fsPath, fsTargetPath, sshCmd, protocol string, fileSize int64, err error) {
|
||||
m.mu.RLock()
|
||||
|
||||
var crashedIdxs []int
|
||||
for idx, n := range m.notifiers {
|
||||
if n.exited() {
|
||||
crashedIdxs = append(crashedIdxs, idx)
|
||||
} else {
|
||||
n.notifyFsAction(action, username, fsPath, fsTargetPath, sshCmd, protocol, fileSize, err)
|
||||
}
|
||||
}
|
||||
|
||||
m.mu.RUnlock()
|
||||
|
||||
if len(crashedIdxs) > 0 {
|
||||
m.restartCrashedNotifiers(crashedIdxs)
|
||||
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
for _, idx := range crashedIdxs {
|
||||
if !m.notifiers[idx].exited() {
|
||||
m.notifiers[idx].notifyFsAction(action, username, fsPath, fsTargetPath, sshCmd, protocol, fileSize, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NotifyUserEvent sends the user event notifications using any defined notifier plugins
|
||||
func (m *Manager) NotifyUserEvent(action string, user Renderer) {
|
||||
m.mu.RLock()
|
||||
|
||||
var crashedIdxs []int
|
||||
for idx, n := range m.notifiers {
|
||||
if n.exited() {
|
||||
crashedIdxs = append(crashedIdxs, idx)
|
||||
} else {
|
||||
n.notifyUserAction(action, user)
|
||||
}
|
||||
}
|
||||
|
||||
m.mu.RUnlock()
|
||||
|
||||
if len(crashedIdxs) > 0 {
|
||||
m.restartCrashedNotifiers(crashedIdxs)
|
||||
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
for _, idx := range crashedIdxs {
|
||||
if !m.notifiers[idx].exited() {
|
||||
m.notifiers[idx].notifyUserAction(action, user)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Manager) restartCrashedNotifiers(crashedIdxs []int) {
|
||||
// lock once for the whole loop: a deferred Unlock inside the loop body would
|
||||
// keep the mutex held across iterations and deadlock on the second Lock
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
for _, idx := range crashedIdxs {
|
||||
if m.notifiers[idx].exited() {
|
||||
logger.Info(logSender, "", "try to restart crashed plugin %v", m.Configs[idx].Cmd)
|
||||
plugin, err := newNotifierPlugin(m.Configs[idx])
|
||||
if err == nil {
|
||||
m.notifiers[idx] = plugin
|
||||
} else {
|
||||
logger.Warn(logSender, "", "plugin %v crashed and restart failed: %v", m.Configs[idx].Cmd, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup releases all the active plugins
|
||||
func (m *Manager) Cleanup() {
|
||||
for _, n := range m.notifiers {
|
||||
logger.Debug(logSender, "", "cleanup plugin %v", n.config.Cmd)
|
||||
n.cleanup()
|
||||
}
|
||||
}
|
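On the host side the Manager is driven through Initialize and the package level Handler. A hedged sketch of wiring a single notifier plugin programmatically; the command path and event names are placeholders, and the usual way to provide this is the SFTPGo configuration file:

package main

import (
	"log"

	"github.com/drakkan/sftpgo/v2/sdk/plugin"
)

func main() {
	configs := []plugin.Config{
		{
			Type: "notifier",
			NotifierOptions: plugin.NotifierConfig{
				FsEvents:   []string{"upload", "delete"}, // placeholder event names
				UserEvents: []string{"add", "update"},
			},
			Cmd:      "/usr/local/lib/sftpgo/notifier-plugin", // placeholder path
			AutoMTLS: true,
		},
	}
	if err := plugin.Initialize(configs, true); err != nil {
		log.Fatalf("unable to initialize plugins: %v", err)
	}
	defer plugin.Handler.Cleanup()

	// the SFTPGo core then calls the handler on events, for example:
	plugin.Handler.NotifyFsEvent("upload", "user1", "/tmp/file.txt", "", "", "SFTP", 1024, nil)
}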
25
sdk/plugin/util.go
Normal file
|
@ -0,0 +1,25 @@
|
|||
package plugin
|
||||
|
||||
import (
|
||||
"github.com/shirou/gopsutil/v3/process"
|
||||
|
||||
"github.com/drakkan/sftpgo/v2/logger"
|
||||
)
|
||||
|
||||
func killProcess(processPath string) {
|
||||
procs, err := process.Processes()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, p := range procs {
|
||||
cmdLine, err := p.Exe()
|
||||
if err == nil {
|
||||
if cmdLine == processPath {
|
||||
err = p.Kill()
|
||||
logger.Debug(logSender, "", "killed process %v, pid %v, err %v", cmdLine, p.Pid, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
logger.Debug(logSender, "", "no match for plugin process %v", processPath)
|
||||
}
|
2
sdk/sdk.go
Normal file
|
@ -0,0 +1,2 @@
|
|||
// Package sdk provides SFTPGo data structures primarily intended for use within plugins
|
||||
package sdk
|
181
sdk/user.go
Normal file
|
@ -0,0 +1,181 @@
|
|||
package sdk
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
)
|
||||
|
||||
// Web Client restrictions
|
||||
const (
|
||||
WebClientPubKeyChangeDisabled = "publickey-change-disabled"
|
||||
)
|
||||
|
||||
var (
|
||||
// WebClientOptions defines the available options for the web client interface
|
||||
WebClientOptions = []string{WebClientPubKeyChangeDisabled}
|
||||
)
|
||||
|
||||
// TLSUsername defines the TLS certificate attribute to use as username
|
||||
type TLSUsername string
|
||||
|
||||
// Supported certificate attributes to use as username
|
||||
const (
|
||||
TLSUsernameNone TLSUsername = "None"
|
||||
TLSUsernameCN TLSUsername = "CommonName"
|
||||
)
|
||||
|
||||
// DirectoryPermissions defines permissions for a directory virtual path
|
||||
type DirectoryPermissions struct {
|
||||
Path string
|
||||
Permissions []string
|
||||
}
|
||||
|
||||
// HasPerm returns true if the directory has the specified permissions
|
||||
func (d *DirectoryPermissions) HasPerm(perm string) bool {
|
||||
return util.IsStringInSlice(perm, d.Permissions)
|
||||
}
|
||||
|
||||
// PatternsFilter defines filters based on shell like patterns.
|
||||
// These restrictions do not apply to files listing for performance reasons, so
|
||||
// a denied file cannot be downloaded/overwritten/renamed but will still be
|
||||
// in the list of files.
|
||||
// System commands such as Git and rsync interact with the filesystem directly
|
||||
// and are not aware of these restrictions, so they are not allowed
|
||||
// inside paths with pattern filters
|
||||
type PatternsFilter struct {
|
||||
// Virtual path, if no other specific filter is defined, the filter applies to
|
||||
// sub directories too.
|
||||
// For example if filters are defined for the paths "/" and "/sub" then the
|
||||
// filters for "/" are applied for any file outside the "/sub" directory
|
||||
Path string `json:"path"`
|
||||
// Files matching these case-insensitive patterns are allowed.
|
||||
// Denied file patterns are evaluated before the allowed ones
|
||||
AllowedPatterns []string `json:"allowed_patterns,omitempty"`
|
||||
// Files matching these case-insensitive patterns are not allowed.
|
||||
// Denied file patterns are evaluated before the allowed ones
|
||||
DeniedPatterns []string `json:"denied_patterns,omitempty"`
|
||||
}
|
||||
|
||||
// GetCommaSeparatedPatterns returns the first non empty patterns list comma separated
|
||||
func (p *PatternsFilter) GetCommaSeparatedPatterns() string {
|
||||
if len(p.DeniedPatterns) > 0 {
|
||||
return strings.Join(p.DeniedPatterns, ",")
|
||||
}
|
||||
return strings.Join(p.AllowedPatterns, ",")
|
||||
}
|
||||
|
||||
// IsDenied returns true if the patterns has one or more denied patterns
|
||||
func (p *PatternsFilter) IsDenied() bool {
|
||||
return len(p.DeniedPatterns) > 0
|
||||
}
|
||||
|
||||
// IsAllowed returns true if the patterns has one or more allowed patterns
|
||||
func (p *PatternsFilter) IsAllowed() bool {
|
||||
return len(p.AllowedPatterns) > 0
|
||||
}
|
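A short hedged sketch of declaring and inspecting a PatternsFilter; the patterns are illustrative and the matching itself is performed elsewhere by SFTPGo:

package main

import (
	"fmt"

	"github.com/drakkan/sftpgo/v2/sdk"
)

func main() {
	filter := sdk.PatternsFilter{
		Path:            "/uploads",
		AllowedPatterns: []string{"*.jpg", "*.png"}, // illustrative patterns
		DeniedPatterns:  []string{"*.exe"},
	}
	// per the comments above, denied patterns are evaluated before allowed ones
	fmt.Println(filter.IsDenied(), filter.IsAllowed()) // true true
	fmt.Println(filter.GetCommaSeparatedPatterns())    // *.exe
}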
||||
|
||||
// HooksFilter defines user specific overrides for global hooks
|
||||
type HooksFilter struct {
|
||||
ExternalAuthDisabled bool `json:"external_auth_disabled"`
|
||||
PreLoginDisabled bool `json:"pre_login_disabled"`
|
||||
CheckPasswordDisabled bool `json:"check_password_disabled"`
|
||||
}
|
||||
|
||||
// UserFilters defines additional restrictions for a user
|
||||
// TODO: rename to UserOptions in v3
|
||||
type UserFilters struct {
|
||||
// only clients connecting from these IP/Mask are allowed.
|
||||
// IP/Mask must be in CIDR notation as defined in RFC 4632 and RFC 4291
|
||||
// for example "192.0.2.0/24" or "2001:db8::/32"
|
||||
AllowedIP []string `json:"allowed_ip,omitempty"`
|
||||
// clients connecting from these IP/Mask are not allowed.
|
||||
// Denied rules will be evaluated before allowed ones
|
||||
DeniedIP []string `json:"denied_ip,omitempty"`
|
||||
// these login methods are not allowed.
|
||||
// If null or empty any available login method is allowed
|
||||
DeniedLoginMethods []string `json:"denied_login_methods,omitempty"`
|
||||
// these protocols are not allowed.
|
||||
// If null or empty any available protocol is allowed
|
||||
DeniedProtocols []string `json:"denied_protocols,omitempty"`
|
||||
// filter based on shell patterns.
|
||||
// Please note that these restrictions can be easily bypassed.
|
||||
FilePatterns []PatternsFilter `json:"file_patterns,omitempty"`
|
||||
// max size allowed for a single upload, 0 means unlimited
|
||||
MaxUploadFileSize int64 `json:"max_upload_file_size,omitempty"`
|
||||
// TLS certificate attribute to use as username.
|
||||
// For FTP clients it must match the name provided using the
|
||||
// "USER" command
|
||||
TLSUsername TLSUsername `json:"tls_username,omitempty"`
|
||||
// user specific hook overrides
|
||||
Hooks HooksFilter `json:"hooks,omitempty"`
|
||||
// Disable checks for existence and automatic creation of home directory
|
||||
// and virtual folders.
|
||||
// SFTPGo requires that the user's home directory, virtual folder root,
|
||||
// and intermediate paths to virtual folders exist to work properly.
|
||||
// If you already know that the required directories exist, disabling
|
||||
// these checks will speed up login.
|
||||
// You could, for example, disable these checks after the first login
|
||||
DisableFsChecks bool `json:"disable_fs_checks,omitempty"`
|
||||
// WebClient related configuration options
|
||||
WebClient []string `json:"web_client,omitempty"`
|
||||
}
type BaseUser struct {
    // Data provider unique identifier
    ID int64 `json:"id"`
    // 1 enabled, 0 disabled (login is not allowed)
    Status int `json:"status"`
    // Username
    Username string `json:"username"`
    // Account expiration date as unix timestamp in milliseconds. An expired account cannot login.
    // 0 means no expiration
    ExpirationDate int64 `json:"expiration_date"`
    // Password used for password authentication.
    // For users created using the SFTPGo REST API the password is stored using the bcrypt or argon2id hashing algorithm.
    // Checking passwords stored with pbkdf2, md5crypt and sha512crypt is supported too.
    Password string `json:"password,omitempty"`
    // PublicKeys used for public key authentication. At least one of a password or a public key is mandatory
    PublicKeys []string `json:"public_keys,omitempty"`
    // The user cannot upload or download files outside this directory. Must be an absolute path
    HomeDir string `json:"home_dir"`
    // If sftpgo runs as root system user then the created files and directories will be assigned to this system UID
    UID int `json:"uid"`
    // If sftpgo runs as root system user then the created files and directories will be assigned to this system GID
    GID int `json:"gid"`
    // Maximum concurrent sessions. 0 means unlimited
    MaxSessions int `json:"max_sessions"`
    // Maximum size allowed as bytes. 0 means unlimited
    QuotaSize int64 `json:"quota_size"`
    // Maximum number of files allowed. 0 means unlimited
    QuotaFiles int `json:"quota_files"`
    // List of the granted permissions
    Permissions map[string][]string `json:"permissions"`
    // Used quota as bytes
    UsedQuotaSize int64 `json:"used_quota_size"`
    // Used quota as number of files
    UsedQuotaFiles int `json:"used_quota_files"`
    // Last quota update as unix timestamp in milliseconds
    LastQuotaUpdate int64 `json:"last_quota_update"`
    // Maximum upload bandwidth as KB/s, 0 means unlimited
    UploadBandwidth int64 `json:"upload_bandwidth"`
    // Maximum download bandwidth as KB/s, 0 means unlimited
    DownloadBandwidth int64 `json:"download_bandwidth"`
    // Last login as unix timestamp in milliseconds
    LastLogin int64 `json:"last_login"`
    // Additional restrictions
    Filters UserFilters `json:"filters"`
    // optional description, for example full name
    Description string `json:"description,omitempty"`
    // free form text field for external systems
    AdditionalInfo string `json:"additional_info,omitempty"`
}

// User defines a SFTPGo user
type User struct {
    BaseUser
    // Mapping between virtual paths and virtual folders
    VirtualFolders []VirtualFolder `json:"virtual_folders,omitempty"`
    // Filesystem configuration details
    FsConfig Filesystem `json:"filesystem"`
}
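Most of the remaining changes in this commit are mechanical updates to call sites: the shared account fields now live in the embedded sdk.BaseUser, so user literals must wrap them accordingly while the embedded fields stay reachable on the user itself. A minimal sketch (not part of the commit, values are illustrative):

package main

import (
    "github.com/drakkan/sftpgo/v2/dataprovider"
    "github.com/drakkan/sftpgo/v2/sdk"
)

func main() {
    // Shared fields such as Username, HomeDir and Status now belong to sdk.BaseUser.
    user := dataprovider.User{
        BaseUser: sdk.BaseUser{
            Username: "example",
            HomeDir:  "/srv/sftpgo/example",
            Status:   1,
        },
    }
    // Embedded fields are still promoted, so existing accessors keep working.
    user.Permissions = map[string][]string{"/": {dataprovider.PermAny}}
}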
@@ -14,7 +14,8 @@ import (
"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/httpd"
"github.com/drakkan/sftpgo/v2/logger"
"github.com/drakkan/sftpgo/v2/utils"
"github.com/drakkan/sftpgo/v2/sdk/plugin"
"github.com/drakkan/sftpgo/v2/util"
"github.com/drakkan/sftpgo/v2/version"
)

@@ -51,7 +52,7 @@ func (s *Service) initLogger() {
if !s.LogVerbose {
logLevel = zerolog.InfoLevel
}
if !filepath.IsAbs(s.LogFilePath) && utils.IsFileInputValid(s.LogFilePath) {
if !filepath.IsAbs(s.LogFilePath) && util.IsFileInputValid(s.LogFilePath) {
s.LogFilePath = filepath.Join(s.ConfigDir, s.LogFilePath)
}
logger.InitLogger(s.LogFilePath, s.LogMaxSize, s.LogMaxBackups, s.LogMaxAge, s.LogCompress, logLevel)

@@ -97,6 +98,11 @@ func (s *Service) Start() error {
logger.ErrorToConsole("unable to initialize KMS: %v", err)
os.Exit(1)
}
if err := plugin.Initialize(config.GetPluginsConfig(), s.LogVerbose); err != nil {
logger.Error(logSender, "", "unable to initialize plugin: %v", err)
logger.ErrorToConsole("unable to initialize plugin: %v", err)
os.Exit(1)
}

providerConf := config.GetProviderConf()
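Taken together, the call added in this hunk and the cleanup calls in the stop/interrupt handlers further below give the plugin system a simple lifecycle: initialize once at startup, clean up on shutdown. A hedged sketch of that pairing; the wrapper function is hypothetical, only plugin.Initialize, config.GetPluginsConfig and plugin.Handler.Cleanup come from this commit:

package service

import (
    "fmt"

    "github.com/drakkan/sftpgo/v2/config"
    "github.com/drakkan/sftpgo/v2/sdk/plugin"
)

// runWithPlugins is illustrative only: it pairs plugin initialization at
// startup with the cleanup performed by the stop and interrupt handlers.
func runWithPlugins(logVerbose bool, startServices func() error) error {
    if err := plugin.Initialize(config.GetPluginsConfig(), logVerbose); err != nil {
        return fmt.Errorf("unable to initialize the plugin system: %w", err)
    }
    defer plugin.Handler.Cleanup()

    return startServices()
}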
@@ -146,7 +152,7 @@ func (s *Service) startServices() {
if sftpdConf.ShouldBind() {
go func() {
redactedConf := sftpdConf
redactedConf.KeyboardInteractiveHook = utils.GetRedactedURL(sftpdConf.KeyboardInteractiveHook)
redactedConf.KeyboardInteractiveHook = util.GetRedactedURL(sftpdConf.KeyboardInteractiveHook)
logger.Debug(logSender, "", "initializing SFTP server with config %+v", redactedConf)
if err := sftpdConf.Initialize(s.ConfigDir); err != nil {
logger.Error(logSender, "", "could not start SFTP server: %v", err)
@@ -18,10 +18,10 @@ import (
"github.com/drakkan/sftpgo/v2/ftpd"
"github.com/drakkan/sftpgo/v2/kms"
"github.com/drakkan/sftpgo/v2/logger"
"github.com/drakkan/sftpgo/v2/sdk"
"github.com/drakkan/sftpgo/v2/sftpd"
"github.com/drakkan/sftpgo/v2/utils"
"github.com/drakkan/sftpgo/v2/util"
"github.com/drakkan/sftpgo/v2/version"
"github.com/drakkan/sftpgo/v2/vfs"
"github.com/drakkan/sftpgo/v2/webdavd"
)

@@ -67,7 +67,7 @@ func (s *Service) StartPortableMode(sftpdPort, ftpPort, webdavPort int, enabledS
// dynamic ports starts from 49152
sftpdConf.Bindings[0].Port = 49152 + rand.Intn(15000)
}
if utils.IsStringInSlice("*", enabledSSHCommands) {
if util.IsStringInSlice("*", enabledSSHCommands) {
sftpdConf.EnabledSSHCommands = sftpd.GetSupportedSSHCommands()
} else {
sftpdConf.EnabledSSHCommands = enabledSSHCommands

@@ -230,9 +230,9 @@ func (s *Service) advertiseServices(advertiseService, advertiseCredentials bool)

func (s *Service) getPortableDirToServe() string {
var dirToServe string
if s.PortableUser.FsConfig.Provider == vfs.S3FilesystemProvider {
if s.PortableUser.FsConfig.Provider == sdk.S3FilesystemProvider {
dirToServe = s.PortableUser.FsConfig.S3Config.KeyPrefix
} else if s.PortableUser.FsConfig.Provider == vfs.GCSFilesystemProvider {
} else if s.PortableUser.FsConfig.Provider == sdk.GCSFilesystemProvider {
dirToServe = s.PortableUser.FsConfig.GCSConfig.KeyPrefix
} else {
dirToServe = s.PortableUser.HomeDir

@@ -264,19 +264,19 @@ func (s *Service) configurePortableUser() string {
func (s *Service) configurePortableSecrets() {
// we created the user before to initialize the KMS so we need to create the secret here
switch s.PortableUser.FsConfig.Provider {
case vfs.S3FilesystemProvider:
case sdk.S3FilesystemProvider:
payload := s.PortableUser.FsConfig.S3Config.AccessSecret.GetPayload()
s.PortableUser.FsConfig.S3Config.AccessSecret = kms.NewEmptySecret()
if payload != "" {
s.PortableUser.FsConfig.S3Config.AccessSecret = kms.NewPlainSecret(payload)
}
case vfs.GCSFilesystemProvider:
case sdk.GCSFilesystemProvider:
payload := s.PortableUser.FsConfig.GCSConfig.Credentials.GetPayload()
s.PortableUser.FsConfig.GCSConfig.Credentials = kms.NewEmptySecret()
if payload != "" {
s.PortableUser.FsConfig.GCSConfig.Credentials = kms.NewPlainSecret(payload)
}
case vfs.AzureBlobFilesystemProvider:
case sdk.AzureBlobFilesystemProvider:
payload := s.PortableUser.FsConfig.AzBlobConfig.AccountKey.GetPayload()
s.PortableUser.FsConfig.AzBlobConfig.AccountKey = kms.NewEmptySecret()
if payload != "" {

@@ -287,13 +287,13 @@ func (s *Service) configurePortableSecrets() {
if payload != "" {
s.PortableUser.FsConfig.AzBlobConfig.SASURL = kms.NewPlainSecret(payload)
}
case vfs.CryptedFilesystemProvider:
case sdk.CryptedFilesystemProvider:
payload := s.PortableUser.FsConfig.CryptConfig.Passphrase.GetPayload()
s.PortableUser.FsConfig.CryptConfig.Passphrase = kms.NewEmptySecret()
if payload != "" {
s.PortableUser.FsConfig.CryptConfig.Passphrase = kms.NewPlainSecret(payload)
}
case vfs.SFTPFilesystemProvider:
case sdk.SFTPFilesystemProvider:
payload := s.PortableUser.FsConfig.SFTPConfig.Password.GetPayload()
s.PortableUser.FsConfig.SFTPConfig.Password = kms.NewEmptySecret()
if payload != "" {
@@ -16,6 +16,7 @@ import (
"github.com/drakkan/sftpgo/v2/ftpd"
"github.com/drakkan/sftpgo/v2/httpd"
"github.com/drakkan/sftpgo/v2/logger"
"github.com/drakkan/sftpgo/v2/sdk/plugin"
"github.com/drakkan/sftpgo/v2/telemetry"
"github.com/drakkan/sftpgo/v2/webdavd"
)

@@ -330,6 +331,7 @@ func (s *WindowsService) Stop() error {
return fmt.Errorf("could not retrieve service status: %v", err)
}
}
plugin.Handler.Cleanup()
return nil
}

@@ -12,6 +12,7 @@ import (
"github.com/drakkan/sftpgo/v2/ftpd"
"github.com/drakkan/sftpgo/v2/httpd"
"github.com/drakkan/sftpgo/v2/logger"
"github.com/drakkan/sftpgo/v2/sdk/plugin"
"github.com/drakkan/sftpgo/v2/telemetry"
"github.com/drakkan/sftpgo/v2/webdavd"
)

@@ -71,5 +72,6 @@ func handleSIGUSR1() {

func handleInterrupt() {
logger.Debug(logSender, "", "Received interrupt request")
plugin.Handler.Cleanup()
os.Exit(0)
}
@@ -16,6 +16,7 @@ import (
"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/httpdtest"
"github.com/drakkan/sftpgo/v2/kms"
"github.com/drakkan/sftpgo/v2/sdk"
"github.com/drakkan/sftpgo/v2/vfs"
)

@@ -487,7 +488,7 @@ func getEncryptedFileSize(size int64) (int64, error) {

func getTestUserWithCryptFs(usePubKey bool) dataprovider.User {
u := getTestUser(usePubKey)
u.FsConfig.Provider = vfs.CryptedFilesystemProvider
u.FsConfig.Provider = sdk.CryptedFilesystemProvider
u.FsConfig.CryptConfig.Passphrase = kms.NewPlainSecret(testPassphrase)
return u
}
@@ -21,7 +21,8 @@ import (
"github.com/drakkan/sftpgo/v2/common"
"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/kms"
"github.com/drakkan/sftpgo/v2/utils"
"github.com/drakkan/sftpgo/v2/sdk"
"github.com/drakkan/sftpgo/v2/util"
"github.com/drakkan/sftpgo/v2/vfs"
)

@@ -155,7 +156,9 @@ func TestUploadResumeInvalidOffset(t *testing.T) {
file, err := os.Create(testfile)
assert.NoError(t, err)
user := dataprovider.User{
BaseUser: sdk.BaseUser{
Username: "testuser",
},
}
fs := vfs.NewOsFs("", os.TempDir(), "")
conn := common.NewBaseConnection("", common.ProtocolSFTP, "", user)

@@ -183,7 +186,9 @@ func TestReadWriteErrors(t *testing.T) {
assert.NoError(t, err)

user := dataprovider.User{
BaseUser: sdk.BaseUser{
Username: "testuser",
},
}
fs := vfs.NewOsFs("", os.TempDir(), "")
conn := common.NewBaseConnection("", common.ProtocolSFTP, "", user)

@@ -252,7 +257,9 @@ func TestTransferCancelFn(t *testing.T) {
isCancelled = true
}
user := dataprovider.User{
BaseUser: sdk.BaseUser{
Username: "testuser",
},
}
fs := vfs.NewOsFs("", os.TempDir(), "")
conn := common.NewBaseConnection("", common.ProtocolSFTP, "", user)

@@ -377,7 +384,7 @@ func TestSupportedSSHCommands(t *testing.T) {
assert.Equal(t, len(supportedSSHCommands), len(cmds))

for _, c := range cmds {
assert.True(t, utils.IsStringInSlice(c, supportedSSHCommands))
assert.True(t, util.IsStringInSlice(c, supportedSSHCommands))
}
}

@@ -588,11 +595,13 @@ func TestCommandsWithExtensionsFilter(t *testing.T) {
defer server.Close()
defer client.Close()
user := dataprovider.User{
BaseUser: sdk.BaseUser{
Username: "test",
HomeDir: os.TempDir(),
Status: 1,
},
}
user.Filters.FilePatterns = []dataprovider.PatternsFilter{
user.Filters.FilePatterns = []sdk.PatternsFilter{
{
Path: "/subdir",
AllowedPatterns: []string{".jpg"},

@@ -654,12 +663,14 @@ func TestSSHCommandsRemoteFs(t *testing.T) {
}
user := dataprovider.User{}
user.FsConfig = vfs.Filesystem{
Provider: vfs.S3FilesystemProvider,
Provider: sdk.S3FilesystemProvider,
S3Config: vfs.S3FsConfig{
S3FsConfig: sdk.S3FsConfig{
Bucket: "s3bucket",
Endpoint: "endpoint",
Region: "eu-west-1",
},
},
}
connection := &Connection{
BaseConnection: common.NewBaseConnection("", common.ProtocolSFTP, "", user),

@@ -702,7 +713,9 @@ func TestSSHCmdGetFsErrors(t *testing.T) {
StdErrBuffer: bytes.NewBuffer(stdErrBuf),
}
user := dataprovider.User{
BaseUser: sdk.BaseUser{
HomeDir: "relative path",
},
}
user.Permissions = map[string][]string{}
user.Permissions["/"] = []string{dataprovider.PermAny}

@@ -754,8 +767,10 @@ func TestGitVirtualFolders(t *testing.T) {
permissions := make(map[string][]string)
permissions["/"] = []string{dataprovider.PermAny}
user := dataprovider.User{
BaseUser: sdk.BaseUser{
Permissions: permissions,
HomeDir: os.TempDir(),
},
}
conn := &Connection{
BaseConnection: common.NewBaseConnection("", common.ProtocolSFTP, "", user),

@@ -800,8 +815,10 @@ func TestRsyncOptions(t *testing.T) {
permissions := make(map[string][]string)
permissions["/"] = []string{dataprovider.PermAny}
user := dataprovider.User{
BaseUser: sdk.BaseUser{
Permissions: permissions,
HomeDir: os.TempDir(),
},
}
conn := &Connection{
BaseConnection: common.NewBaseConnection("", common.ProtocolSFTP, "", user),

@@ -813,7 +830,7 @@ func TestRsyncOptions(t *testing.T) {
}
cmd, err := sshCmd.getSystemCommand()
assert.NoError(t, err)
assert.True(t, utils.IsStringInSlice("--safe-links", cmd.cmd.Args),
assert.True(t, util.IsStringInSlice("--safe-links", cmd.cmd.Args),
"--safe-links must be added if the user has the create symlinks permission")

permissions["/"] = []string{dataprovider.PermDownload, dataprovider.PermUpload, dataprovider.PermCreateDirs,

@@ -830,7 +847,7 @@ func TestRsyncOptions(t *testing.T) {
}
cmd, err = sshCmd.getSystemCommand()
assert.NoError(t, err)
assert.True(t, utils.IsStringInSlice("--munge-links", cmd.cmd.Args),
assert.True(t, util.IsStringInSlice("--munge-links", cmd.cmd.Args),
"--munge-links must be added if the user has the create symlinks permission")

sshCmd.connection.User.VirtualFolders = append(sshCmd.connection.User.VirtualFolders, vfs.VirtualFolder{

@@ -847,8 +864,10 @@ func TestSystemCommandSizeForPath(t *testing.T) {
permissions := make(map[string][]string)
permissions["/"] = []string{dataprovider.PermAny}
user := dataprovider.User{
BaseUser: sdk.BaseUser{
Permissions: permissions,
HomeDir: os.TempDir(),
},
}
fs, err := user.GetFilesystem("123")
assert.NoError(t, err)

@@ -909,8 +928,10 @@ func TestSystemCommandErrors(t *testing.T) {
err = os.WriteFile(filepath.Join(homeDir, "afile"), []byte("content"), os.ModePerm)
assert.NoError(t, err)
user := dataprovider.User{
BaseUser: sdk.BaseUser{
Permissions: permissions,
HomeDir: homeDir,
},
}
fs, err := user.GetFilesystem("123")
assert.NoError(t, err)

@@ -987,7 +1008,7 @@ func TestSystemCommandErrors(t *testing.T) {
func TestCommandGetFsError(t *testing.T) {
user := dataprovider.User{
FsConfig: vfs.Filesystem{
Provider: vfs.CryptedFilesystemProvider,
Provider: sdk.CryptedFilesystemProvider,
},
}
conn := &Connection{

@@ -1095,8 +1116,10 @@ func TestSCPUploadError(t *testing.T) {
WriteError: writeErr,
}
user := dataprovider.User{
BaseUser: sdk.BaseUser{
HomeDir: filepath.Join(os.TempDir()),
Permissions: make(map[string][]string),
},
}
user.Permissions["/"] = []string{dataprovider.PermAny}

@@ -1141,7 +1164,9 @@ func TestSCPInvalidEndDir(t *testing.T) {
}
connection := &Connection{
BaseConnection: common.NewBaseConnection("", common.ProtocolSFTP, "", dataprovider.User{
BaseUser: sdk.BaseUser{
HomeDir: os.TempDir(),
},
}),
channel: &mockSSHChannel,
}

@@ -1167,7 +1192,9 @@ func TestSCPParseUploadMessage(t *testing.T) {
fs := vfs.NewOsFs("", os.TempDir(), "")
connection := &Connection{
BaseConnection: common.NewBaseConnection("", common.ProtocolSFTP, "", dataprovider.User{
BaseUser: sdk.BaseUser{
HomeDir: os.TempDir(),
},
}),
channel: &mockSSHChannel,
}

@@ -1422,7 +1449,9 @@ func TestSCPRecursiveDownloadErrors(t *testing.T) {
fs := vfs.NewOsFs("123", os.TempDir(), "")
connection := &Connection{
BaseConnection: common.NewBaseConnection("", common.ProtocolSCP, "", dataprovider.User{
BaseUser: sdk.BaseUser{
HomeDir: os.TempDir(),
},
}),
channel: &mockSSHChannel,
}

@@ -1542,7 +1571,7 @@ func TestSCPDownloadFileData(t *testing.T) {
}
fs := vfs.NewOsFs("", os.TempDir(), "")
connection := &Connection{
BaseConnection: common.NewBaseConnection("", common.ProtocolSCP, "", dataprovider.User{HomeDir: os.TempDir()}),
BaseConnection: common.NewBaseConnection("", common.ProtocolSCP, "", dataprovider.User{BaseUser: sdk.BaseUser{HomeDir: os.TempDir()}}),
channel: &mockSSHChannelReadErr,
}
scpCommand := scpCommand{

@@ -1588,7 +1617,9 @@ func TestSCPUploadFiledata(t *testing.T) {
WriteError: writeErr,
}
user := dataprovider.User{
BaseUser: sdk.BaseUser{
Username: "testuser",
},
}
fs := vfs.NewOsFs("", os.TempDir(), "")
connection := &Connection{

@@ -1677,7 +1708,9 @@ func TestUploadError(t *testing.T) {
common.Config.UploadMode = common.UploadModeAtomic

user := dataprovider.User{
BaseUser: sdk.BaseUser{
Username: "testuser",
},
}
fs := vfs.NewOsFs("", os.TempDir(), "")
connection := &Connection{

@@ -1711,14 +1744,18 @@ func TestUploadError(t *testing.T) {

func TestTransferFailingReader(t *testing.T) {
user := dataprovider.User{
BaseUser: sdk.BaseUser{
Username: "testuser",
HomeDir: os.TempDir(),
},
FsConfig: vfs.Filesystem{
Provider: vfs.CryptedFilesystemProvider,
Provider: sdk.CryptedFilesystemProvider,
CryptConfig: vfs.CryptFsConfig{
CryptFsConfig: sdk.CryptFsConfig{
Passphrase: kms.NewPlainSecret("crypt secret"),
},
},
},
}
user.Permissions = make(map[string][]string)
user.Permissions["/"] = []string{dataprovider.PermAny}

@@ -1770,13 +1807,13 @@ func TestConnectionStatusStruct(t *testing.T) {
var transfers []common.ConnectionTransfer
transferUL := common.ConnectionTransfer{
OperationType: "upload",
StartTime: utils.GetTimeAsMsSinceEpoch(time.Now()),
StartTime: util.GetTimeAsMsSinceEpoch(time.Now()),
Size: 123,
VirtualPath: "/test.upload",
}
transferDL := common.ConnectionTransfer{
OperationType: "download",
StartTime: utils.GetTimeAsMsSinceEpoch(time.Now()),
StartTime: util.GetTimeAsMsSinceEpoch(time.Now()),
Size: 123,
VirtualPath: "/test.download",
}

@@ -1787,8 +1824,8 @@ func TestConnectionStatusStruct(t *testing.T) {
ConnectionID: "123",
ClientVersion: "fakeClient-1.0.0",
RemoteAddress: "127.0.0.1:1234",
ConnectionTime: utils.GetTimeAsMsSinceEpoch(time.Now()),
LastActivity: utils.GetTimeAsMsSinceEpoch(time.Now()),
ConnectionTime: util.GetTimeAsMsSinceEpoch(time.Now()),
LastActivity: util.GetTimeAsMsSinceEpoch(time.Now()),
Protocol: "SFTP",
Transfers: transfers,
}

@@ -1878,8 +1915,10 @@ func TestRecursiveCopyErrors(t *testing.T) {
permissions := make(map[string][]string)
permissions["/"] = []string{dataprovider.PermAny}
user := dataprovider.User{
BaseUser: sdk.BaseUser{
Permissions: permissions,
HomeDir: os.TempDir(),
},
}
fs, err := user.GetFilesystem("123")
assert.NoError(t, err)

@@ -1900,13 +1939,15 @@ func TestSFTPSubSystem(t *testing.T) {
permissions := make(map[string][]string)
permissions["/"] = []string{dataprovider.PermAny}
user := &dataprovider.User{
BaseUser: sdk.BaseUser{
Permissions: permissions,
HomeDir: os.TempDir(),
},
}
user.FsConfig.Provider = vfs.AzureBlobFilesystemProvider
user.FsConfig.Provider = sdk.AzureBlobFilesystemProvider
err := ServeSubSystemConnection(user, "connID", nil, nil)
assert.Error(t, err)
user.FsConfig.Provider = vfs.LocalFilesystemProvider
user.FsConfig.Provider = sdk.LocalFilesystemProvider

buf := make([]byte, 0, 4096)
stdErrBuf := make([]byte, 0, 4096)
@@ -15,7 +15,7 @@ import (
"github.com/drakkan/sftpgo/v2/common"
"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/logger"
"github.com/drakkan/sftpgo/v2/utils"
"github.com/drakkan/sftpgo/v2/util"
"github.com/drakkan/sftpgo/v2/vfs"
)

@@ -548,11 +548,11 @@ func (c *scpCommand) getCommandType() string {
}

func (c *scpCommand) sendFileTime() bool {
return utils.IsStringInSlice("-p", c.args)
return util.IsStringInSlice("-p", c.args)
}

func (c *scpCommand) isRecursive() bool {
return utils.IsStringInSlice("-r", c.args)
return util.IsStringInSlice("-r", c.args)
}

// read the SCP confirmation message and the optional text message
@@ -19,8 +19,8 @@ import (
"github.com/drakkan/sftpgo/v2/common"
"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/logger"
"github.com/drakkan/sftpgo/v2/metrics"
"github.com/drakkan/sftpgo/v2/utils"
"github.com/drakkan/sftpgo/v2/metric"
"github.com/drakkan/sftpgo/v2/util"
"github.com/drakkan/sftpgo/v2/vfs"
)

@@ -210,7 +210,7 @@ func (c *Configuration) Initialize(configDir string) error {

go func(binding Binding) {
addr := binding.GetAddress()
utils.CheckTCP4Port(binding.Port)
util.CheckTCP4Port(binding.Port)
listener, err := net.Listen("tcp", addr)
if err != nil {
logger.Warn(logSender, "", "error starting listener on address %v: %v", addr, err)

@@ -355,7 +355,7 @@ func (c *Configuration) AcceptInboundConnection(conn net.Conn, config *ssh.Serve
}
}()

ipAddr := utils.GetIPFromRemoteAddress(conn.RemoteAddr().String())
ipAddr := util.GetIPFromRemoteAddress(conn.RemoteAddr().String())
common.Connections.AddClientConnection(ipAddr)
defer common.Connections.RemoveClientConnection(ipAddr)

@@ -515,7 +515,7 @@ func checkAuthError(ip string, err error) {
}
} else {
logger.ConnectionFailedLog("", ip, dataprovider.LoginMethodNoAuthTryed, common.ProtocolSSH, err.Error())
metrics.AddNoAuthTryed()
metric.AddNoAuthTryed()
common.AddDefenderEvent(ip, common.HostEventNoLoginTried)
dataprovider.ExecutePostLoginHook(&dataprovider.User{}, dataprovider.LoginMethodNoAuthTryed, ip, common.ProtocolSSH, err)
}

@@ -531,7 +531,7 @@ func loginUser(user *dataprovider.User, loginMethod, publicKey string, conn ssh.
user.Username, user.HomeDir)
return nil, fmt.Errorf("cannot login user with invalid home dir: %#v", user.HomeDir)
}
if utils.IsStringInSlice(common.ProtocolSSH, user.Filters.DeniedProtocols) {
if util.IsStringInSlice(common.ProtocolSSH, user.Filters.DeniedProtocols) {
logger.Debug(logSender, connectionID, "cannot login user %#v, protocol SSH is not allowed", user.Username)
return nil, fmt.Errorf("protocol SSH is not allowed for user %#v", user.Username)
}

@@ -569,13 +569,13 @@ func loginUser(user *dataprovider.User, loginMethod, publicKey string, conn ssh.
}

func (c *Configuration) checkSSHCommands() {
if utils.IsStringInSlice("*", c.EnabledSSHCommands) {
if util.IsStringInSlice("*", c.EnabledSSHCommands) {
c.EnabledSSHCommands = GetSupportedSSHCommands()
return
}
sshCommands := []string{}
for _, command := range c.EnabledSSHCommands {
if utils.IsStringInSlice(command, supportedSSHCommands) {
if util.IsStringInSlice(command, supportedSSHCommands) {
sshCommands = append(sshCommands, command)
} else {
logger.Warn(logSender, "", "unsupported ssh command: %#v ignored", command)

@@ -594,11 +594,11 @@ func (c *Configuration) generateDefaultHostKeys(configDir string) error {
logger.Info(logSender, "", "No host keys configured and %#v does not exist; try to create a new host key", autoFile)
logger.InfoToConsole("No host keys configured and %#v does not exist; try to create a new host key", autoFile)
if k == defaultPrivateRSAKeyName {
err = utils.GenerateRSAKeys(autoFile)
err = util.GenerateRSAKeys(autoFile)
} else if k == defaultPrivateECDSAKeyName {
err = utils.GenerateECDSAKeys(autoFile)
err = util.GenerateECDSAKeys(autoFile)
} else {
err = utils.GenerateEd25519Keys(autoFile)
err = util.GenerateEd25519Keys(autoFile)
}
if err != nil {
logger.Warn(logSender, "", "error creating host key %#v: %v", autoFile, err)

@@ -621,7 +621,7 @@ func (c *Configuration) checkHostKeyAutoGeneration(configDir string) error {
case defaultPrivateRSAKeyName:
logger.Info(logSender, "", "try to create non-existent host key %#v", k)
logger.InfoToConsole("try to create non-existent host key %#v", k)
err = utils.GenerateRSAKeys(k)
err = util.GenerateRSAKeys(k)
if err != nil {
logger.Warn(logSender, "", "error creating host key %#v: %v", k, err)
logger.WarnToConsole("error creating host key %#v: %v", k, err)

@@ -630,7 +630,7 @@ func (c *Configuration) checkHostKeyAutoGeneration(configDir string) error {
case defaultPrivateECDSAKeyName:
logger.Info(logSender, "", "try to create non-existent host key %#v", k)
logger.InfoToConsole("try to create non-existent host key %#v", k)
err = utils.GenerateECDSAKeys(k)
err = util.GenerateECDSAKeys(k)
if err != nil {
logger.Warn(logSender, "", "error creating host key %#v: %v", k, err)
logger.WarnToConsole("error creating host key %#v: %v", k, err)

@@ -639,7 +639,7 @@ func (c *Configuration) checkHostKeyAutoGeneration(configDir string) error {
case defaultPrivateEd25519KeyName:
logger.Info(logSender, "", "try to create non-existent host key %#v", k)
logger.InfoToConsole("try to create non-existent host key %#v", k)
err = utils.GenerateEd25519Keys(k)
err = util.GenerateEd25519Keys(k)
if err != nil {
logger.Warn(logSender, "", "error creating host key %#v: %v", k, err)
logger.WarnToConsole("error creating host key %#v: %v", k, err)

@@ -667,7 +667,7 @@ func (c *Configuration) checkAndLoadHostKeys(configDir string, serverConfig *ssh
}
serviceStatus.HostKeys = nil
for _, hostKey := range c.HostKeys {
if !utils.IsFileInputValid(hostKey) {
if !util.IsFileInputValid(hostKey) {
logger.Warn(logSender, "", "unable to load invalid host key %#v", hostKey)
logger.WarnToConsole("unable to load invalid host key %#v", hostKey)
continue

@@ -708,7 +708,7 @@ func (c *Configuration) checkAndLoadHostKeys(configDir string, serverConfig *ssh

func (c *Configuration) initializeCertChecker(configDir string) error {
for _, keyPath := range c.TrustedUserCAKeys {
if !utils.IsFileInputValid(keyPath) {
if !util.IsFileInputValid(keyPath) {
logger.Warn(logSender, "", "unable to load invalid trusted user CA key: %#v", keyPath)
logger.WarnToConsole("unable to load invalid trusted user CA key: %#v", keyPath)
continue

@@ -755,7 +755,7 @@ func (c *Configuration) validatePublicKeyCredentials(conn ssh.ConnMetadata, pubK

connectionID := hex.EncodeToString(conn.SessionID())
method := dataprovider.SSHLoginMethodPublicKey
ipAddr := utils.GetIPFromRemoteAddress(conn.RemoteAddr().String())
ipAddr := util.GetIPFromRemoteAddress(conn.RemoteAddr().String())
cert, ok := pubKey.(*ssh.Certificate)
if ok {
if cert.CertType != ssh.UserCert {

@@ -808,7 +808,7 @@ func (c *Configuration) validatePasswordCredentials(conn ssh.ConnMetadata, pass
if len(conn.PartialSuccessMethods()) == 1 {
method = dataprovider.SSHLoginMethodKeyAndPassword
}
ipAddr := utils.GetIPFromRemoteAddress(conn.RemoteAddr().String())
ipAddr := util.GetIPFromRemoteAddress(conn.RemoteAddr().String())
if user, err = dataprovider.CheckUserAndPass(conn.User(), string(pass), ipAddr, common.ProtocolSSH); err == nil {
sshPerm, err = loginUser(&user, method, "", conn)
}

@@ -826,7 +826,7 @@ func (c *Configuration) validateKeyboardInteractiveCredentials(conn ssh.ConnMeta
if len(conn.PartialSuccessMethods()) == 1 {
method = dataprovider.SSHLoginMethodKeyAndKeyboardInt
}
ipAddr := utils.GetIPFromRemoteAddress(conn.RemoteAddr().String())
ipAddr := util.GetIPFromRemoteAddress(conn.RemoteAddr().String())
if user, err = dataprovider.CheckKeyboardInteractiveAuth(conn.User(), c.KeyboardInteractiveHook, client,
ipAddr, common.ProtocolSSH); err == nil {
sshPerm, err = loginUser(&user, method, "", conn)

@@ -837,7 +837,7 @@ func (c *Configuration) validateKeyboardInteractiveCredentials(conn ssh.ConnMeta
}

func updateLoginMetrics(user *dataprovider.User, ip, method string, err error) {
metrics.AddLoginAttempt(method)
metric.AddLoginAttempt(method)
if err != nil {
logger.ConnectionFailedLog(user.Username, ip, method, common.ProtocolSSH, err.Error())
if method != dataprovider.SSHLoginMethodPublicKey {

@@ -845,12 +845,12 @@ func updateLoginMetrics(user *dataprovider.User, ip, method string, err error) {
// record failed login key auth only once for session if the
// authentication fails in checkAuthError
event := common.HostEventLoginFailed
if _, ok := err.(*utils.RecordNotFoundError); ok {
if _, ok := err.(*util.RecordNotFoundError); ok {
event = common.HostEventUserNotFound
}
common.AddDefenderEvent(ip, event)
}
}
metrics.AddLoginResult(method, err)
metric.AddLoginResult(method, err)
dataprovider.ExecutePostLoginHook(user, method, ip, common.ProtocolSSH, err)
}
@@ -42,8 +42,9 @@ import (
"github.com/drakkan/sftpgo/v2/httpdtest"
"github.com/drakkan/sftpgo/v2/kms"
"github.com/drakkan/sftpgo/v2/logger"
"github.com/drakkan/sftpgo/v2/sdk"
"github.com/drakkan/sftpgo/v2/sftpd"
"github.com/drakkan/sftpgo/v2/utils"
"github.com/drakkan/sftpgo/v2/util"
"github.com/drakkan/sftpgo/v2/vfs"
)

@@ -1865,7 +1866,7 @@ func TestLoginUserExpiration(t *testing.T) {
assert.NoError(t, err)
assert.Greater(t, user.LastLogin, int64(0), "last login must be updated after a successful login: %v", user.LastLogin)
}
user.ExpirationDate = utils.GetTimeAsMsSinceEpoch(time.Now()) - 120000
user.ExpirationDate = util.GetTimeAsMsSinceEpoch(time.Now()) - 120000
user, _, err = httpdtest.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
conn, client, err = getSftpClient(user, usePubKey)

@@ -1873,7 +1874,7 @@ func TestLoginUserExpiration(t *testing.T) {
client.Close()
conn.Close()
}
user.ExpirationDate = utils.GetTimeAsMsSinceEpoch(time.Now()) + 120000
user.ExpirationDate = util.GetTimeAsMsSinceEpoch(time.Now()) + 120000
_, _, err = httpdtest.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
conn, client, err = getSftpClient(user, usePubKey)

@@ -1891,7 +1892,7 @@ func TestLoginUserExpiration(t *testing.T) {
func TestLoginWithDatabaseCredentials(t *testing.T) {
usePubKey := true
u := getTestUser(usePubKey)
u.FsConfig.Provider = vfs.GCSFilesystemProvider
u.FsConfig.Provider = sdk.GCSFilesystemProvider
u.FsConfig.GCSConfig.Bucket = "testbucket"
u.FsConfig.GCSConfig.Credentials = kms.NewPlainSecret(`{ "type": "service_account" }`)

@@ -1941,7 +1942,7 @@ func TestLoginWithDatabaseCredentials(t *testing.T) {
func TestLoginInvalidFs(t *testing.T) {
usePubKey := true
u := getTestUser(usePubKey)
u.FsConfig.Provider = vfs.GCSFilesystemProvider
u.FsConfig.Provider = sdk.GCSFilesystemProvider
u.FsConfig.GCSConfig.Bucket = "test"
u.FsConfig.GCSConfig.Credentials = kms.NewPlainSecret("invalid JSON for credentials")
user, _, err := httpdtest.AddUser(u, http.StatusCreated)

@@ -3680,7 +3681,7 @@ func TestPatternsFilters(t *testing.T) {
err = sftpUploadFile(testFilePath, testFileName+".zip", testFileSize, client)
assert.NoError(t, err)
}
user.Filters.FilePatterns = []dataprovider.PatternsFilter{
user.Filters.FilePatterns = []sdk.PatternsFilter{
{
Path: "/",
AllowedPatterns: []string{"*.zIp"},

@@ -3953,17 +3954,21 @@ func TestSFTPLoopSimple(t *testing.T) {
user2 := getTestSFTPUser(usePubKey)
user1.Username += "1"
user2.Username += "2"
user1.FsConfig.Provider = vfs.SFTPFilesystemProvider
user2.FsConfig.Provider = vfs.SFTPFilesystemProvider
user1.FsConfig.Provider = sdk.SFTPFilesystemProvider
user2.FsConfig.Provider = sdk.SFTPFilesystemProvider
user1.FsConfig.SFTPConfig = vfs.SFTPFsConfig{
SFTPFsConfig: sdk.SFTPFsConfig{
Endpoint: sftpServerAddr,
Username: user2.Username,
Password: kms.NewPlainSecret(defaultPassword),
},
}
user2.FsConfig.SFTPConfig = vfs.SFTPFsConfig{
SFTPFsConfig: sdk.SFTPFsConfig{
Endpoint: sftpServerAddr,
Username: user1.Username,
Password: kms.NewPlainSecret(defaultPassword),
},
}
user1, resp, err := httpdtest.AddUser(user1, http.StatusCreated)
assert.NoError(t, err, string(resp))

@@ -4009,28 +4014,34 @@ func TestSFTPLoopVirtualFolders(t *testing.T) {
BaseVirtualFolder: vfs.BaseVirtualFolder{
Name: sftpFloderName,
FsConfig: vfs.Filesystem{
Provider: vfs.SFTPFilesystemProvider,
Provider: sdk.SFTPFilesystemProvider,
SFTPConfig: vfs.SFTPFsConfig{
SFTPFsConfig: sdk.SFTPFsConfig{
Endpoint: sftpServerAddr,
Username: user2.Username,
Password: kms.NewPlainSecret(defaultPassword),
},
},
},
},
VirtualPath: "/vdir",
})

user2.FsConfig.Provider = vfs.SFTPFilesystemProvider
user2.FsConfig.Provider = sdk.SFTPFilesystemProvider
user2.FsConfig.SFTPConfig = vfs.SFTPFsConfig{
SFTPFsConfig: sdk.SFTPFsConfig{
Endpoint: sftpServerAddr,
Username: user1.Username,
Password: kms.NewPlainSecret(defaultPassword),
},
}
user3.FsConfig.Provider = vfs.SFTPFilesystemProvider
user3.FsConfig.Provider = sdk.SFTPFilesystemProvider
user3.FsConfig.SFTPConfig = vfs.SFTPFsConfig{
SFTPFsConfig: sdk.SFTPFsConfig{
Endpoint: sftpServerAddr,
Username: user1.Username,
Password: kms.NewPlainSecret(defaultPassword),
},
}

user1, resp, err := httpdtest.AddUser(user1, http.StatusCreated)

@@ -4056,20 +4067,22 @@ func TestSFTPLoopVirtualFolders(t *testing.T) {
// user1 -> local account with the SFTP virtual folder /vdir to user2
// user2 -> local account with the SFTP virtual folder /vdir2 to user3
// user3 -> sftp user with user1 as fs
user2.FsConfig.Provider = vfs.LocalFilesystemProvider
user2.FsConfig.Provider = sdk.LocalFilesystemProvider
user2.FsConfig.SFTPConfig = vfs.SFTPFsConfig{}
user2.VirtualFolders = append(user2.VirtualFolders, vfs.VirtualFolder{
BaseVirtualFolder: vfs.BaseVirtualFolder{
Name: sftpFloderName,
FsConfig: vfs.Filesystem{
Provider: vfs.SFTPFilesystemProvider,
Provider: sdk.SFTPFilesystemProvider,
SFTPConfig: vfs.SFTPFsConfig{
SFTPFsConfig: sdk.SFTPFsConfig{
Endpoint: sftpServerAddr,
Username: user3.Username,
Password: kms.NewPlainSecret(defaultPassword),
},
},
},
},
VirtualPath: "/vdir2",
})
user2, _, err = httpdtest.UpdateUser(user2, http.StatusOK, "")

@@ -4116,11 +4129,13 @@ func TestNestedVirtualFolders(t *testing.T) {
BaseVirtualFolder: vfs.BaseVirtualFolder{
Name: folderNameCrypt,
FsConfig: vfs.Filesystem{
Provider: vfs.CryptedFilesystemProvider,
Provider: sdk.CryptedFilesystemProvider,
CryptConfig: vfs.CryptFsConfig{
CryptFsConfig: sdk.CryptFsConfig{
Passphrase: kms.NewPlainSecret(defaultPassword),
},
},
},
MappedPath: mappedPathCrypt,
},
VirtualPath: vdirCryptPath,

@@ -6736,18 +6751,24 @@ func TestRelativePaths(t *testing.T) {
filesystems := []vfs.Fs{vfs.NewOsFs("", user.GetHomeDir(), "")}
keyPrefix := strings.TrimPrefix(user.GetHomeDir(), "/") + "/"
s3config := vfs.S3FsConfig{
S3FsConfig: sdk.S3FsConfig{
KeyPrefix: keyPrefix,
},
}
s3fs, _ := vfs.NewS3Fs("", user.GetHomeDir(), "", s3config)
gcsConfig := vfs.GCSFsConfig{
GCSFsConfig: sdk.GCSFsConfig{
KeyPrefix: keyPrefix,
},
}
gcsfs, _ := vfs.NewGCSFs("", user.GetHomeDir(), "", gcsConfig)
sftpconfig := vfs.SFTPFsConfig{
SFTPFsConfig: sdk.SFTPFsConfig{
Endpoint: sftpServerAddr,
Username: defaultUsername,
Password: kms.NewPlainSecret(defaultPassword),
Prefix: keyPrefix,
},
}
sftpfs, _ := vfs.NewSFTPFs("", "", os.TempDir(), []string{user.Username}, sftpconfig)
if runtime.GOOS != osWindows {

@@ -6795,16 +6816,20 @@ func TestResolvePaths(t *testing.T) {
filesystems := []vfs.Fs{vfs.NewOsFs("", user.GetHomeDir(), "")}
keyPrefix := strings.TrimPrefix(user.GetHomeDir(), "/") + "/"
s3config := vfs.S3FsConfig{
S3FsConfig: sdk.S3FsConfig{
KeyPrefix: keyPrefix,
Bucket: "bucket",
Region: "us-east-1",
},
}
err = os.MkdirAll(user.GetHomeDir(), os.ModePerm)
assert.NoError(t, err)
s3fs, err := vfs.NewS3Fs("", user.GetHomeDir(), "", s3config)
assert.NoError(t, err)
gcsConfig := vfs.GCSFsConfig{
GCSFsConfig: sdk.GCSFsConfig{
KeyPrefix: keyPrefix,
},
}
gcsfs, _ := vfs.NewGCSFs("", user.GetHomeDir(), "", gcsConfig)
if runtime.GOOS != osWindows {

@@ -6900,20 +6925,20 @@ func TestUserPerms(t *testing.T) {

func TestFilterFilePatterns(t *testing.T) {
user := getTestUser(true)
pattern := dataprovider.PatternsFilter{
pattern := sdk.PatternsFilter{
Path: "/test",
AllowedPatterns: []string{"*.jpg", "*.png"},
DeniedPatterns: []string{"*.pdf"},
}
filters := dataprovider.UserFilters{
FilePatterns: []dataprovider.PatternsFilter{pattern},
filters := sdk.UserFilters{
FilePatterns: []sdk.PatternsFilter{pattern},
}
user.Filters = filters
assert.True(t, user.IsFileAllowed("/test/test.jPg"))
assert.False(t, user.IsFileAllowed("/test/test.pdf"))
assert.True(t, user.IsFileAllowed("/test.pDf"))

filters.FilePatterns = append(filters.FilePatterns, dataprovider.PatternsFilter{
filters.FilePatterns = append(filters.FilePatterns, sdk.PatternsFilter{
Path: "/",
AllowedPatterns: []string{"*.zip", "*.rar", "*.pdf"},
DeniedPatterns: []string{"*.gz"},

@@ -6924,7 +6949,7 @@ func TestFilterFilePatterns(t *testing.T) {
assert.False(t, user.IsFileAllowed("/test/sub/test.pdf"))
assert.False(t, user.IsFileAllowed("/test1/test.png"))

filters.FilePatterns = append(filters.FilePatterns, dataprovider.PatternsFilter{
filters.FilePatterns = append(filters.FilePatterns, sdk.PatternsFilter{
Path: "/test/sub",
DeniedPatterns: []string{"*.tar"},
})

@@ -6948,8 +6973,8 @@ func TestUserAllowedLoginMethods(t *testing.T) {
allowedMethods = user.GetAllowedLoginMethods()
assert.Equal(t, 4, len(allowedMethods))

assert.True(t, utils.IsStringInSlice(dataprovider.SSHLoginMethodKeyAndKeyboardInt, allowedMethods))
assert.True(t, utils.IsStringInSlice(dataprovider.SSHLoginMethodKeyAndPassword, allowedMethods))
assert.True(t, util.IsStringInSlice(dataprovider.SSHLoginMethodKeyAndKeyboardInt, allowedMethods))
assert.True(t, util.IsStringInSlice(dataprovider.SSHLoginMethodKeyAndPassword, allowedMethods))
}

func TestUserPartialAuth(t *testing.T) {

@@ -7000,11 +7025,11 @@ func TestUserGetNextAuthMethods(t *testing.T) {

methods = user.GetNextAuthMethods([]string{dataprovider.SSHLoginMethodPublicKey}, true)
assert.Equal(t, 2, len(methods))
assert.True(t, utils.IsStringInSlice(dataprovider.LoginMethodPassword, methods))
assert.True(t, utils.IsStringInSlice(dataprovider.SSHLoginMethodKeyboardInteractive, methods))
assert.True(t, util.IsStringInSlice(dataprovider.LoginMethodPassword, methods))
assert.True(t, util.IsStringInSlice(dataprovider.SSHLoginMethodKeyboardInteractive, methods))
methods = user.GetNextAuthMethods([]string{dataprovider.SSHLoginMethodPublicKey}, false)
assert.Equal(t, 1, len(methods))
assert.True(t, utils.IsStringInSlice(dataprovider.SSHLoginMethodKeyboardInteractive, methods))
assert.True(t, util.IsStringInSlice(dataprovider.SSHLoginMethodKeyboardInteractive, methods))

user.Filters.DeniedLoginMethods = []string{
dataprovider.LoginMethodPassword,

@@ -7014,7 +7039,7 @@ func TestUserGetNextAuthMethods(t *testing.T) {
}
methods = user.GetNextAuthMethods([]string{dataprovider.SSHLoginMethodPublicKey}, true)
assert.Equal(t, 1, len(methods))
assert.True(t, utils.IsStringInSlice(dataprovider.LoginMethodPassword, methods))
assert.True(t, util.IsStringInSlice(dataprovider.LoginMethodPassword, methods))

user.Filters.DeniedLoginMethods = []string{
dataprovider.LoginMethodPassword,

@@ -7024,7 +7049,7 @@ func TestUserGetNextAuthMethods(t *testing.T) {
}
methods = user.GetNextAuthMethods([]string{dataprovider.SSHLoginMethodPublicKey}, true)
assert.Equal(t, 1, len(methods))
assert.True(t, utils.IsStringInSlice(dataprovider.SSHLoginMethodKeyboardInteractive, methods))
assert.True(t, util.IsStringInSlice(dataprovider.SSHLoginMethodKeyboardInteractive, methods))
}

func TestUserIsLoginMethodAllowed(t *testing.T) {

@@ -7242,7 +7267,7 @@ func TestStatVFS(t *testing.T) {
func TestStatVFSCloudBackend(t *testing.T) {
usePubKey := true
u := getTestUser(usePubKey)
u.FsConfig.Provider = vfs.AzureBlobFilesystemProvider
u.FsConfig.Provider = sdk.AzureBlobFilesystemProvider
u.FsConfig.AzBlobConfig.SASURL = kms.NewPlainSecret("https://myaccount.blob.core.windows.net/sasurl")
user, _, err := httpdtest.AddUser(u, http.StatusCreated)
assert.NoError(t, err)

@@ -7386,7 +7411,7 @@ func TestSSHCopy(t *testing.T) {
QuotaFiles: 100,
QuotaSize: 0,
})
u.Filters.FilePatterns = []dataprovider.PatternsFilter{
u.Filters.FilePatterns = []sdk.PatternsFilter{
{
Path: "/",
DeniedPatterns: []string{"*.denied"},

@@ -7664,7 +7689,7 @@ func TestSSHCopyQuotaLimits(t *testing.T) {
QuotaFiles: 3,
QuotaSize: testFileSize + testFileSize1 + 1,
})
u.Filters.FilePatterns = []dataprovider.PatternsFilter{
u.Filters.FilePatterns = []sdk.PatternsFilter{
{
Path: "/",
DeniedPatterns: []string{"*.denied"},

@@ -7984,12 +8009,14 @@ func TestSSHRemoveCryptFs(t *testing.T) {
Name: folderName2,
MappedPath: mappedPath2,
FsConfig: vfs.Filesystem{
Provider: vfs.CryptedFilesystemProvider,
Provider: sdk.CryptedFilesystemProvider,
CryptConfig: vfs.CryptFsConfig{
CryptFsConfig: sdk.CryptFsConfig{
Passphrase: kms.NewPlainSecret(defaultPassword),
},
},
},
},
VirtualPath: vdirPath2,
QuotaFiles: 100,
QuotaSize: 0,

@@ -8444,7 +8471,7 @@ func TestSCPPatternsFilter(t *testing.T) {
assert.NoError(t, err)
err = scpUpload(testFilePath, remoteUpPath, false, false)
assert.NoError(t, err)
user.Filters.FilePatterns = []dataprovider.PatternsFilter{
user.Filters.FilePatterns = []sdk.PatternsFilter{
{
Path: "/",
AllowedPatterns: []string{"*.zip"},

@@ -8570,14 +8597,16 @@ func TestSCPNestedFolders(t *testing.T) {
BaseVirtualFolder: vfs.BaseVirtualFolder{
Name: folderNameSFTP,
FsConfig: vfs.Filesystem{
Provider: vfs.SFTPFilesystemProvider,
Provider: sdk.SFTPFilesystemProvider,
SFTPConfig: vfs.SFTPFsConfig{
SFTPFsConfig: sdk.SFTPFsConfig{
Endpoint: sftpServerAddr,
Username: baseUser.Username,
Password: kms.NewPlainSecret(defaultPassword),
},
},
},
},
VirtualPath: vdirSFTPPath,
})
mappedPathCrypt := filepath.Join(os.TempDir(), "crypt")

@@ -8587,11 +8616,13 @@ func TestSCPNestedFolders(t *testing.T) {
BaseVirtualFolder: vfs.BaseVirtualFolder{
Name: folderNameCrypt,
FsConfig: vfs.Filesystem{
Provider: vfs.CryptedFilesystemProvider,
Provider: sdk.CryptedFilesystemProvider,
CryptConfig: vfs.CryptFsConfig{
CryptFsConfig: sdk.CryptFsConfig{
Passphrase: kms.NewPlainSecret(defaultPassword),
},
},
},
MappedPath: mappedPathCrypt,
},
VirtualPath: vdirCryptPath,

@@ -9193,11 +9224,13 @@ func waitTCPListening(address string) {

func getTestUser(usePubKey bool) dataprovider.User {
user := dataprovider.User{
BaseUser: sdk.BaseUser{
Username: defaultUsername,
Password: defaultPassword,
HomeDir: filepath.Join(homeBasePath, defaultUsername),
Status: 1,
ExpirationDate: 0,
},
}
user.Permissions = make(map[string][]string)
user.Permissions["/"] = allPerms

@@ -9211,7 +9244,7 @@ func getTestUser(usePubKey bool) dataprovider.User {
func getTestSFTPUser(usePubKey bool) dataprovider.User {
u := getTestUser(usePubKey)
u.Username = defaultSFTPUsername
u.FsConfig.Provider = vfs.SFTPFilesystemProvider
u.FsConfig.Provider = sdk.SFTPFilesystemProvider
u.FsConfig.SFTPConfig.Endpoint = sftpServerAddr
u.FsConfig.SFTPConfig.Username = defaultUsername
u.FsConfig.SFTPConfig.Password = kms.NewPlainSecret(defaultPassword)
@@ -23,8 +23,9 @@ import (
"github.com/drakkan/sftpgo/v2/common"
"github.com/drakkan/sftpgo/v2/dataprovider"
"github.com/drakkan/sftpgo/v2/logger"
"github.com/drakkan/sftpgo/v2/metrics"
"github.com/drakkan/sftpgo/v2/utils"
"github.com/drakkan/sftpgo/v2/metric"
"github.com/drakkan/sftpgo/v2/sdk"
"github.com/drakkan/sftpgo/v2/util"
"github.com/drakkan/sftpgo/v2/vfs"
)

@@ -56,7 +57,7 @@ func processSSHCommand(payload []byte, connection *Connection, enabledSSHCommand
name, args, err := parseCommandPayload(msg.Command)
connection.Log(logger.LevelDebug, "new ssh command: %#v args: %v num args: %v user: %v, error: %v",
name, args, len(args), connection.User.Username, err)
if err == nil && utils.IsStringInSlice(name, enabledSSHCommands) {
if err == nil && util.IsStringInSlice(name, enabledSSHCommands) {
connection.command = msg.Command
if name == scpCmdName && len(args) >= 2 {
connection.SetProtocol(common.ProtocolSCP)

@@ -99,9 +100,9 @@ func (c *sshCommand) handle() (err error) {
defer common.Connections.Remove(c.connection.GetID())

c.connection.UpdateLastActivity()
if utils.IsStringInSlice(c.command, sshHashCommands) {
if util.IsStringInSlice(c.command, sshHashCommands) {
return c.handleHashCommands()
} else if utils.IsStringInSlice(c.command, systemCommands) {
} else if util.IsStringInSlice(c.command, systemCommands) {
command, err := c.getSystemCommand()
if err != nil {
return c.sendErrorResponse(err)

@@ -492,11 +493,11 @@ func (c *sshCommand) getSystemCommand() (systemCommand, error) {
// If the user cannot create symlinks we add the option --munge-links, if it is not
// already set. This should make symlinks unusable (but manually recoverable)
if c.connection.User.HasPerm(dataprovider.PermCreateSymlinks, c.getDestPath()) {
if !utils.IsStringInSlice("--safe-links", args) {
if !util.IsStringInSlice("--safe-links", args) {
args = append([]string{"--safe-links"}, args...)
}
} else {
if !utils.IsStringInSlice("--munge-links", args) {
if !util.IsStringInSlice("--munge-links", args) {
args = append([]string{"--munge-links"}, args...)
}
}

@@ -533,7 +534,7 @@ func (c *sshCommand) getSourcePath() string {
func cleanCommandPath(name string) string {
name = strings.Trim(name, "'")
name = strings.Trim(name, "\"")
result := utils.CleanPath(name)
result := util.CleanPath(name)
if strings.HasSuffix(name, "/") && !strings.HasSuffix(result, "/") {
result += "/"
}

@@ -636,9 +637,9 @@ func (c *sshCommand) getRemovePath() (string, error) {
func (c *sshCommand) isLocalPath(virtualPath string) bool {
folder, err := c.connection.User.GetVirtualFolderForPath(virtualPath)
if err != nil {
return c.connection.User.FsConfig.Provider == vfs.LocalFilesystemProvider
return c.connection.User.FsConfig.Provider == sdk.LocalFilesystemProvider
}
return folder.FsConfig.Provider == vfs.LocalFilesystemProvider
return folder.FsConfig.Provider == sdk.LocalFilesystemProvider
}

func (c *sshCommand) isLocalCopy(virtualSourcePath, virtualTargetPath string) bool {

@@ -735,7 +736,7 @@ func (c *sshCommand) sendExitStatus(err error) {
c.connection.channel.Close()
// for scp we notify single uploads/downloads
if c.command != scpCmdName {
metrics.SSHCommandCompleted(err)
metric.SSHCommandCompleted(err)
if cmdPath != "" {
_, p, errFs := c.connection.GetFsAndResolvedPath(cmdPath)
if errFs == nil {
@@ -8,7 +8,7 @@ import (
"github.com/eikenb/pipeat"

"github.com/drakkan/sftpgo/v2/common"
"github.com/drakkan/sftpgo/v2/metrics"
"github.com/drakkan/sftpgo/v2/metric"
"github.com/drakkan/sftpgo/v2/vfs"
)

@@ -224,7 +224,7 @@ func (t *transfer) copyFromReaderToWriter(dst io.Writer, src io.Reader) (int64,
}
t.ErrTransfer = err
if written > 0 || err != nil {
metrics.TransferCompleted(atomic.LoadInt64(&t.BytesSent), atomic.LoadInt64(&t.BytesReceived), t.GetType(), t.ErrTransfer)
metric.TransferCompleted(atomic.LoadInt64(&t.BytesSent), atomic.LoadInt64(&t.BytesReceived), t.GetType(), t.ErrTransfer)
}
return written, err
}
@@ -227,5 +227,6 @@
"url": "",
"master_key_path": ""
}
}
},
"plugins": []
}
@@ -9,7 +9,7 @@ import (

"github.com/drakkan/sftpgo/v2/common"
"github.com/drakkan/sftpgo/v2/logger"
"github.com/drakkan/sftpgo/v2/metrics"
"github.com/drakkan/sftpgo/v2/metric"
)

func initializeRouter(enableProfiler bool) {

@@ -26,7 +26,7 @@ func initializeRouter(enableProfiler bool) {

router.Group(func(router chi.Router) {
router.Use(checkAuth)
metrics.AddMetricsEndpoint(metricsPath, router)
metric.AddMetricsEndpoint(metricsPath, router)

if enableProfiler {
logger.InfoToConsole("enabling the built-in profiler")
@@ -16,7 +16,7 @@ import (

"github.com/drakkan/sftpgo/v2/common"
"github.com/drakkan/sftpgo/v2/logger"
"github.com/drakkan/sftpgo/v2/utils"
"github.com/drakkan/sftpgo/v2/util"
)

const (

@@ -105,14 +105,14 @@ func (c Conf) Initialize(configDir string) error {
config := &tls.Config{
GetCertificate: certMgr.GetCertificateFunc(),
MinVersion: tls.VersionTLS12,
CipherSuites: utils.GetTLSCiphersFromNames(c.TLSCipherSuites),
CipherSuites: util.GetTLSCiphersFromNames(c.TLSCipherSuites),
PreferServerCipherSuites: true,
}
logger.Debug(logSender, "", "configured TLS cipher suites: %v", config.CipherSuites)
httpServer.TLSConfig = config
return utils.HTTPListenAndServe(httpServer, c.BindAddress, c.BindPort, true, logSender)
return util.HTTPListenAndServe(httpServer, c.BindAddress, c.BindPort, true, logSender)
}
return utils.HTTPListenAndServe(httpServer, c.BindAddress, c.BindPort, false, logSender)
return util.HTTPListenAndServe(httpServer, c.BindAddress, c.BindPort, false, logSender)
}

// ReloadCertificateMgr reloads the certificate manager

@@ -124,7 +124,7 @@ func ReloadCertificateMgr() error {
}

func getConfigPath(name, configDir string) string {
if !utils.IsFileInputValid(name) {
if !util.IsFileInputValid(name) {
return ""
}
if name != "" && !filepath.IsAbs(name) {
@@ -1,4 +1,4 @@
-package utils
+package util
 
 import "fmt"
@@ -1,4 +1,4 @@
-package utils
+package util
 
 import (
 	"net"
@@ -1,5 +1,5 @@
-// Package utils provides some common utility methods
-package utils
+// Package util provides some common utility methods
+package util
 
 import (
 	"bytes"
@@ -24,7 +24,7 @@ import (
 	"github.com/pkg/sftp"
 
 	"github.com/drakkan/sftpgo/v2/logger"
-	"github.com/drakkan/sftpgo/v2/metrics"
+	"github.com/drakkan/sftpgo/v2/metric"
 	"github.com/drakkan/sftpgo/v2/version"
 )
 
@@ -176,7 +176,7 @@ func (fs *AzureBlobFs) Stat(name string) (os.FileInfo, error) {
 	attrs, err := fs.headObject(name)
 	if err == nil {
 		isDir := (attrs.ContentType() == dirMimeType)
-		metrics.AZListObjectsCompleted(nil)
+		metric.AZListObjectsCompleted(nil)
 		return NewFileInfo(name, isDir, attrs.ContentLength(), attrs.LastModified(), false), nil
 	}
 	if !fs.IsNotExist(err) {
@@ -225,7 +225,7 @@ func (fs *AzureBlobFs) Open(name string, offset int64) (File, *pipeat.PipeReader
 		n, err := io.Copy(w, body)
 		w.CloseWithError(err) //nolint:errcheck
 		fsLog(fs, logger.LevelDebug, "download completed, path: %#v size: %v, err: %v", name, n, err)
-		metrics.AZTransferCompleted(n, 1, err)
+		metric.AZTransferCompleted(n, 1, err)
 	}()
 
 	return nil, r, cancelFn, nil
@@ -268,7 +268,7 @@ func (fs *AzureBlobFs) Create(name string, flag int) (File, *PipeWriter, func(),
 		r.CloseWithError(err) //nolint:errcheck
 		p.Done(err)
 		fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, readed bytes: %v, err: %v", name, r.GetReadedBytes(), err)
-		metrics.AZTransferCompleted(r.GetReadedBytes(), 0, err)
+		metric.AZTransferCompleted(r.GetReadedBytes(), 0, err)
 	}()
 
 	return nil, p, cancelFn, nil
@@ -307,7 +307,7 @@ func (fs *AzureBlobFs) Rename(source, target string) error {
 
 	resp, err := dstBlobURL.StartCopyFromURL(ctx, srcURL, md, mac, bac, azblob.AccessTierType(fs.config.AccessTier), nil)
 	if err != nil {
-		metrics.AZCopyObjectCompleted(err)
+		metric.AZCopyObjectCompleted(err)
 		return err
 	}
 	copyStatus := resp.CopyStatus()
@@ -321,7 +321,7 @@ func (fs *AzureBlobFs) Rename(source, target string) error {
 			// of them before giving up.
 			nErrors++
 			if ctx.Err() != nil || nErrors == 3 {
-				metrics.AZCopyObjectCompleted(err)
+				metric.AZCopyObjectCompleted(err)
 				return err
 			}
 		} else {
@@ -330,10 +330,10 @@ func (fs *AzureBlobFs) Rename(source, target string) error {
 	}
 	if copyStatus != azblob.CopyStatusSuccess {
 		err := fmt.Errorf("copy failed with status: %s", copyStatus)
-		metrics.AZCopyObjectCompleted(err)
+		metric.AZCopyObjectCompleted(err)
 		return err
 	}
-	metrics.AZCopyObjectCompleted(nil)
+	metric.AZCopyObjectCompleted(nil)
 	return fs.Remove(source, fi.IsDir())
 }
 
@@ -353,7 +353,7 @@ func (fs *AzureBlobFs) Remove(name string, isDir bool) error {
 	defer cancelFn()
 
 	_, err := blobBlockURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
-	metrics.AZDeleteObjectCompleted(err)
+	metric.AZDeleteObjectCompleted(err)
 	return err
 }
 
@@ -437,7 +437,7 @@ func (fs *AzureBlobFs) ReadDir(dirname string) ([]os.FileInfo, error) {
 			Prefix: prefix,
 		})
 		if err != nil {
-			metrics.AZListObjectsCompleted(err)
+			metric.AZListObjectsCompleted(err)
 			return nil, err
 		}
 		marker = listBlob.NextMarker
@@ -476,7 +476,7 @@ func (fs *AzureBlobFs) ReadDir(dirname string) ([]os.FileInfo, error) {
 		}
 	}
 
-	metrics.AZListObjectsCompleted(nil)
+	metric.AZListObjectsCompleted(nil)
 	return result, nil
 }
 
@@ -569,7 +569,7 @@ func (fs *AzureBlobFs) ScanRootDirContents() (int, int64, error) {
 			Prefix: fs.config.KeyPrefix,
 		})
 		if err != nil {
-			metrics.AZListObjectsCompleted(err)
+			metric.AZListObjectsCompleted(err)
 			return numFiles, size, err
 		}
 		marker = listBlob.NextMarker
@@ -591,7 +591,7 @@ func (fs *AzureBlobFs) ScanRootDirContents() (int, int64, error) {
 		}
 	}
 
-	metrics.AZListObjectsCompleted(nil)
+	metric.AZListObjectsCompleted(nil)
 	return numFiles, size, nil
 }
 
@@ -654,7 +654,7 @@ func (fs *AzureBlobFs) Walk(root string, walkFn filepath.WalkFunc) error {
 			Prefix: prefix,
 		})
 		if err != nil {
-			metrics.AZListObjectsCompleted(err)
+			metric.AZListObjectsCompleted(err)
 			return err
 		}
 		marker = listBlob.NextMarker
@@ -678,7 +678,7 @@ func (fs *AzureBlobFs) Walk(root string, walkFn filepath.WalkFunc) error {
 		}
 	}
 
-	metrics.AZListObjectsCompleted(nil)
+	metric.AZListObjectsCompleted(nil)
 	return walkFn(root, NewFileInfo(root, true, 0, time.Now(), false), nil)
 }
 
@@ -709,7 +709,7 @@ func (fs *AzureBlobFs) headObject(name string) (*azblob.BlobGetPropertiesRespons
 
 	blobBlockURL := fs.containerURL.NewBlockBlobURL(name)
 	response, err := blobBlockURL.GetProperties(ctx, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
-	metrics.AZHeadObjectCompleted(err)
+	metric.AZHeadObjectCompleted(err)
 	return response, err
 }
 
@@ -766,7 +766,7 @@ func (fs *AzureBlobFs) checkIfBucketExists() error {
 	defer cancelFn()
 
 	_, err := fs.containerURL.GetProperties(ctx, azblob.LeaseAccessConditions{})
-	metrics.AZHeadContainerCompleted(err)
+	metric.AZHeadContainerCompleted(err)
 	return err
 }
 
@@ -793,7 +793,7 @@ func (fs *AzureBlobFs) hasContents(name string) (bool, error) {
 		Prefix:     prefix,
 		MaxResults: 1,
 	})
-	metrics.AZListObjectsCompleted(err)
+	metric.AZListObjectsCompleted(err)
 	if err != nil {
 		return result, err
 	}
@@ -4,93 +4,10 @@ import (
 	"fmt"
 
 	"github.com/drakkan/sftpgo/v2/kms"
-	"github.com/drakkan/sftpgo/v2/utils"
+	"github.com/drakkan/sftpgo/v2/sdk"
+	"github.com/drakkan/sftpgo/v2/util"
 )
 
-// FilesystemProvider defines the supported storage filesystems
-type FilesystemProvider int
-
-// supported values for FilesystemProvider
-const (
-	LocalFilesystemProvider     FilesystemProvider = iota // Local
-	S3FilesystemProvider                                  // AWS S3 compatible
-	GCSFilesystemProvider                                 // Google Cloud Storage
-	AzureBlobFilesystemProvider                           // Azure Blob Storage
-	CryptedFilesystemProvider                             // Local encrypted
-	SFTPFilesystemProvider                                // SFTP
-)
-
-// GetProviderByName returns the FilesystemProvider matching a given name
-//
-// to provide backwards compatibility, numeric strings are accepted as well
-func GetProviderByName(name string) FilesystemProvider {
-	switch name {
-	case "0", "osfs":
-		return LocalFilesystemProvider
-	case "1", "s3fs":
-		return S3FilesystemProvider
-	case "2", "gcsfs":
-		return GCSFilesystemProvider
-	case "3", "azblobfs":
-		return AzureBlobFilesystemProvider
-	case "4", "cryptfs":
-		return CryptedFilesystemProvider
-	case "5", "sftpfs":
-		return SFTPFilesystemProvider
-	}
-
-	// TODO think about returning an error value instead of silently defaulting to LocalFilesystemProvider
-	return LocalFilesystemProvider
-}
-
-// Name returns the Provider's unique name
-func (p FilesystemProvider) Name() string {
-	switch p {
-	case LocalFilesystemProvider:
-		return "osfs"
-	case S3FilesystemProvider:
-		return "s3fs"
-	case GCSFilesystemProvider:
-		return "gcsfs"
-	case AzureBlobFilesystemProvider:
-		return "azblobfs"
-	case CryptedFilesystemProvider:
-		return "cryptfs"
-	case SFTPFilesystemProvider:
-		return "sftpfs"
-	}
-	return "" // let's not claim to be
-}
-
-// ShortInfo returns a human readable, short description for the given FilesystemProvider
-func (p FilesystemProvider) ShortInfo() string {
-	switch p {
-	case LocalFilesystemProvider:
-		return "Local"
-	case S3FilesystemProvider:
-		return "AWS S3 (Compatible)"
-	case GCSFilesystemProvider:
-		return "Google Cloud Storage"
-	case AzureBlobFilesystemProvider:
-		return "Azure Blob Storage"
-	case CryptedFilesystemProvider:
-		return "Local encrypted"
-	case SFTPFilesystemProvider:
-		return "SFTP"
-	}
-	return ""
-}
-
-// ListProviders returns a list of available FilesystemProviders
-func ListProviders() []FilesystemProvider {
-	// TODO this should ultimately be dynamic (i.e. each provider registers itself)
-	return []FilesystemProvider{
-		LocalFilesystemProvider, S3FilesystemProvider,
-		GCSFilesystemProvider, AzureBlobFilesystemProvider,
-		CryptedFilesystemProvider, SFTPFilesystemProvider,
-	}
-}
-
 // ValidatorHelper implements methods we need for Filesystem.ValidateConfig.
 // It is implemented by vfs.Folder and dataprovider.User
 type ValidatorHelper interface {
@@ -98,10 +15,10 @@ type ValidatorHelper interface {
 	GetEncryptionAdditionalData() string
 }
 
-// Filesystem defines cloud storage filesystem details
+// Filesystem defines filesystem details
 type Filesystem struct {
 	RedactedSecret string                 `json:"-"`
-	Provider       FilesystemProvider     `json:"provider"`
+	Provider       sdk.FilesystemProvider `json:"provider"`
 	S3Config       S3FsConfig             `json:"s3config,omitempty"`
 	GCSConfig      GCSFsConfig            `json:"gcsconfig,omitempty"`
 	AzBlobConfig   AzBlobFsConfig         `json:"azblobconfig,omitempty"`
@@ -167,15 +84,15 @@ func (f *Filesystem) IsEqual(other *Filesystem) bool {
 		return false
 	}
 	switch f.Provider {
-	case S3FilesystemProvider:
+	case sdk.S3FilesystemProvider:
 		return f.S3Config.isEqual(&other.S3Config)
-	case GCSFilesystemProvider:
+	case sdk.GCSFilesystemProvider:
 		return f.GCSConfig.isEqual(&other.GCSConfig)
-	case AzureBlobFilesystemProvider:
+	case sdk.AzureBlobFilesystemProvider:
 		return f.AzBlobConfig.isEqual(&other.AzBlobConfig)
-	case CryptedFilesystemProvider:
+	case sdk.CryptedFilesystemProvider:
 		return f.CryptConfig.isEqual(&other.CryptConfig)
-	case SFTPFilesystemProvider:
+	case sdk.SFTPFilesystemProvider:
 		return f.SFTPConfig.isEqual(&other.SFTPConfig)
 	default:
 		return true
@@ -186,57 +103,57 @@ func (f *Filesystem) IsEqual(other *Filesystem) bool {
 // Filesystem.*Config to their zero value if successful
 func (f *Filesystem) Validate(helper ValidatorHelper) error {
 	switch f.Provider {
-	case S3FilesystemProvider:
+	case sdk.S3FilesystemProvider:
 		if err := f.S3Config.Validate(); err != nil {
-			return utils.NewValidationError(fmt.Sprintf("could not validate s3config: %v", err))
+			return util.NewValidationError(fmt.Sprintf("could not validate s3config: %v", err))
 		}
 		if err := f.S3Config.EncryptCredentials(helper.GetEncryptionAdditionalData()); err != nil {
-			return utils.NewValidationError(fmt.Sprintf("could not encrypt s3 access secret: %v", err))
+			return util.NewValidationError(fmt.Sprintf("could not encrypt s3 access secret: %v", err))
 		}
 		f.GCSConfig = GCSFsConfig{}
 		f.AzBlobConfig = AzBlobFsConfig{}
 		f.CryptConfig = CryptFsConfig{}
 		f.SFTPConfig = SFTPFsConfig{}
 		return nil
-	case GCSFilesystemProvider:
+	case sdk.GCSFilesystemProvider:
 		if err := f.GCSConfig.Validate(helper.GetGCSCredentialsFilePath()); err != nil {
-			return utils.NewValidationError(fmt.Sprintf("could not validate GCS config: %v", err))
+			return util.NewValidationError(fmt.Sprintf("could not validate GCS config: %v", err))
 		}
 		f.S3Config = S3FsConfig{}
 		f.AzBlobConfig = AzBlobFsConfig{}
 		f.CryptConfig = CryptFsConfig{}
 		f.SFTPConfig = SFTPFsConfig{}
 		return nil
-	case AzureBlobFilesystemProvider:
+	case sdk.AzureBlobFilesystemProvider:
 		if err := f.AzBlobConfig.Validate(); err != nil {
-			return utils.NewValidationError(fmt.Sprintf("could not validate Azure Blob config: %v", err))
+			return util.NewValidationError(fmt.Sprintf("could not validate Azure Blob config: %v", err))
 		}
 		if err := f.AzBlobConfig.EncryptCredentials(helper.GetEncryptionAdditionalData()); err != nil {
-			return utils.NewValidationError(fmt.Sprintf("could not encrypt Azure blob account key: %v", err))
+			return util.NewValidationError(fmt.Sprintf("could not encrypt Azure blob account key: %v", err))
 		}
 		f.S3Config = S3FsConfig{}
 		f.GCSConfig = GCSFsConfig{}
 		f.CryptConfig = CryptFsConfig{}
 		f.SFTPConfig = SFTPFsConfig{}
 		return nil
-	case CryptedFilesystemProvider:
+	case sdk.CryptedFilesystemProvider:
 		if err := f.CryptConfig.Validate(); err != nil {
-			return utils.NewValidationError(fmt.Sprintf("could not validate Crypt fs config: %v", err))
+			return util.NewValidationError(fmt.Sprintf("could not validate Crypt fs config: %v", err))
 		}
 		if err := f.CryptConfig.EncryptCredentials(helper.GetEncryptionAdditionalData()); err != nil {
-			return utils.NewValidationError(fmt.Sprintf("could not encrypt Crypt fs passphrase: %v", err))
+			return util.NewValidationError(fmt.Sprintf("could not encrypt Crypt fs passphrase: %v", err))
 		}
 		f.S3Config = S3FsConfig{}
 		f.GCSConfig = GCSFsConfig{}
 		f.AzBlobConfig = AzBlobFsConfig{}
 		f.SFTPConfig = SFTPFsConfig{}
 		return nil
-	case SFTPFilesystemProvider:
+	case sdk.SFTPFilesystemProvider:
 		if err := f.SFTPConfig.Validate(); err != nil {
-			return utils.NewValidationError(fmt.Sprintf("could not validate SFTP fs config: %v", err))
+			return util.NewValidationError(fmt.Sprintf("could not validate SFTP fs config: %v", err))
 		}
 		if err := f.SFTPConfig.EncryptCredentials(helper.GetEncryptionAdditionalData()); err != nil {
-			return utils.NewValidationError(fmt.Sprintf("could not encrypt SFTP fs credentials: %v", err))
+			return util.NewValidationError(fmt.Sprintf("could not encrypt SFTP fs credentials: %v", err))
 		}
 		f.S3Config = S3FsConfig{}
 		f.GCSConfig = GCSFsConfig{}
@@ -244,7 +161,7 @@ func (f *Filesystem) Validate(helper ValidatorHelper) error {
 		f.CryptConfig = CryptFsConfig{}
 		return nil
 	default:
-		f.Provider = LocalFilesystemProvider
+		f.Provider = sdk.LocalFilesystemProvider
 		f.S3Config = S3FsConfig{}
 		f.GCSConfig = GCSFsConfig{}
 		f.AzBlobConfig = AzBlobFsConfig{}
@@ -258,23 +175,23 @@ func (f *Filesystem) Validate(helper ValidatorHelper) error {
 func (f *Filesystem) HasRedactedSecret() bool {
 	// TODO move vfs specific code into each *FsConfig struct
 	switch f.Provider {
-	case S3FilesystemProvider:
+	case sdk.S3FilesystemProvider:
 		if f.S3Config.AccessSecret.IsRedacted() {
 			return true
 		}
-	case GCSFilesystemProvider:
+	case sdk.GCSFilesystemProvider:
 		if f.GCSConfig.Credentials.IsRedacted() {
 			return true
 		}
-	case AzureBlobFilesystemProvider:
+	case sdk.AzureBlobFilesystemProvider:
 		if f.AzBlobConfig.AccountKey.IsRedacted() {
 			return true
 		}
-	case CryptedFilesystemProvider:
+	case sdk.CryptedFilesystemProvider:
 		if f.CryptConfig.Passphrase.IsRedacted() {
 			return true
 		}
-	case SFTPFilesystemProvider:
+	case sdk.SFTPFilesystemProvider:
 		if f.SFTPConfig.Password.IsRedacted() {
 			return true
 		}
@@ -289,16 +206,16 @@ func (f *Filesystem) HasRedactedSecret() bool {
 // HideConfidentialData hides filesystem confidential data
 func (f *Filesystem) HideConfidentialData() {
 	switch f.Provider {
-	case S3FilesystemProvider:
+	case sdk.S3FilesystemProvider:
 		f.S3Config.AccessSecret.Hide()
-	case GCSFilesystemProvider:
+	case sdk.GCSFilesystemProvider:
 		f.GCSConfig.Credentials.Hide()
-	case AzureBlobFilesystemProvider:
+	case sdk.AzureBlobFilesystemProvider:
 		f.AzBlobConfig.AccountKey.Hide()
 		f.AzBlobConfig.SASURL.Hide()
-	case CryptedFilesystemProvider:
+	case sdk.CryptedFilesystemProvider:
 		f.CryptConfig.Passphrase.Hide()
-	case SFTPFilesystemProvider:
+	case sdk.SFTPFilesystemProvider:
 		f.SFTPConfig.Password.Hide()
 		f.SFTPConfig.PrivateKey.Hide()
 	}
@@ -310,6 +227,7 @@ func (f *Filesystem) GetACopy() Filesystem {
 	fs := Filesystem{
 		Provider: f.Provider,
 		S3Config: S3FsConfig{
+			S3FsConfig: sdk.S3FsConfig{
 				Bucket:    f.S3Config.Bucket,
 				Region:    f.S3Config.Region,
 				AccessKey: f.S3Config.AccessKey,
@@ -320,7 +238,9 @@ func (f *Filesystem) GetACopy() Filesystem {
 				UploadPartSize:    f.S3Config.UploadPartSize,
 				UploadConcurrency: f.S3Config.UploadConcurrency,
+			},
 		},
 		GCSConfig: GCSFsConfig{
+			GCSFsConfig: sdk.GCSFsConfig{
 				Bucket:         f.GCSConfig.Bucket,
 				CredentialFile: f.GCSConfig.CredentialFile,
 				Credentials:    f.GCSConfig.Credentials.Clone(),
@@ -328,7 +248,9 @@ func (f *Filesystem) GetACopy() Filesystem {
 				StorageClass: f.GCSConfig.StorageClass,
 				KeyPrefix:    f.GCSConfig.KeyPrefix,
+			},
 		},
 		AzBlobConfig: AzBlobFsConfig{
+			AzBlobFsConfig: sdk.AzBlobFsConfig{
 				Container:   f.AzBlobConfig.Container,
 				AccountName: f.AzBlobConfig.AccountName,
 				AccountKey:  f.AzBlobConfig.AccountKey.Clone(),
@@ -340,10 +262,14 @@ func (f *Filesystem) GetACopy() Filesystem {
 				UseEmulator: f.AzBlobConfig.UseEmulator,
 				AccessTier:  f.AzBlobConfig.AccessTier,
+			},
 		},
 		CryptConfig: CryptFsConfig{
+			CryptFsConfig: sdk.CryptFsConfig{
 				Passphrase: f.CryptConfig.Passphrase.Clone(),
+			},
 		},
 		SFTPConfig: SFTPFsConfig{
+			SFTPFsConfig: sdk.SFTPFsConfig{
 				Endpoint: f.SFTPConfig.Endpoint,
 				Username: f.SFTPConfig.Username,
 				Password: f.SFTPConfig.Password.Clone(),
@@ -352,6 +278,7 @@ func (f *Filesystem) GetACopy() Filesystem {
 				DisableCouncurrentReads: f.SFTPConfig.DisableCouncurrentReads,
 				BufferSize:              f.SFTPConfig.BufferSize,
+			},
 		},
 	}
 	if len(f.SFTPConfig.Fingerprints) > 0 {
 		fs.SFTPConfig.Fingerprints = make([]string, len(f.SFTPConfig.Fingerprints))
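Editorial note: the provider enum and its helpers are removed from vfs above, while the rest of this diff switches callers to `sdk.LocalFilesystemProvider`, `sdk.GetProviderByName` and friends, so these helpers appear to have moved into the new `sdk` package. A minimal sketch of resolving a provider from its configured name under that assumption (helper names taken from the removed code, not verified against the final sdk API):

package main

import (
	"fmt"

	"github.com/drakkan/sftpgo/v2/sdk"
)

func main() {
	// "s3fs" and the legacy numeric form "1" are both accepted, mirroring the
	// backwards-compatibility note on the removed GetProviderByName.
	for _, name := range []string{"s3fs", "1", "unknown"} {
		p := sdk.GetProviderByName(name) // unknown names fall back to the local provider
		fmt.Printf("%-8s -> %s (%s)\n", name, p.Name(), p.ShortInfo())
	}
}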
@@ -6,7 +6,8 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/drakkan/sftpgo/v2/utils"
+	"github.com/drakkan/sftpgo/v2/sdk"
+	"github.com/drakkan/sftpgo/v2/util"
 )
 
 // BaseVirtualFolder defines the path for the virtual folder and the used quota limits.
@@ -65,10 +66,10 @@ func (v *BaseVirtualFolder) GetQuotaSummary() string {
 	var result string
 	result = "Files: " + strconv.Itoa(v.UsedQuotaFiles)
 	if v.UsedQuotaSize > 0 {
-		result += ". Size: " + utils.ByteCountIEC(v.UsedQuotaSize)
+		result += ". Size: " + util.ByteCountIEC(v.UsedQuotaSize)
 	}
 	if v.LastQuotaUpdate > 0 {
-		t := utils.GetTimeFromMsecSinceEpoch(v.LastQuotaUpdate)
+		t := util.GetTimeFromMsecSinceEpoch(v.LastQuotaUpdate)
 		result += fmt.Sprintf(". Last update: %v ", t.Format("2006-01-02 15:04")) // YYYY-MM-DD HH:MM
 	}
 	return result
@@ -77,17 +78,17 @@ func (v *BaseVirtualFolder) GetQuotaSummary() string {
 // GetStorageDescrition returns the storage description
 func (v *BaseVirtualFolder) GetStorageDescrition() string {
 	switch v.FsConfig.Provider {
-	case LocalFilesystemProvider:
+	case sdk.LocalFilesystemProvider:
 		return fmt.Sprintf("Local: %v", v.MappedPath)
-	case S3FilesystemProvider:
+	case sdk.S3FilesystemProvider:
 		return fmt.Sprintf("S3: %v", v.FsConfig.S3Config.Bucket)
-	case GCSFilesystemProvider:
+	case sdk.GCSFilesystemProvider:
 		return fmt.Sprintf("GCS: %v", v.FsConfig.GCSConfig.Bucket)
-	case AzureBlobFilesystemProvider:
+	case sdk.AzureBlobFilesystemProvider:
 		return fmt.Sprintf("AzBlob: %v", v.FsConfig.AzBlobConfig.Container)
-	case CryptedFilesystemProvider:
+	case sdk.CryptedFilesystemProvider:
 		return fmt.Sprintf("Encrypted: %v", v.MappedPath)
-	case SFTPFilesystemProvider:
+	case sdk.SFTPFilesystemProvider:
 		return fmt.Sprintf("SFTP: %v", v.FsConfig.SFTPConfig.Endpoint)
 	default:
 		return ""
@@ -96,22 +97,22 @@ func (v *BaseVirtualFolder) GetStorageDescrition() string {
 
 // IsLocalOrLocalCrypted returns true if the folder provider is local or local encrypted
 func (v *BaseVirtualFolder) IsLocalOrLocalCrypted() bool {
-	return v.FsConfig.Provider == LocalFilesystemProvider || v.FsConfig.Provider == CryptedFilesystemProvider
+	return v.FsConfig.Provider == sdk.LocalFilesystemProvider || v.FsConfig.Provider == sdk.CryptedFilesystemProvider
 }
 
 // hideConfidentialData hides folder confidential data
 func (v *BaseVirtualFolder) hideConfidentialData() {
 	switch v.FsConfig.Provider {
-	case S3FilesystemProvider:
+	case sdk.S3FilesystemProvider:
 		v.FsConfig.S3Config.AccessSecret.Hide()
-	case GCSFilesystemProvider:
+	case sdk.GCSFilesystemProvider:
 		v.FsConfig.GCSConfig.Credentials.Hide()
-	case AzureBlobFilesystemProvider:
+	case sdk.AzureBlobFilesystemProvider:
 		v.FsConfig.AzBlobConfig.AccountKey.Hide()
 		v.FsConfig.AzBlobConfig.SASURL.Hide()
-	case CryptedFilesystemProvider:
+	case sdk.CryptedFilesystemProvider:
 		v.FsConfig.CryptConfig.Passphrase.Hide()
-	case SFTPFilesystemProvider:
+	case sdk.SFTPFilesystemProvider:
 		v.FsConfig.SFTPConfig.Password.Hide()
 		v.FsConfig.SFTPConfig.PrivateKey.Hide()
 	}
@@ -128,26 +129,26 @@ func (v *BaseVirtualFolder) PrepareForRendering() {
 // HasRedactedSecret returns true if the folder has a redacted secret
 func (v *BaseVirtualFolder) HasRedactedSecret() bool {
 	switch v.FsConfig.Provider {
-	case S3FilesystemProvider:
+	case sdk.S3FilesystemProvider:
 		if v.FsConfig.S3Config.AccessSecret.IsRedacted() {
 			return true
 		}
-	case GCSFilesystemProvider:
+	case sdk.GCSFilesystemProvider:
 		if v.FsConfig.GCSConfig.Credentials.IsRedacted() {
 			return true
 		}
-	case AzureBlobFilesystemProvider:
+	case sdk.AzureBlobFilesystemProvider:
 		if v.FsConfig.AzBlobConfig.AccountKey.IsRedacted() {
 			return true
 		}
 		if v.FsConfig.AzBlobConfig.SASURL.IsRedacted() {
 			return true
 		}
-	case CryptedFilesystemProvider:
+	case sdk.CryptedFilesystemProvider:
 		if v.FsConfig.CryptConfig.Passphrase.IsRedacted() {
 			return true
 		}
-	case SFTPFilesystemProvider:
+	case sdk.SFTPFilesystemProvider:
 		if v.FsConfig.SFTPConfig.Password.IsRedacted() {
 			return true
 		}
@@ -176,17 +177,17 @@ type VirtualFolder struct {
 // GetFilesystem returns the filesystem for this folder
 func (v *VirtualFolder) GetFilesystem(connectionID string, forbiddenSelfUsers []string) (Fs, error) {
 	switch v.FsConfig.Provider {
-	case S3FilesystemProvider:
+	case sdk.S3FilesystemProvider:
 		return NewS3Fs(connectionID, v.MappedPath, v.VirtualPath, v.FsConfig.S3Config)
-	case GCSFilesystemProvider:
+	case sdk.GCSFilesystemProvider:
 		config := v.FsConfig.GCSConfig
 		config.CredentialFile = v.GetGCSCredentialsFilePath()
 		return NewGCSFs(connectionID, v.MappedPath, v.VirtualPath, config)
-	case AzureBlobFilesystemProvider:
+	case sdk.AzureBlobFilesystemProvider:
 		return NewAzBlobFs(connectionID, v.MappedPath, v.VirtualPath, v.FsConfig.AzBlobConfig)
-	case CryptedFilesystemProvider:
+	case sdk.CryptedFilesystemProvider:
 		return NewCryptFs(connectionID, v.MappedPath, v.VirtualPath, v.FsConfig.CryptConfig)
-	case SFTPFilesystemProvider:
+	case sdk.SFTPFilesystemProvider:
 		return NewSFTPFs(connectionID, v.VirtualPath, v.MappedPath, forbiddenSelfUsers, v.FsConfig.SFTPConfig)
 	default:
 		return NewOsFs(connectionID, v.MappedPath, v.VirtualPath), nil
vfs/gcsfs.go
@@ -24,7 +24,7 @@ import (
 
 	"github.com/drakkan/sftpgo/v2/kms"
 	"github.com/drakkan/sftpgo/v2/logger"
-	"github.com/drakkan/sftpgo/v2/metrics"
+	"github.com/drakkan/sftpgo/v2/metric"
 	"github.com/drakkan/sftpgo/v2/version"
 )
 
@@ -156,7 +156,7 @@ func (fs *GCSFs) Open(name string, offset int64) (File, *pipeat.PipeReaderAt, fu
 		n, err := io.Copy(w, objectReader)
 		w.CloseWithError(err) //nolint:errcheck
 		fsLog(fs, logger.LevelDebug, "download completed, path: %#v size: %v, err: %v", name, n, err)
-		metrics.GCSTransferCompleted(n, 1, err)
+		metric.GCSTransferCompleted(n, 1, err)
 	}()
 	return nil, r, cancelFn, nil
 }
@@ -195,7 +195,7 @@ func (fs *GCSFs) Create(name string, flag int) (File, *PipeWriter, func(), error
 		r.CloseWithError(err) //nolint:errcheck
 		p.Done(err)
 		fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, readed bytes: %v, err: %v", name, n, err)
-		metrics.GCSTransferCompleted(n, 0, err)
+		metric.GCSTransferCompleted(n, 0, err)
 	}()
 	return nil, p, cancelFn, nil
 }
@@ -243,7 +243,7 @@ func (fs *GCSFs) Rename(source, target string) error {
 		copier.ContentType = contentType
 	}
 	_, err = copier.Run(ctx)
-	metrics.GCSCopyObjectCompleted(err)
+	metric.GCSCopyObjectCompleted(err)
 	if err != nil {
 		return err
 	}
@@ -272,7 +272,7 @@ func (fs *GCSFs) Remove(name string, isDir bool) error {
 		// we can have directories without a trailing "/" (created using v2.1.0 and before)
 		err = fs.svc.Bucket(fs.config.Bucket).Object(strings.TrimSuffix(name, "/")).Delete(ctx)
 	}
-	metrics.GCSDeleteObjectCompleted(err)
+	metric.GCSDeleteObjectCompleted(err)
 	return err
 }
 
@@ -354,7 +354,7 @@ func (fs *GCSFs) ReadDir(dirname string) ([]os.FileInfo, error) {
 			break
 		}
 		if err != nil {
-			metrics.GCSListObjectsCompleted(err)
+			metric.GCSListObjectsCompleted(err)
 			return result, err
 		}
 		if attrs.Prefix != "" {
@@ -389,7 +389,7 @@ func (fs *GCSFs) ReadDir(dirname string) ([]os.FileInfo, error) {
 			result = append(result, fi)
 		}
 	}
-	metrics.GCSListObjectsCompleted(nil)
+	metric.GCSListObjectsCompleted(nil)
 	return result, nil
 }
 
@@ -472,7 +472,7 @@ func (fs *GCSFs) ScanRootDirContents() (int, int64, error) {
 			break
 		}
 		if err != nil {
-			metrics.GCSListObjectsCompleted(err)
+			metric.GCSListObjectsCompleted(err)
 			return numFiles, size, err
 		}
 		if !attrs.Deleted.IsZero() {
@@ -485,7 +485,7 @@ func (fs *GCSFs) ScanRootDirContents() (int, int64, error) {
 		numFiles++
 		size += attrs.Size
 	}
-	metrics.GCSListObjectsCompleted(nil)
+	metric.GCSListObjectsCompleted(nil)
 	return numFiles, size, err
 }
 
@@ -552,7 +552,7 @@ func (fs *GCSFs) Walk(root string, walkFn filepath.WalkFunc) error {
 		}
 		if err != nil {
 			walkFn(root, nil, err) //nolint:errcheck
-			metrics.GCSListObjectsCompleted(err)
+			metric.GCSListObjectsCompleted(err)
 			return err
 		}
 		if !attrs.Deleted.IsZero() {
@@ -572,7 +572,7 @@ func (fs *GCSFs) Walk(root string, walkFn filepath.WalkFunc) error {
 	}
 
 	walkFn(root, NewFileInfo(root, true, 0, time.Now(), false), err) //nolint:errcheck
-	metrics.GCSListObjectsCompleted(err)
+	metric.GCSListObjectsCompleted(err)
 	return err
 }
 
@@ -641,7 +641,7 @@ func (fs *GCSFs) checkIfBucketExists() error {
 	defer cancelFn()
 	bkt := fs.svc.Bucket(fs.config.Bucket)
 	_, err := bkt.Attrs(ctx)
-	metrics.GCSHeadBucketCompleted(err)
+	metric.GCSHeadBucketCompleted(err)
 	return err
 }
 
@@ -671,7 +671,7 @@ func (fs *GCSFs) hasContents(name string) (bool, error) {
 			break
 		}
 		if err != nil {
-			metrics.GCSListObjectsCompleted(err)
+			metric.GCSListObjectsCompleted(err)
 			return result, err
 		}
 		name, _ := fs.resolve(attrs.Name, prefix)
@@ -683,7 +683,7 @@ func (fs *GCSFs) hasContents(name string) (bool, error) {
 		break
 	}
 
-	metrics.GCSListObjectsCompleted(err)
+	metric.GCSListObjectsCompleted(err)
 	return result, nil
 }
 
@@ -705,7 +705,7 @@ func (fs *GCSFs) headObject(name string) (*storage.ObjectAttrs, error) {
 	bkt := fs.svc.Bucket(fs.config.Bucket)
 	obj := bkt.Object(name)
 	attrs, err := obj.Attrs(ctx)
-	metrics.GCSHeadObjectCompleted(err)
+	metric.GCSHeadObjectCompleted(err)
 	return attrs, err
 }
vfs/s3fs.go
@@ -23,8 +23,8 @@ import (
 	"github.com/pkg/sftp"
 
 	"github.com/drakkan/sftpgo/v2/logger"
-	"github.com/drakkan/sftpgo/v2/metrics"
-	"github.com/drakkan/sftpgo/v2/utils"
+	"github.com/drakkan/sftpgo/v2/metric"
+	"github.com/drakkan/sftpgo/v2/util"
 	"github.com/drakkan/sftpgo/v2/version"
 )
 
@@ -178,6 +178,10 @@ func (fs *S3Fs) Open(name string, offset int64) (File, *pipeat.PipeReaderAt, fun
 	}
 	ctx, cancelFn := context.WithCancel(context.Background())
 	downloader := s3manager.NewDownloaderWithClient(fs.svc)
+	/*downloader.RequestOptions = append(downloader.RequestOptions, func(r *request.Request) {
+		newCtx, _ := context.WithTimeout(r.Context(), time.Minute)
+		r.SetContext(newCtx)
+	})*/
 	var streamRange *string
 	if offset > 0 {
 		streamRange = aws.String(fmt.Sprintf("bytes=%v-", offset))
@@ -192,7 +196,7 @@ func (fs *S3Fs) Open(name string, offset int64) (File, *pipeat.PipeReaderAt, fun
 		})
 		w.CloseWithError(err) //nolint:errcheck
 		fsLog(fs, logger.LevelDebug, "download completed, path: %#v size: %v, err: %v", name, n, err)
-		metrics.S3TransferCompleted(n, 1, err)
+		metric.S3TransferCompleted(n, 1, err)
 	}()
 	return nil, r, cancelFn, nil
 }
@@ -219,8 +223,8 @@ func (fs *S3Fs) Create(name string, flag int) (File, *PipeWriter, func(), error)
 			Bucket:       aws.String(fs.config.Bucket),
 			Key:          aws.String(key),
 			Body:         r,
-			StorageClass: utils.NilIfEmpty(fs.config.StorageClass),
-			ContentType:  utils.NilIfEmpty(contentType),
+			StorageClass: util.NilIfEmpty(fs.config.StorageClass),
+			ContentType:  util.NilIfEmpty(contentType),
 		}, func(u *s3manager.Uploader) {
 			u.Concurrency = fs.config.UploadConcurrency
 			u.PartSize = fs.config.UploadPartSize
@@ -229,7 +233,7 @@ func (fs *S3Fs) Create(name string, flag int) (File, *PipeWriter, func(), error)
 		p.Done(err)
 		fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, response: %v, readed bytes: %v, err: %+v",
 			name, response, r.GetReadedBytes(), err)
-		metrics.S3TransferCompleted(r.GetReadedBytes(), 0, err)
+		metric.S3TransferCompleted(r.GetReadedBytes(), 0, err)
 	}()
 	return nil, p, cancelFn, nil
 }
@@ -280,10 +284,10 @@ func (fs *S3Fs) Rename(source, target string) error {
 		Bucket:       aws.String(fs.config.Bucket),
 		CopySource:   aws.String(url.PathEscape(copySource)),
 		Key:          aws.String(target),
-		StorageClass: utils.NilIfEmpty(fs.config.StorageClass),
-		ContentType:  utils.NilIfEmpty(contentType),
+		StorageClass: util.NilIfEmpty(fs.config.StorageClass),
+		ContentType:  util.NilIfEmpty(contentType),
 	})
-	metrics.S3CopyObjectCompleted(err)
+	metric.S3CopyObjectCompleted(err)
 	if err != nil {
 		return err
 	}
@@ -310,7 +314,7 @@ func (fs *S3Fs) Remove(name string, isDir bool) error {
 		Bucket: aws.String(fs.config.Bucket),
 		Key:    aws.String(name),
 	})
-	metrics.S3DeleteObjectCompleted(err)
+	metric.S3DeleteObjectCompleted(err)
 	return err
 }
 
@@ -418,7 +422,7 @@ func (fs *S3Fs) ReadDir(dirname string) ([]os.FileInfo, error) {
 		}
 		return true
 	})
-	metrics.S3ListObjectsCompleted(err)
+	metric.S3ListObjectsCompleted(err)
 	return result, err
 }
 
@@ -505,7 +509,7 @@ func (fs *S3Fs) ScanRootDirContents() (int, int64, error) {
 		}
 		return true
 	})
-	metrics.S3ListObjectsCompleted(err)
+	metric.S3ListObjectsCompleted(err)
 	return numFiles, size, err
 }
 
@@ -574,7 +578,7 @@ func (fs *S3Fs) Walk(root string, walkFn filepath.WalkFunc) error {
 		}
 		return true
 	})
-	metrics.S3ListObjectsCompleted(err)
+	metric.S3ListObjectsCompleted(err)
 	walkFn(root, NewFileInfo(root, true, 0, time.Now(), false), err) //nolint:errcheck
 
 	return err
@@ -621,7 +625,7 @@ func (fs *S3Fs) checkIfBucketExists() error {
 	_, err := fs.svc.HeadBucketWithContext(ctx, &s3.HeadBucketInput{
 		Bucket: aws.String(fs.config.Bucket),
 	})
-	metrics.S3HeadBucketCompleted(err)
+	metric.S3HeadBucketCompleted(err)
 	return err
 }
 
@@ -641,7 +645,7 @@ func (fs *S3Fs) hasContents(name string) (bool, error) {
 		Prefix:  aws.String(prefix),
 		MaxKeys: &maxResults,
 	})
-	metrics.S3ListObjectsCompleted(err)
+	metric.S3ListObjectsCompleted(err)
 	if err != nil {
 		return false, err
 	}
@@ -664,7 +668,7 @@ func (fs *S3Fs) headObject(name string) (*s3.HeadObjectOutput, error) {
 		Bucket: aws.String(fs.config.Bucket),
 		Key:    aws.String(name),
 	})
-	metrics.S3HeadObjectCompleted(err)
+	metric.S3HeadObjectCompleted(err)
 	return obj, err
 }
@@ -21,7 +21,8 @@ import (
 
 	"github.com/drakkan/sftpgo/v2/kms"
 	"github.com/drakkan/sftpgo/v2/logger"
-	"github.com/drakkan/sftpgo/v2/utils"
+	"github.com/drakkan/sftpgo/v2/sdk"
+	"github.com/drakkan/sftpgo/v2/util"
 	"github.com/drakkan/sftpgo/v2/version"
 )
 
@@ -35,23 +36,7 @@ var ErrSFTPLoop = errors.New("SFTP loop or nested local SFTP folders detected")
 
 // SFTPFsConfig defines the configuration for SFTP based filesystem
 type SFTPFsConfig struct {
-	Endpoint     string      `json:"endpoint,omitempty"`
-	Username     string      `json:"username,omitempty"`
-	Password     *kms.Secret `json:"password,omitempty"`
-	PrivateKey   *kms.Secret `json:"private_key,omitempty"`
-	Fingerprints []string    `json:"fingerprints,omitempty"`
-	// Prefix is the path prefix to strip from SFTP resource paths.
-	Prefix string `json:"prefix,omitempty"`
-	// Concurrent reads are safe to use and disabling them will degrade performance.
-	// Some servers automatically delete files once they are downloaded.
-	// Using concurrent reads is problematic with such servers.
-	DisableCouncurrentReads bool `json:"disable_concurrent_reads,omitempty"`
-	// The buffer size (in MB) to use for transfers.
-	// Buffering could improve performance for high latency networks.
-	// With buffering enabled upload resume is not supported and a file
-	// cannot be opened for both reading and writing at the same time
-	// 0 means disabled.
-	BufferSize int64 `json:"buffer_size,omitempty"`
+	sdk.SFTPFsConfig
 	forbiddenSelfUsernames []string `json:"-"`
 }
 
@@ -75,7 +60,7 @@ func (c *SFTPFsConfig) isEqual(other *SFTPFsConfig) bool {
 		return false
 	}
 	for _, fp := range c.Fingerprints {
-		if !utils.IsStringInSlice(fp, other.Fingerprints) {
+		if !util.IsStringInSlice(fp, other.Fingerprints) {
 			return false
 		}
 	}
@@ -116,7 +101,7 @@ func (c *SFTPFsConfig) Validate() error {
 		return err
 	}
 	if c.Prefix != "" {
-		c.Prefix = utils.CleanPath(c.Prefix)
+		c.Prefix = util.CleanPath(c.Prefix)
 	} else {
 		c.Prefix = "/"
 	}
@@ -745,8 +730,8 @@ func (fs *SFTPFs) createConnection() error {
 		User: fs.config.Username,
 		HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
 			fp := ssh.FingerprintSHA256(key)
-			if utils.IsStringInSlice(fp, sftpFingerprints) {
-				if utils.IsStringInSlice(fs.config.Username, fs.config.forbiddenSelfUsernames) {
+			if util.IsStringInSlice(fp, sftpFingerprints) {
+				if util.IsStringInSlice(fs.config.Username, fs.config.forbiddenSelfUsernames) {
					fsLog(fs, logger.LevelWarn, "SFTP loop or nested local SFTP folders detected, mount path %#v, username %#v, forbidden usernames: %+v",
						fs.mountPath, fs.config.Username, fs.config.forbiddenSelfUsernames)
					return ErrSFTPLoop
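Editorial note: the SFTP filesystem configuration now embeds `sdk.SFTPFsConfig` instead of declaring the fields itself; because Go promotes the fields of an embedded struct, the existing accesses such as `c.Prefix`, `c.Fingerprints` and `fs.config.Username` in the surrounding hunks keep compiling unchanged. A small illustrative sketch of that field promotion (type names as in this diff, values invented):

package main

import (
	"fmt"

	"github.com/drakkan/sftpgo/v2/sdk"
	"github.com/drakkan/sftpgo/v2/vfs"
)

func main() {
	// Endpoint and Username are declared on the embedded sdk.SFTPFsConfig,
	// but are addressable directly on the wrapping vfs type.
	cfg := vfs.SFTPFsConfig{
		SFTPFsConfig: sdk.SFTPFsConfig{
			Endpoint: "sftp.example.com:22", // hypothetical values
			Username: "demo",
		},
	}
	fmt.Println(cfg.Endpoint, cfg.Username) // promoted fields, no extra selector needed
}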
vfs/vfs.go
@@ -18,7 +18,8 @@ import (
 
 	"github.com/drakkan/sftpgo/v2/kms"
 	"github.com/drakkan/sftpgo/v2/logger"
-	"github.com/drakkan/sftpgo/v2/utils"
+	"github.com/drakkan/sftpgo/v2/sdk"
+	"github.com/drakkan/sftpgo/v2/util"
 )
 
 const dirMimeType = "inode/directory"
@@ -139,29 +140,7 @@ func (q *QuotaCheckResult) GetRemainingFiles() int {
 
 // S3FsConfig defines the configuration for S3 based filesystem
 type S3FsConfig struct {
-	Bucket string `json:"bucket,omitempty"`
-	// KeyPrefix is similar to a chroot directory for local filesystem.
-	// If specified then the SFTP user will only see objects that starts
-	// with this prefix and so you can restrict access to a specific
-	// folder. The prefix, if not empty, must not start with "/" and must
-	// end with "/".
-	// If empty the whole bucket contents will be available
-	KeyPrefix    string      `json:"key_prefix,omitempty"`
-	Region       string      `json:"region,omitempty"`
-	AccessKey    string      `json:"access_key,omitempty"`
-	AccessSecret *kms.Secret `json:"access_secret,omitempty"`
-	Endpoint     string      `json:"endpoint,omitempty"`
-	StorageClass string      `json:"storage_class,omitempty"`
-	// The buffer size (in MB) to use for multipart uploads. The minimum allowed part size is 5MB,
-	// and if this value is set to zero, the default value (5MB) for the AWS SDK will be used.
-	// The minimum allowed value is 5.
-	// Please note that if the upload bandwidth between the SFTP client and SFTPGo is greater than
-	// the upload bandwidth between SFTPGo and S3 then the SFTP client have to wait for the upload
-	// of the last parts to S3 after it ends the file upload to SFTPGo, and it may time out.
-	// Keep this in mind if you customize these parameters.
-	UploadPartSize int64 `json:"upload_part_size,omitempty"`
-	// How many parts are uploaded in parallel
-	UploadConcurrency int `json:"upload_concurrency,omitempty"`
+	sdk.S3FsConfig
 }
 
 func (c *S3FsConfig) isEqual(other *S3FsConfig) bool {
@@ -260,19 +239,7 @@ func (c *S3FsConfig) Validate() error {
 
 // GCSFsConfig defines the configuration for Google Cloud Storage based filesystem
 type GCSFsConfig struct {
-	Bucket string `json:"bucket,omitempty"`
-	// KeyPrefix is similar to a chroot directory for local filesystem.
-	// If specified then the SFTP user will only see objects that starts
-	// with this prefix and so you can restrict access to a specific
-	// folder. The prefix, if not empty, must not start with "/" and must
-	// end with "/".
-	// If empty the whole bucket contents will be available
-	KeyPrefix      string      `json:"key_prefix,omitempty"`
-	CredentialFile string      `json:"-"`
-	Credentials    *kms.Secret `json:"credentials,omitempty"`
-	// 0 explicit, 1 automatic
-	AutomaticCredentials int    `json:"automatic_credentials,omitempty"`
-	StorageClass         string `json:"storage_class,omitempty"`
+	sdk.GCSFsConfig
 }
 
 func (c *GCSFsConfig) isEqual(other *GCSFsConfig) bool {
@@ -331,39 +298,7 @@ func (c *GCSFsConfig) Validate(credentialsFilePath string) error {
 
 // AzBlobFsConfig defines the configuration for Azure Blob Storage based filesystem
 type AzBlobFsConfig struct {
-	Container string `json:"container,omitempty"`
-	// Storage Account Name, leave blank to use SAS URL
-	AccountName string `json:"account_name,omitempty"`
-	// Storage Account Key leave blank to use SAS URL.
-	// The access key is stored encrypted based on the kms configuration
-	AccountKey *kms.Secret `json:"account_key,omitempty"`
-	// Optional endpoint. Default is "blob.core.windows.net".
-	// If you use the emulator the endpoint must include the protocol,
-	// for example "http://127.0.0.1:10000"
-	Endpoint string `json:"endpoint,omitempty"`
-	// Shared access signature URL, leave blank if using account/key
-	SASURL *kms.Secret `json:"sas_url,omitempty"`
-	// KeyPrefix is similar to a chroot directory for local filesystem.
-	// If specified then the SFTPGo user will only see objects that starts
-	// with this prefix and so you can restrict access to a specific
-	// folder. The prefix, if not empty, must not start with "/" and must
-	// end with "/".
-	// If empty the whole bucket contents will be available
-	KeyPrefix string `json:"key_prefix,omitempty"`
-	// The buffer size (in MB) to use for multipart uploads.
-	// If this value is set to zero, the default value (1MB) for the Azure SDK will be used.
-	// Please note that if the upload bandwidth between the SFTPGo client and SFTPGo server is
-	// greater than the upload bandwidth between SFTPGo and Azure then the SFTP client have
-	// to wait for the upload of the last parts to Azure after it ends the file upload to SFTPGo,
-	// and it may time out.
-	// Keep this in mind if you customize these parameters.
-	UploadPartSize int64 `json:"upload_part_size,omitempty"`
-	// How many parts are uploaded in parallel
-	UploadConcurrency int `json:"upload_concurrency,omitempty"`
-	// Set to true if you use an Azure emulator such as Azurite
-	UseEmulator bool `json:"use_emulator,omitempty"`
-	// Blob Access Tier
-	AccessTier string `json:"access_tier,omitempty"`
+	sdk.AzBlobFsConfig
 }
 
 func (c *AzBlobFsConfig) isEqual(other *AzBlobFsConfig) bool {
@@ -476,7 +411,7 @@ func (c *AzBlobFsConfig) Validate() error {
 	if c.UploadConcurrency < 0 || c.UploadConcurrency > 64 {
 		return fmt.Errorf("invalid upload concurrency: %v", c.UploadConcurrency)
 	}
-	if !utils.IsStringInSlice(c.AccessTier, validAzAccessTier) {
+	if !util.IsStringInSlice(c.AccessTier, validAzAccessTier) {
 		return fmt.Errorf("invalid access tier %#v, valid values: \"''%v\"", c.AccessTier, strings.Join(validAzAccessTier, ", "))
 	}
 	return nil
@@ -484,7 +419,7 @@ func (c *AzBlobFsConfig) Validate() error {
 
 // CryptFsConfig defines the configuration to store local files as encrypted
 type CryptFsConfig struct {
-	Passphrase *kms.Secret `json:"passphrase,omitempty"`
+	sdk.CryptFsConfig
 }
 
 func (c *CryptFsConfig) isEqual(other *CryptFsConfig) bool {
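Editorial note: one practical consequence of the embedding, already visible in the GetACopy hunks earlier in this diff, is that reads of promoted fields stay the same while composite literals must now spell out the embedded type. A hedged before/after sketch with the field set reduced for brevity (bucket and region values are invented):

package main

import (
	"github.com/drakkan/sftpgo/v2/sdk"
	"github.com/drakkan/sftpgo/v2/vfs"
)

func main() {
	// Before this change the wrapper declared Bucket/Region itself, so a literal
	// like vfs.S3FsConfig{Bucket: "bucket", Region: "us-east-1"} compiled directly.
	// After the change the fields live on the embedded sdk.S3FsConfig, so the
	// literal has to name the inner type, exactly as GetACopy now does.
	s3 := vfs.S3FsConfig{
		S3FsConfig: sdk.S3FsConfig{
			Bucket: "bucket",     // illustrative values
			Region: "us-east-1",
		},
	}
	_ = s3.Bucket // reads are still promoted, no wrapper-level change needed
}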
@@ -13,7 +13,7 @@ import (
 	"github.com/drakkan/sftpgo/v2/common"
 	"github.com/drakkan/sftpgo/v2/dataprovider"
 	"github.com/drakkan/sftpgo/v2/logger"
-	"github.com/drakkan/sftpgo/v2/utils"
+	"github.com/drakkan/sftpgo/v2/util"
 	"github.com/drakkan/sftpgo/v2/vfs"
 )
 
@@ -56,7 +56,7 @@ func (c *Connection) GetCommand() string {
 func (c *Connection) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
 	c.UpdateLastActivity()
 
-	name = utils.CleanPath(name)
+	name = util.CleanPath(name)
 	return c.CreateDir(name)
 }
 
@@ -64,8 +64,8 @@ func (c *Connection) Mkdir(ctx context.Context, name string, perm os.FileMode) e
 func (c *Connection) Rename(ctx context.Context, oldName, newName string) error {
 	c.UpdateLastActivity()
 
-	oldName = utils.CleanPath(oldName)
-	newName = utils.CleanPath(newName)
+	oldName = util.CleanPath(oldName)
+	newName = util.CleanPath(newName)
 
 	return c.BaseConnection.Rename(oldName, newName)
 }
@@ -75,7 +75,7 @@ func (c *Connection) Rename(ctx context.Context, oldName, newName string) error
 func (c *Connection) Stat(ctx context.Context, name string) (os.FileInfo, error) {
 	c.UpdateLastActivity()
 
-	name = utils.CleanPath(name)
+	name = util.CleanPath(name)
 	if !c.User.HasPerm(dataprovider.PermListItems, path.Dir(name)) {
 		return nil, c.GetPermissionDeniedError()
 	}
@@ -93,7 +93,7 @@ func (c *Connection) Stat(ctx context.Context, name string) (os.FileInfo, error)
 func (c *Connection) RemoveAll(ctx context.Context, name string) error {
 	c.UpdateLastActivity()
 
-	name = utils.CleanPath(name)
+	name = util.CleanPath(name)
 	fs, p, err := c.GetFsAndResolvedPath(name)
 	if err != nil {
 		return err
@@ -116,7 +116,7 @@ func (c *Connection) RemoveAll(ctx context.Context, name string) error {
 func (c *Connection) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (webdav.File, error) {
 	c.UpdateLastActivity()
 
-	name = utils.CleanPath(name)
+	name = util.CleanPath(name)
 	fs, p, err := c.GetFsAndResolvedPath(name)
 	if err != nil {
 		return nil, err
@@ -351,7 +351,7 @@ func (c *Connection) orderDirsToRemove(fs vfs.Fs, dirsToRemove []objectMapping)
 
 	for len(orderedDirs) < len(dirsToRemove) {
 		for idx, d := range dirsToRemove {
-			if utils.IsStringInSlice(d.fsPath, removedDirs) {
+			if util.IsStringInSlice(d.fsPath, removedDirs) {
 				continue
 			}
 			isEmpty := true
@@ -359,7 +359,7 @@ func (c *Connection) orderDirsToRemove(fs vfs.Fs, dirsToRemove []objectMapping)
 				if idx == idx1 {
 					continue
 				}
-				if utils.IsStringInSlice(d1.fsPath, removedDirs) {
+				if util.IsStringInSlice(d1.fsPath, removedDirs) {
 					continue
 				}
 				if strings.HasPrefix(d1.fsPath, d.fsPath+pathSeparator) {
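Editorial note: every client-supplied WebDAV path above is passed through `util.CleanPath` before permission checks and filesystem resolution. The call exists in this codebase (it is used throughout the diff); the exact normalization it performs is assumed here rather than verified, so the sketch below only illustrates the kind of input it is guarding against:

package main

import (
	"fmt"

	"github.com/drakkan/sftpgo/v2/util"
)

func main() {
	// Hypothetical client-supplied paths; the expectation (not verified) is that
	// CleanPath collapses ".." and duplicate separators into a clean virtual path
	// before any permission check sees them.
	for _, p := range []string{"dir/../secret", "/a//b/", "../.."} {
		fmt.Println(p, "->", util.CleanPath(p))
	}
}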
@@ -23,7 +23,8 @@ import (
 	"github.com/drakkan/sftpgo/v2/common"
 	"github.com/drakkan/sftpgo/v2/dataprovider"
 	"github.com/drakkan/sftpgo/v2/kms"
-	"github.com/drakkan/sftpgo/v2/utils"
+	"github.com/drakkan/sftpgo/v2/sdk"
+	"github.com/drakkan/sftpgo/v2/util"
 	"github.com/drakkan/sftpgo/v2/vfs"
 )
 
@@ -381,8 +382,10 @@ func TestOrderDirsToRemove(t *testing.T) {
 
 func TestUserInvalidParams(t *testing.T) {
 	u := &dataprovider.User{
+		BaseUser: sdk.BaseUser{
 			Username: "username",
 			HomeDir:  "invalid",
+		},
 	}
 	c := &Configuration{
 		Bindings: []Binding{
@@ -433,15 +436,15 @@ func TestRemoteAddress(t *testing.T) {
 	assert.Empty(t, req.RemoteAddr)
 
 	req.Header.Set("True-Client-IP", remoteAddr1)
-	ip := utils.GetRealIP(req)
+	ip := util.GetRealIP(req)
 	assert.Equal(t, remoteAddr1, ip)
 	req.Header.Del("True-Client-IP")
 	req.Header.Set("CF-Connecting-IP", remoteAddr1)
-	ip = utils.GetRealIP(req)
+	ip = util.GetRealIP(req)
 	assert.Equal(t, remoteAddr1, ip)
 	req.Header.Del("CF-Connecting-IP")
 	req.Header.Set("X-Forwarded-For", remoteAddr1)
-	ip = utils.GetRealIP(req)
+	ip = util.GetRealIP(req)
 	assert.Equal(t, remoteAddr1, ip)
 	// this will be ignored, remoteAddr1 is not allowed to se this header
 	req.Header.Set("X-Forwarded-For", remoteAddr2)
@@ -453,7 +456,7 @@ func TestRemoteAddress(t *testing.T) {
 	assert.Empty(t, ip)
 
 	req.Header.Set("X-Forwarded-For", fmt.Sprintf("%v, %v", remoteAddr2, remoteAddr1))
-	ip = utils.GetRealIP(req)
+	ip = util.GetRealIP(req)
 	assert.Equal(t, remoteAddr2, ip)
 
 	req.RemoteAddr = remoteAddr2
@@ -477,7 +480,7 @@ func TestRemoteAddress(t *testing.T) {
 	req.Header.Del("X-Forwarded-For")
 	req.RemoteAddr = ""
 	req.Header.Set("X-Real-IP", remoteAddr1)
-	ip = utils.GetRealIP(req)
+	ip = util.GetRealIP(req)
 	assert.Equal(t, remoteAddr1, ip)
 	req.RemoteAddr = ""
 }
@@ -492,7 +495,9 @@ func TestConnWithNilRequest(t *testing.T) {
 func TestResolvePathErrors(t *testing.T) {
 	ctx := context.Background()
 	user := dataprovider.User{
+		BaseUser: sdk.BaseUser{
 			HomeDir: "invalid",
+		},
 	}
 	user.Permissions = make(map[string][]string)
 	user.Permissions["/"] = []string{dataprovider.PermAny}
@@ -561,7 +566,9 @@ func TestResolvePathErrors(t *testing.T) {
 func TestFileAccessErrors(t *testing.T) {
 	ctx := context.Background()
 	user := dataprovider.User{
+		BaseUser: sdk.BaseUser{
 			HomeDir: filepath.Clean(os.TempDir()),
+		},
 	}
 	user.Permissions = make(map[string][]string)
 	user.Permissions["/"] = []string{dataprovider.PermAny}
@@ -622,7 +629,9 @@ func TestFileAccessErrors(t *testing.T) {
 
 func TestRemoveDirTree(t *testing.T) {
 	user := dataprovider.User{
+		BaseUser: sdk.BaseUser{
 			HomeDir: filepath.Clean(os.TempDir()),
+		},
 	}
 	user.Permissions = make(map[string][]string)
 	user.Permissions["/"] = []string{dataprovider.PermAny}
@@ -673,7 +682,9 @@ func TestRemoveDirTree(t *testing.T) {
 
 func TestContentType(t *testing.T) {
 	user := dataprovider.User{
+		BaseUser: sdk.BaseUser{
 			HomeDir: filepath.Clean(os.TempDir()),
+		},
 	}
 	user.Permissions = make(map[string][]string)
 	user.Permissions["/"] = []string{dataprovider.PermAny}
@@ -722,7 +733,9 @@ func TestContentType(t *testing.T) {
 
 func TestTransferReadWriteErrors(t *testing.T) {
 	user := dataprovider.User{
+		BaseUser: sdk.BaseUser{
 			HomeDir: filepath.Clean(os.TempDir()),
+		},
 	}
 	user.Permissions = make(map[string][]string)
 	user.Permissions["/"] = []string{dataprovider.PermAny}
@@ -815,7 +828,9 @@ func TestTransferReadWriteErrors(t *testing.T) {
 
 func TestTransferSeek(t *testing.T) {
 	user := dataprovider.User{
+		BaseUser: sdk.BaseUser{
 			HomeDir: filepath.Clean(os.TempDir()),
+		},
 	}
 	user.Permissions = make(map[string][]string)
 	user.Permissions["/"] = []string{dataprovider.PermAny}
@@ -910,11 +925,13 @@ func TestBasicUsersCache(t *testing.T) {
 	username := "webdav_internal_test"
	password := "pwd"
	u := dataprovider.User{
+		BaseUser: sdk.BaseUser{
			Username:       username,
			Password:       password,
			HomeDir:        filepath.Join(os.TempDir(), username),
			Status:         1,
			ExpirationDate: 0,
+		},
	}
	u.Permissions = make(map[string][]string)
	u.Permissions["/"] = []string{dataprovider.PermAny}
@@ -1032,11 +1049,13 @@ func TestCachedUserWithFolders(t *testing.T) {
	password := "dav_pwd"
	folderName := "test_folder"
	u := dataprovider.User{
+		BaseUser: sdk.BaseUser{
			Username:       username,
			Password:       password,
			HomeDir:        filepath.Join(os.TempDir(), username),
			Status:         1,
			ExpirationDate: 0,
+		},
	}
	u.Permissions = make(map[string][]string)
	u.Permissions["/"] = []string{dataprovider.PermAny}
@@ -1140,9 +1159,11 @@ func TestUsersCacheSizeAndExpiration(t *testing.T) {
	username := "webdav_internal_test"
	password := "pwd"
	u := dataprovider.User{
+		BaseUser: sdk.BaseUser{
			HomeDir:        filepath.Join(os.TempDir(), username),
			Status:         1,
			ExpirationDate: 0,
+		},
	}
	u.Username = username + "1"
	u.Password = password + "1"
@@ -1348,11 +1369,13 @@ func TestUserCacheIsolation(t *testing.T) {
	username := "webdav_internal_cache_test"
	password := "dav_pwd"
	u := dataprovider.User{
+		BaseUser: sdk.BaseUser{
			Username:       username,
			Password:       password,
			HomeDir:        filepath.Join(os.TempDir(), username),
			Status:         1,
			ExpirationDate: 0,
+		},
	}
	u.Permissions = make(map[string][]string)
	u.Permissions["/"] = []string{dataprovider.PermAny}
@@ -1382,13 +1405,13 @@ func TestUserCacheIsolation(t *testing.T) {
		assert.True(t, cachedUser.User.FsConfig.S3Config.AccessSecret.IsEncrypted())
		err = cachedUser.User.FsConfig.S3Config.AccessSecret.Decrypt()
		assert.NoError(t, err)
-		cachedUser.User.FsConfig.Provider = vfs.S3FilesystemProvider
+		cachedUser.User.FsConfig.Provider = sdk.S3FilesystemProvider
		_, err = cachedUser.User.GetFilesystem("")
		assert.Error(t, err, "we don't have to get the previously cached filesystem!")
	}
	cachedUser, ok = dataprovider.GetCachedWebDAVUser(username)
	if assert.True(t, ok) {
-		assert.Equal(t, vfs.LocalFilesystemProvider, cachedUser.User.FsConfig.Provider)
+		assert.Equal(t, sdk.LocalFilesystemProvider, cachedUser.User.FsConfig.Provider)
		assert.False(t, cachedUser.User.FsConfig.S3Config.AccessSecret.IsEncrypted())
	}
@ -22,8 +22,8 @@ import (
|
|||
"github.com/drakkan/sftpgo/v2/common"
|
||||
"github.com/drakkan/sftpgo/v2/dataprovider"
|
||||
"github.com/drakkan/sftpgo/v2/logger"
|
||||
"github.com/drakkan/sftpgo/v2/metrics"
|
||||
"github.com/drakkan/sftpgo/v2/utils"
|
||||
"github.com/drakkan/sftpgo/v2/metric"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
)
|
||||
|
||||
type webDavServer struct {
|
||||
|
@ -59,7 +59,7 @@ func (s *webDavServer) listenAndServe(compressor *middleware.Compressor) error {
|
|||
httpServer.TLSConfig = &tls.Config{
|
||||
GetCertificate: certMgr.GetCertificateFunc(),
|
||||
MinVersion: tls.VersionTLS12,
|
||||
CipherSuites: utils.GetTLSCiphersFromNames(s.binding.TLSCipherSuites),
|
||||
CipherSuites: util.GetTLSCiphersFromNames(s.binding.TLSCipherSuites),
|
||||
PreferServerCipherSuites: true,
|
||||
}
|
||||
logger.Debug(logSender, "", "configured TLS cipher suites for binding %#v: %v", s.binding.GetAddress(),
|
||||
|
@@ -74,11 +74,11 @@ func (s *webDavServer) listenAndServe(compressor *middleware.Compressor) error {
                httpServer.TLSConfig.ClientAuth = tls.VerifyClientCertIfGiven
            }
        }
-       return utils.HTTPListenAndServe(httpServer, s.binding.Address, s.binding.Port, true, logSender)
+       return util.HTTPListenAndServe(httpServer, s.binding.Address, s.binding.Port, true, logSender)
    }
    s.binding.EnableHTTPS = false
    serviceStatus.Bindings = append(serviceStatus.Bindings, s.binding)
-   return utils.HTTPListenAndServe(httpServer, s.binding.Address, s.binding.Port, false, logSender)
+   return util.HTTPListenAndServe(httpServer, s.binding.Address, s.binding.Port, false, logSender)
}

func (s *webDavServer) verifyTLSConnection(state tls.ConnectionState) error {
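util.HTTPListenAndServe is called with the server, the bind address and port, and a flag that selects HTTPS or plain HTTP. A hedged sketch of that calling pattern built only on net/http; the helper below is illustrative, and the real function may handle listeners, logging and shutdown differently:

package main

import (
    "fmt"
    "net"
    "net/http"
)

// listenAndServe binds the address/port itself and serves either plain HTTP
// or HTTPS depending on the flag. With empty cert/key paths, ServeTLS takes
// the certificates from srv.TLSConfig.
func listenAndServe(srv *http.Server, address string, port int, useTLS bool) error {
    ln, err := net.Listen("tcp", fmt.Sprintf("%s:%d", address, port))
    if err != nil {
        return err
    }
    if useTLS {
        return srv.ServeTLS(ln, "", "")
    }
    return srv.Serve(ln)
}

func main() {
    srv := &http.Server{Handler: http.NotFoundHandler()}
    _ = listenAndServe(srv, "127.0.0.1", 8080, false)
}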
@@ -299,7 +299,7 @@ func (s *webDavServer) validateUser(user *dataprovider.User, r *http.Request, lo
            user.Username, user.HomeDir)
        return connID, fmt.Errorf("cannot login user with invalid home dir: %#v", user.HomeDir)
    }
-   if utils.IsStringInSlice(common.ProtocolWebDAV, user.Filters.DeniedProtocols) {
+   if util.IsStringInSlice(common.ProtocolWebDAV, user.Filters.DeniedProtocols) {
        logger.Debug(logSender, connectionID, "cannot login user %#v, protocol DAV is not allowed", user.Username)
        return connID, fmt.Errorf("protocol DAV is not allowed for user %#v", user.Username)
    }
@@ -323,12 +323,12 @@ func (s *webDavServer) validateUser(user *dataprovider.User, r *http.Request, lo
}

func (s *webDavServer) checkRemoteAddress(r *http.Request) string {
-   ipAddr := utils.GetIPFromRemoteAddress(r.RemoteAddr)
+   ipAddr := util.GetIPFromRemoteAddress(r.RemoteAddr)
    ip := net.ParseIP(ipAddr)
    if ip != nil {
        for _, allow := range s.binding.allowHeadersFrom {
            if allow(ip) {
-               parsedIP := utils.GetRealIP(r)
+               parsedIP := util.GetRealIP(r)
                if parsedIP != "" {
                    ipAddr = parsedIP
                    r.RemoteAddr = ipAddr
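In checkRemoteAddress the forwarded client address (util.GetRealIP, presumably read from a proxy header such as X-Forwarded-For) is only trusted when the direct peer matches one of the allowHeadersFrom matchers. A standalone sketch of that guard, with hypothetical helpers in place of the real ones:

package main

import (
    "fmt"
    "net"
    "net/http"
    "strings"
)

// realIPFromHeader is a hypothetical stand-in for the header parsing done by
// the real helper: it takes the first entry of X-Forwarded-For, if present.
func realIPFromHeader(r *http.Request) string {
    if fwd := r.Header.Get("X-Forwarded-For"); fwd != "" {
        return strings.TrimSpace(strings.Split(fwd, ",")[0])
    }
    return ""
}

// checkRemoteAddress mirrors the guard pattern above: the forwarded address
// is only honoured when the direct peer is inside an allowed network.
func checkRemoteAddress(r *http.Request, allowed []*net.IPNet) string {
    host, _, err := net.SplitHostPort(r.RemoteAddr)
    if err != nil {
        host = r.RemoteAddr
    }
    ip := net.ParseIP(host)
    if ip != nil {
        for _, network := range allowed {
            if network.Contains(ip) {
                if parsed := realIPFromHeader(r); parsed != "" {
                    host = parsed
                    r.RemoteAddr = host
                }
                break
            }
        }
    }
    return host
}

func main() {
    _, lo, _ := net.ParseCIDR("127.0.0.0/8")
    r, _ := http.NewRequest(http.MethodGet, "http://localhost/", nil)
    r.RemoteAddr = "127.0.0.1:54321"
    r.Header.Set("X-Forwarded-For", "203.0.113.10")
    fmt.Println(checkRemoteAddress(r, []*net.IPNet{lo})) // 203.0.113.10
}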
@@ -366,15 +366,15 @@ func writeLog(r *http.Request, err error) {
}

func updateLoginMetrics(user *dataprovider.User, ip, loginMethod string, err error) {
-   metrics.AddLoginAttempt(loginMethod)
+   metric.AddLoginAttempt(loginMethod)
    if err != nil && err != common.ErrInternalFailure {
        logger.ConnectionFailedLog(user.Username, ip, loginMethod, common.ProtocolWebDAV, err.Error())
        event := common.HostEventLoginFailed
-       if _, ok := err.(*utils.RecordNotFoundError); ok {
+       if _, ok := err.(*util.RecordNotFoundError); ok {
            event = common.HostEventUserNotFound
        }
        common.AddDefenderEvent(ip, event)
    }
-   metrics.AddLoginResult(loginMethod, err)
+   metric.AddLoginResult(loginMethod, err)
    dataprovider.ExecutePostLoginHook(user, loginMethod, ip, common.ProtocolWebDAV, err)
}
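updateLoginMetrics distinguishes an unknown user (util.RecordNotFoundError) from a generic failed login before reporting the host event to the defender. A small sketch of the same classification, using a hypothetical error type and errors.As instead of the plain type assertion above:

package main

import (
    "errors"
    "fmt"
)

// notFoundError is a stand-in for the provider's "record not found" error type.
type notFoundError struct{ msg string }

func (e *notFoundError) Error() string { return e.msg }

const (
    eventLoginFailed  = "LoginFailed"
    eventUserNotFound = "UserNotFound"
)

// classifyLoginError picks the host event to record for a failed login.
func classifyLoginError(err error) string {
    event := eventLoginFailed
    var nf *notFoundError
    if errors.As(err, &nf) {
        event = eventUserNotFound
    }
    return event
}

func main() {
    fmt.Println(classifyLoginError(errors.New("wrong password")))          // LoginFailed
    fmt.Println(classifyLoginError(&notFoundError{msg: "user not found"})) // UserNotFound
}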
@@ -11,7 +11,7 @@ import (
    "github.com/drakkan/sftpgo/v2/common"
    "github.com/drakkan/sftpgo/v2/dataprovider"
    "github.com/drakkan/sftpgo/v2/logger"
-   "github.com/drakkan/sftpgo/v2/utils"
+   "github.com/drakkan/sftpgo/v2/util"
)

type ctxReqParams int
@@ -97,7 +97,7 @@ type Binding struct {
}

func (b *Binding) parseAllowedProxy() error {
-   allowedFuncs, err := utils.ParseAllowedIPAndRanges(b.ProxyAllowed)
+   allowedFuncs, err := util.ParseAllowedIPAndRanges(b.ProxyAllowed)
    if err != nil {
        return err
    }
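util.ParseAllowedIPAndRanges presumably turns each ProxyAllowed entry (a single IP or a CIDR range) into a matcher function. A possible standard-library implementation of that idea, hypothetical and not the actual sftpgo helper:

package main

import (
    "fmt"
    "net"
)

// parseAllowedIPAndRanges converts each entry, either a single IP or a CIDR
// range, into a func(net.IP) bool matcher.
func parseAllowedIPAndRanges(entries []string) ([]func(net.IP) bool, error) {
    matchers := make([]func(net.IP) bool, 0, len(entries))
    for _, entry := range entries {
        if ip := net.ParseIP(entry); ip != nil {
            allowed := ip // copy for the closure
            matchers = append(matchers, func(candidate net.IP) bool {
                return allowed.Equal(candidate)
            })
            continue
        }
        _, network, err := net.ParseCIDR(entry)
        if err != nil {
            return nil, fmt.Errorf("invalid IP or CIDR %q: %w", entry, err)
        }
        matchers = append(matchers, network.Contains)
    }
    return matchers, nil
}

func main() {
    allow, err := parseAllowedIPAndRanges([]string{"192.168.1.10", "10.0.0.0/8"})
    if err != nil {
        panic(err)
    }
    fmt.Println(allow[1](net.ParseIP("10.1.2.3"))) // true
}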
@@ -227,7 +227,7 @@ func ReloadCertificateMgr() error {
}

func getConfigPath(name, configDir string) string {
-   if !utils.IsFileInputValid(name) {
+   if !util.IsFileInputValid(name) {
        return ""
    }
    if name != "" && !filepath.IsAbs(name) {
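getConfigPath rejects invalid file names and, as the truncated hunk suggests, resolves relative names against the configuration directory. A purely illustrative completion of such a helper, written under that assumption and not taken from the sftpgo sources:

package main

import (
    "fmt"
    "path/filepath"
)

// configPath is an illustrative helper: relative, non-empty names are
// resolved against the configuration directory, absolute names pass through.
func configPath(name, configDir string) string {
    if name == "" {
        return ""
    }
    if !filepath.IsAbs(name) {
        return filepath.Join(configDir, name)
    }
    return name
}

func main() {
    fmt.Println(configPath("webdavd.crt", "/etc/sftpgo"))     // /etc/sftpgo/webdavd.crt
    fmt.Println(configPath("/tmp/webdavd.crt", "/etc/sftpgo")) // /tmp/webdavd.crt
}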