add WebDAV support

Fixes #147
Nicola Murino 2020-08-11 23:56:10 +02:00
parent 9a15a54885
commit a9e21c282a
48 changed files with 3376 additions and 363 deletions


@ -46,7 +46,7 @@ jobs:
shell: bash
- name: Run test cases using SQLite provider
run: go test -v -timeout 5m ./... -coverprofile=coverage.txt -covermode=atomic
run: go test -v -p 1 -timeout 5m ./... -coverprofile=coverage.txt -covermode=atomic
- name: Upload coverage to Codecov
if: ${{ matrix.upload-coverage }}
@ -57,17 +57,18 @@ jobs:
- name: Run test cases using bolt provider
run: |
go test -v -timeout 1m ./config -covermode=atomic
go test -v -timeout 1m ./common -covermode=atomic
go test -v -timeout 5m ./httpd -covermode=atomic
go test -v -timeout 5m ./sftpd -covermode=atomic
go test -v -timeout 5m ./ftpd -covermode=atomic
go test -v -p 1 -timeout 1m ./config -covermode=atomic
go test -v -p 1 -timeout 1m ./common -covermode=atomic
go test -v -p 1 -timeout 2m ./httpd -covermode=atomic
go test -v -p 1 -timeout 5m ./sftpd -covermode=atomic
go test -v -p 1 -timeout 2m ./ftpd -covermode=atomic
go test -v -p 1 -timeout 2m ./webdavd -covermode=atomic
env:
SFTPGO_DATA_PROVIDER__DRIVER: bolt
SFTPGO_DATA_PROVIDER__NAME: 'sftpgo_bolt.db'
- name: Run test cases using memory provider
run: go test -v -timeout 5m ./... -covermode=atomic
run: go test -v -p 1 -timeout 5m ./... -covermode=atomic
env:
SFTPGO_DATA_PROVIDER__DRIVER: memory
SFTPGO_DATA_PROVIDER__NAME: ''
@ -149,7 +150,7 @@ jobs:
- name: Run tests using PostgreSQL provider
run: |
./sftpgo initprovider
go test -v -timeout 5m ./... -covermode=atomic
go test -v -p 1 -timeout 5m ./... -covermode=atomic
env:
SFTPGO_DATA_PROVIDER__DRIVER: postgresql
SFTPGO_DATA_PROVIDER__NAME: sftpgo
@ -161,7 +162,7 @@ jobs:
- name: Run tests using MySQL provider
run: |
./sftpgo initprovider
go test -v -timeout 5m ./... -covermode=atomic
go test -v -p 1 -timeout 5m ./... -covermode=atomic
env:
SFTPGO_DATA_PROVIDER__DRIVER: mysql
SFTPGO_DATA_PROVIDER__NAME: sftpgo


@ -6,13 +6,14 @@
[![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0)
[![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go)
Fully featured and highly configurable SFTP server, written in Go
Fully featured and highly configurable SFTP server with optional FTP/S and WebDAV support, written in Go.
It can serve local filesystem, S3 or Google Cloud Storage.
## Features
- Each account is chrooted to its home directory.
- SFTP accounts are virtual accounts stored in a "data provider".
- SFTPGo uses virtual accounts stored inside a "data provider".
- SQLite, MySQL, PostgreSQL, bbolt (key/value store in pure Go) and in-memory data providers are supported.
- Each account is chrooted to its home directory.
- Public key and password authentication. Multiple public keys per user are supported.
- SSH user [certificate authentication](https://cvsweb.openbsd.org/src/usr.bin/ssh/PROTOCOL.certkeys?rev=1.8).
- Keyboard interactive authentication. You can easily setup a customizable multi-factor authentication.
@ -34,9 +35,10 @@ Fully featured and highly configurable SFTP server, written in Go
- Support for Git repositories over SSH.
- SCP and rsync are supported.
- FTP/S is supported.
- Support for serving local filesystem, S3 Compatible Object Storage and Google Cloud Storage over SFTP/SCP/FTP.
- WebDAV is supported.
- Support for serving local filesystem, S3 Compatible Object Storage and Google Cloud Storage over SFTP/SCP/FTP/WebDAV.
- [Prometheus metrics](./docs/metrics.md) are exposed.
- Support for HAProxy PROXY protocol: you can proxy and/or load balance the SFTP/SCP/FTP service without losing the information about the client's address.
- Support for HAProxy PROXY protocol: you can proxy and/or load balance the SFTP/SCP/FTP/WebDAV service without losing the information about the client's address.
- [REST API](./docs/rest-api.md) for users and folders management, backup, restore and real time reports of the active connections with possibility of forcibly closing a connection.
- [Web based administration interface](./docs/web-admin.md) to easily manage users, folders and connections.
- Easy [migration](./examples/rest-api-cli#convert-users-from-other-stores) from Linux system user accounts.


@ -61,10 +61,11 @@ const (
// Supported protocols
const (
ProtocolSFTP = "SFTP"
ProtocolSCP = "SCP"
ProtocolSSH = "SSH"
ProtocolFTP = "FTP"
ProtocolSFTP = "SFTP"
ProtocolSCP = "SCP"
ProtocolSSH = "SSH"
ProtocolFTP = "FTP"
ProtocolWebDAV = "DAV"
)
// Upload modes
@ -94,7 +95,7 @@ var (
QuotaScans ActiveScans
idleTimeoutTicker *time.Ticker
idleTimeoutTickerDone chan bool
supportedProtocols = []string{ProtocolSFTP, ProtocolSCP, ProtocolSSH, ProtocolFTP}
supportedProtocols = []string{ProtocolSFTP, ProtocolSCP, ProtocolSSH, ProtocolFTP, ProtocolWebDAV}
)
// Initialize sets the common configuration
@ -138,6 +139,7 @@ type ActiveTransfer interface {
GetSize() int64
GetVirtualPath() string
GetStartTime() time.Time
SignalClose()
}
// ActiveConnection defines the interface for the current active connections
@ -178,12 +180,13 @@ type ConnectionTransfer struct {
func (t *ConnectionTransfer) getConnectionTransferAsString() string {
result := ""
if t.OperationType == operationUpload {
result += "UL"
} else {
result += "DL"
switch t.OperationType {
case operationUpload:
result += "UL "
case operationDownload:
result += "DL "
}
result += fmt.Sprintf(" %#v ", t.VirtualPath)
result += fmt.Sprintf("%#v ", t.VirtualPath)
if t.Size > 0 {
elapsed := time.Since(utils.GetTimeFromMsecSinceEpoch(t.StartTime))
speed := float64(t.Size) / float64(utils.GetTimeAsMsSinceEpoch(time.Now())-t.StartTime)
@ -277,11 +280,11 @@ func (c *Configuration) GetProxyListener(listener net.Listener) (*proxyproto.Lis
}
// ExecutePostConnectHook executes the post connect hook if defined
func (c *Configuration) ExecutePostConnectHook(remoteAddr net.Addr, protocol string) error {
func (c *Configuration) ExecutePostConnectHook(remoteAddr, protocol string) error {
if len(c.PostConnectHook) == 0 {
return nil
}
ip := utils.GetIPFromRemoteAddress(remoteAddr.String())
ip := utils.GetIPFromRemoteAddress(remoteAddr)
if strings.HasPrefix(c.PostConnectHook, "http") {
var url *url.URL
url, err := url.Parse(c.PostConnectHook)
@ -469,7 +472,7 @@ func (conns *ActiveConnections) GetStats() []ConnectionStatus {
ConnectionTime: utils.GetTimeAsMsSinceEpoch(c.GetConnectionTime()),
LastActivity: utils.GetTimeAsMsSinceEpoch(c.GetLastActivity()),
Protocol: c.GetProtocol(),
SSHCommand: c.GetCommand(),
Command: c.GetCommand(),
Transfers: c.GetTransfers(),
}
stats = append(stats, stat)
@ -491,12 +494,12 @@ type ConnectionStatus struct {
ConnectionTime int64 `json:"connection_time"`
// Last activity as unix timestamp in milliseconds
LastActivity int64 `json:"last_activity"`
// Protocol for this connection: SFTP, SCP, SSH
// Protocol for this connection
Protocol string `json:"protocol"`
// active uploads/downloads
Transfers []ConnectionTransfer `json:"active_transfers,omitempty"`
// for the SSH protocol this is the issued command
SSHCommand string `json:"ssh_command,omitempty"`
// SSH command or WebDAV method
Command string `json:"command,omitempty"`
}
// GetConnectionDuration returns the connection duration as string
@ -510,8 +513,11 @@ func (c ConnectionStatus) GetConnectionDuration() string {
// For SSH commands the issued command is returned too.
func (c ConnectionStatus) GetConnectionInfo() string {
result := fmt.Sprintf("%v. Client: %#v From: %#v", c.Protocol, c.ClientVersion, c.RemoteAddress)
if c.Protocol == ProtocolSSH && len(c.SSHCommand) > 0 {
result += fmt.Sprintf(". Command: %#v", c.SSHCommand)
if c.Protocol == ProtocolSSH && len(c.Command) > 0 {
result += fmt.Sprintf(". Command: %#v", c.Command)
}
if c.Protocol == ProtocolWebDAV && len(c.Command) > 0 {
result += fmt.Sprintf(". Method: %#v", c.Command)
}
return result
}


@ -8,6 +8,7 @@ import (
"os/exec"
"runtime"
"strings"
"sync/atomic"
"testing"
"time"
@ -36,7 +37,7 @@ type providerConf struct {
type fakeConnection struct {
*BaseConnection
sshCommand string
command string
}
func (c *fakeConnection) AddUser(user dataprovider.User) error {
@ -59,7 +60,7 @@ func (c *fakeConnection) GetClientVersion() string {
}
func (c *fakeConnection) GetCommand() string {
return c.sshCommand
return c.command
}
func (c *fakeConnection) GetRemoteAddress() string {
@ -277,13 +278,20 @@ func TestConnectionStatus(t *testing.T) {
c2 := NewBaseConnection("id2", ProtocolSSH, user, nil)
fakeConn2 := &fakeConnection{
BaseConnection: c2,
sshCommand: "md5sum",
command: "md5sum",
}
c3 := NewBaseConnection("id3", ProtocolWebDAV, user, nil)
fakeConn3 := &fakeConnection{
BaseConnection: c3,
command: "PROPFIND",
}
t3 := NewBaseTransfer(nil, c3, nil, "/p2", "/r2", TransferDownload, 0, 0, true)
Connections.Add(fakeConn1)
Connections.Add(fakeConn2)
Connections.Add(fakeConn3)
stats := Connections.GetStats()
assert.Len(t, stats, 2)
assert.Len(t, stats, 3)
for _, stat := range stats {
assert.Equal(t, stat.Username, username)
assert.True(t, strings.HasPrefix(stat.GetConnectionInfo(), stat.Protocol))
@ -298,6 +306,9 @@ func TestConnectionStatus(t *testing.T) {
assert.True(t, strings.HasPrefix(tr.getConnectionTransferAsString(), "UL"))
}
}
} else if stat.ConnectionID == "DAV_id3" {
assert.Len(t, stat.Transfers, 1)
assert.Greater(t, len(stat.GetTransfersAsString()), 0)
} else {
assert.Equal(t, 0, len(stat.GetTransfersAsString()))
}
@ -308,8 +319,17 @@ func TestConnectionStatus(t *testing.T) {
err = t2.Close()
assert.NoError(t, err)
err = fakeConn3.SignalTransfersAbort()
assert.NoError(t, err)
assert.Equal(t, int32(1), atomic.LoadInt32(&t3.AbortTransfer))
err = t3.Close()
assert.NoError(t, err)
err = fakeConn3.SignalTransfersAbort()
assert.Error(t, err)
Connections.Remove(fakeConn1.GetID())
Connections.Remove(fakeConn2.GetID())
Connections.Remove(fakeConn3.GetID())
stats = Connections.GetStats()
assert.Len(t, stats, 0)
}
@ -378,34 +398,34 @@ func TestPostConnectHook(t *testing.T) {
Zone: "",
}
assert.NoError(t, Config.ExecutePostConnectHook(remoteAddr, ProtocolFTP))
assert.NoError(t, Config.ExecutePostConnectHook(remoteAddr.String(), ProtocolFTP))
Config.PostConnectHook = "http://foo\x7f.com/"
assert.Error(t, Config.ExecutePostConnectHook(remoteAddr, ProtocolSFTP))
assert.Error(t, Config.ExecutePostConnectHook(remoteAddr.String(), ProtocolSFTP))
Config.PostConnectHook = "http://invalid:1234/"
assert.Error(t, Config.ExecutePostConnectHook(remoteAddr, ProtocolSFTP))
assert.Error(t, Config.ExecutePostConnectHook(remoteAddr.String(), ProtocolSFTP))
Config.PostConnectHook = fmt.Sprintf("http://%v/404", httpAddr)
assert.Error(t, Config.ExecutePostConnectHook(remoteAddr, ProtocolFTP))
assert.Error(t, Config.ExecutePostConnectHook(remoteAddr.String(), ProtocolFTP))
Config.PostConnectHook = fmt.Sprintf("http://%v", httpAddr)
assert.NoError(t, Config.ExecutePostConnectHook(remoteAddr, ProtocolFTP))
assert.NoError(t, Config.ExecutePostConnectHook(remoteAddr.String(), ProtocolFTP))
Config.PostConnectHook = "invalid"
assert.Error(t, Config.ExecutePostConnectHook(remoteAddr, ProtocolFTP))
assert.Error(t, Config.ExecutePostConnectHook(remoteAddr.String(), ProtocolFTP))
if runtime.GOOS == osWindows {
Config.PostConnectHook = "C:\\bad\\command"
assert.Error(t, Config.ExecutePostConnectHook(remoteAddr, ProtocolSFTP))
assert.Error(t, Config.ExecutePostConnectHook(remoteAddr.String(), ProtocolSFTP))
} else {
Config.PostConnectHook = "/invalid/path"
assert.Error(t, Config.ExecutePostConnectHook(remoteAddr, ProtocolSFTP))
assert.Error(t, Config.ExecutePostConnectHook(remoteAddr.String(), ProtocolSFTP))
hookCmd, err := exec.LookPath("true")
assert.NoError(t, err)
Config.PostConnectHook = hookCmd
assert.NoError(t, Config.ExecutePostConnectHook(remoteAddr, ProtocolSFTP))
assert.NoError(t, Config.ExecutePostConnectHook(remoteAddr.String(), ProtocolSFTP))
}
Config.PostConnectHook = ""


@ -1,6 +1,7 @@
package common
import (
"errors"
"fmt"
"os"
"path"
@ -138,10 +139,11 @@ func (c *BaseConnection) GetTransfers() []ConnectionTransfer {
transfers := make([]ConnectionTransfer, 0, len(c.activeTransfers))
for _, t := range c.activeTransfers {
var operationType string
if t.GetType() == TransferUpload {
operationType = operationUpload
} else {
switch t.GetType() {
case TransferDownload:
operationType = operationDownload
case TransferUpload:
operationType = operationUpload
}
transfers = append(transfers, ConnectionTransfer{
ID: t.GetID(),
@ -155,6 +157,21 @@ func (c *BaseConnection) GetTransfers() []ConnectionTransfer {
return transfers
}
// SignalTransfersAbort signals the active transfers to exit as soon as possible
func (c *BaseConnection) SignalTransfersAbort() error {
c.RLock()
defer c.RUnlock()
if len(c.activeTransfers) == 0 {
return errors.New("no active transfer found")
}
for _, t := range c.activeTransfers {
t.SignalClose()
}
return nil
}
// ListDir reads the directory named by fsPath and returns a list of directory entries
func (c *BaseConnection) ListDir(fsPath, virtualPath string) ([]os.FileInfo, error) {
if !c.User.HasPerm(dataprovider.PermListItems, virtualPath) {
@ -187,8 +204,8 @@ func (c *BaseConnection) CreateDir(fsPath, virtualPath string) error {
return nil
}
// RemoveFile removes a file at the specified fsPath
func (c *BaseConnection) RemoveFile(fsPath, virtualPath string, info os.FileInfo) error {
// IsRemoveFileAllowed returns an error if removing this file is not allowed
func (c *BaseConnection) IsRemoveFileAllowed(fsPath, virtualPath string) error {
if !c.User.HasPerm(dataprovider.PermDelete, path.Dir(virtualPath)) {
return c.GetPermissionDeniedError()
}
@ -196,6 +213,14 @@ func (c *BaseConnection) RemoveFile(fsPath, virtualPath string, info os.FileInfo
c.Log(logger.LevelDebug, "removing file %#v is not allowed", fsPath)
return c.GetPermissionDeniedError()
}
return nil
}
// RemoveFile removes a file at the specified fsPath
func (c *BaseConnection) RemoveFile(fsPath, virtualPath string, info os.FileInfo) error {
if err := c.IsRemoveFileAllowed(fsPath, virtualPath); err != nil {
return err
}
size := info.Size()
action := newActionNotification(&c.User, operationPreDelete, fsPath, "", "", c.protocol, size, nil)
actionErr := action.execute()
@ -227,8 +252,8 @@ func (c *BaseConnection) RemoveFile(fsPath, virtualPath string, info os.FileInfo
return nil
}
// RemoveDir removes a directory at the specified fsPath
func (c *BaseConnection) RemoveDir(fsPath, virtualPath string) error {
// IsRemoveDirAllowed returns an error if removing this directory is not allowed
func (c *BaseConnection) IsRemoveDirAllowed(fsPath, virtualPath string) error {
if c.Fs.GetRelativePath(fsPath) == "/" {
c.Log(logger.LevelWarn, "removing root dir is not allowed")
return c.GetPermissionDeniedError()
@ -248,6 +273,14 @@ func (c *BaseConnection) RemoveDir(fsPath, virtualPath string) error {
if !c.User.HasPerm(dataprovider.PermDelete, path.Dir(virtualPath)) {
return c.GetPermissionDeniedError()
}
return nil
}
// RemoveDir removes a directory at the specified fsPath
func (c *BaseConnection) RemoveDir(fsPath, virtualPath string) error {
if err := c.IsRemoveDirAllowed(fsPath, virtualPath); err != nil {
return err
}
var fi os.FileInfo
var err error
@ -440,7 +473,7 @@ func (c *BaseConnection) checkRecursiveRenameDirPermissions(sourcePath, targetPa
dstPath := strings.Replace(walkedPath, sourcePath, targetPath, 1)
virtualSrcPath := c.Fs.GetRelativePath(walkedPath)
virtualDstPath := c.Fs.GetRelativePath(dstPath)
// walk scans the directory tree in order, checking the parent dirctory permissions we are sure that all contents
// walk scans the directory tree in order; by checking the parent directory permissions first we are sure that all contents
// inside the parent path were already checked. If the current dir has no subdirs with defined permissions inside it
// and it has all the possible permissions, we can stop scanning
if !c.User.HasPermissionsInside(path.Dir(virtualSrcPath)) && !c.User.HasPermissionsInside(path.Dir(virtualDstPath)) {
@ -756,6 +789,8 @@ func (c *BaseConnection) GetPermissionDeniedError() error {
switch c.protocol {
case ProtocolSFTP:
return sftp.ErrSSHFxPermissionDenied
case ProtocolWebDAV:
return os.ErrPermission
default:
return ErrPermissionDenied
}
@ -766,6 +801,8 @@ func (c *BaseConnection) GetNotExistError() error {
switch c.protocol {
case ProtocolSFTP:
return sftp.ErrSSHFxNoSuchFile
case ProtocolWebDAV:
return os.ErrNotExist
default:
return ErrNotExist
}


@ -1035,6 +1035,8 @@ func TestErrorsMapping(t *testing.T) {
err := conn.GetFsError(os.ErrNotExist)
if protocol == ProtocolSFTP {
assert.EqualError(t, err, sftp.ErrSSHFxNoSuchFile.Error())
} else if protocol == ProtocolWebDAV {
assert.EqualError(t, err, os.ErrNotExist.Error())
} else {
assert.EqualError(t, err, ErrNotExist.Error())
}


@ -19,7 +19,7 @@ var (
)
// BaseTransfer contains protocols common transfer details for an upload or a download.
type BaseTransfer struct {
type BaseTransfer struct { //nolint:maligned
ID uint64
File *os.File
Connection *BaseConnection
@ -33,6 +33,7 @@ type BaseTransfer struct {
requestPath string
BytesSent int64
BytesReceived int64
AbortTransfer int32
sync.Mutex
ErrTransfer error
}
@ -54,7 +55,9 @@ func NewBaseTransfer(file *os.File, conn *BaseConnection, cancelFn func(), fsPat
requestPath: requestPath,
BytesSent: 0,
BytesReceived: 0,
AbortTransfer: 0,
}
conn.AddTransfer(t)
return t
}
@ -82,11 +85,29 @@ func (t *BaseTransfer) GetStartTime() time.Time {
return t.start
}
// SignalClose signals that the transfer should be closed.
// For some protocols, for example WebDAV, we have no
// access to the network connection, so we use this method
// to make the next read or write fail
func (t *BaseTransfer) SignalClose() {
atomic.StoreInt32(&(t.AbortTransfer), 1)
}
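
As an illustrative aside (not part of the commit; `abortableReader` and `errTransferAborted` are hypothetical names), this is a minimal sketch of how a reader wrapper can honor such an atomic abort flag so that the next read fails after SignalClose is called:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
	"sync/atomic"
)

// errTransferAborted is returned once the abort flag has been set.
var errTransferAborted = errors.New("transfer aborted")

// abortableReader wraps an io.Reader and fails the next Read after
// SignalClose has been called, mirroring the AbortTransfer flag above.
type abortableReader struct {
	r     io.Reader
	abort int32
}

// SignalClose atomically marks the transfer as aborted.
func (a *abortableReader) SignalClose() { atomic.StoreInt32(&a.abort, 1) }

// Read returns an error if the transfer was aborted, otherwise it delegates.
func (a *abortableReader) Read(p []byte) (int, error) {
	if atomic.LoadInt32(&a.abort) == 1 {
		return 0, errTransferAborted
	}
	return a.r.Read(p)
}

func main() {
	r := &abortableReader{r: strings.NewReader("some data")}
	buf := make([]byte, 4)
	n, err := r.Read(buf) // succeeds
	fmt.Println(n, err)
	r.SignalClose()
	_, err = r.Read(buf) // fails with errTransferAborted
	fmt.Println(err)
}
```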
// GetVirtualPath returns the transfer virtual path
func (t *BaseTransfer) GetVirtualPath() string {
return t.requestPath
}
// GetFsPath returns the transfer filesystem path
func (t *BaseTransfer) GetFsPath() string {
return t.fsPath
}
// SetCancelFn sets the cancel function for the transfer
func (t *BaseTransfer) SetCancelFn(cancelFn func()) {
t.cancelFn = cancelFn
}
// TransferError is called if there is an unexpected error.
// For example network or client issues
func (t *BaseTransfer) TransferError(err error) {
@ -106,8 +127,8 @@ func (t *BaseTransfer) TransferError(err error) {
}
// Close it is called when the transfer is completed.
// It closes the underlying file, logs the transfer info, updates the
// user quota (for uploads) and executes any defined action.
// It logs the transfer info, updates the user quota (for uploads)
// and executes any defined action.
// If there is an error no action will be executed and, in atomic mode,
// we try to delete the temporary file
func (t *BaseTransfer) Close() error {


@ -55,9 +55,9 @@ func TestTransferThrottling(t *testing.T) {
testFileSize := int64(131072)
wantedUploadElapsed := 1000 * (testFileSize / 1000) / u.UploadBandwidth
wantedDownloadElapsed := 1000 * (testFileSize / 1000) / u.DownloadBandwidth
// 100 ms tolerance
wantedUploadElapsed -= 100
wantedDownloadElapsed -= 100
// some tolerance
wantedUploadElapsed -= wantedDownloadElapsed / 10
wantedDownloadElapsed -= wantedDownloadElapsed / 10
conn := NewBaseConnection("id", ProtocolSCP, u, nil)
transfer := NewBaseTransfer(nil, conn, nil, "", "", TransferUpload, 0, 0, true)
transfer.BytesReceived = testFileSize
@ -99,7 +99,10 @@ func TestTransferErrors(t *testing.T) {
assert.FailNow(t, "unable to open test file")
}
conn := NewBaseConnection("id", ProtocolSFTP, u, fs)
transfer := NewBaseTransfer(file, conn, cancelFn, testFile, "/transfer_test_file", TransferUpload, 0, 0, true)
transfer := NewBaseTransfer(file, conn, nil, testFile, "/transfer_test_file", TransferUpload, 0, 0, true)
assert.Nil(t, transfer.cancelFn)
assert.Equal(t, testFile, transfer.GetFsPath())
transfer.SetCancelFn(cancelFn)
errFake := errors.New("err fake")
transfer.BytesReceived = 9
transfer.TransferError(ErrQuotaExceeded)


@ -16,6 +16,7 @@ import (
"github.com/drakkan/sftpgo/sftpd"
"github.com/drakkan/sftpgo/utils"
"github.com/drakkan/sftpgo/version"
"github.com/drakkan/sftpgo/webdavd"
)
const (
@ -35,12 +36,13 @@ var (
)
type globalConfig struct {
Common common.Configuration `json:"common" mapstructure:"common"`
SFTPD sftpd.Configuration `json:"sftpd" mapstructure:"sftpd"`
FTPD ftpd.Configuration `json:"ftpd" mapstructure:"ftpd"`
ProviderConf dataprovider.Config `json:"data_provider" mapstructure:"data_provider"`
HTTPDConfig httpd.Conf `json:"httpd" mapstructure:"httpd"`
HTTPConfig httpclient.Config `json:"http" mapstructure:"http"`
Common common.Configuration `json:"common" mapstructure:"common"`
SFTPD sftpd.Configuration `json:"sftpd" mapstructure:"sftpd"`
FTPD ftpd.Configuration `json:"ftpd" mapstructure:"ftpd"`
WebDAVD webdavd.Configuration `json:"webdavd" mapstructure:"webdavd"`
ProviderConf dataprovider.Config `json:"data_provider" mapstructure:"data_provider"`
HTTPDConfig httpd.Conf `json:"httpd" mapstructure:"httpd"`
HTTPConfig httpclient.Config `json:"http" mapstructure:"http"`
}
func init() {
@ -85,6 +87,12 @@ func init() {
CertificateFile: "",
CertificateKeyFile: "",
},
WebDAVD: webdavd.Configuration{
BindPort: 0,
BindAddress: "",
CertificateFile: "",
CertificateKeyFile: "",
},
ProviderConf: dataprovider.Config{
Driver: "sqlite",
Name: "sftpgo.db",
@ -163,6 +171,16 @@ func SetFTPDConfig(config ftpd.Configuration) {
globalConf.FTPD = config
}
// GetWebDAVDConfig returns the configuration for the WebDAV server
func GetWebDAVDConfig() webdavd.Configuration {
return globalConf.WebDAVD
}
// SetWebDAVDConfig sets the configuration for the WebDAV server
func SetWebDAVDConfig(config webdavd.Configuration) {
globalConf.WebDAVD = config
}
// GetHTTPDConfig returns the configuration for the HTTP server
func GetHTTPDConfig() httpd.Conf {
return globalConf.HTTPDConfig


@ -273,4 +273,10 @@ func TestSetGetConfig(t *testing.T) {
config.SetFTPDConfig(ftpdConf)
assert.Equal(t, ftpdConf.CertificateFile, config.GetFTPDConfig().CertificateFile)
assert.Equal(t, ftpdConf.CertificateKeyFile, config.GetFTPDConfig().CertificateKeyFile)
webDavConf := config.GetWebDAVDConfig()
webDavConf.CertificateFile = "dav_cert"
webDavConf.CertificateKeyFile = "dav_key"
config.SetWebDAVDConfig(webDavConf)
assert.Equal(t, webDavConf.CertificateFile, config.GetWebDAVDConfig().CertificateFile)
assert.Equal(t, webDavConf.CertificateKeyFile, config.GetWebDAVDConfig().CertificateKeyFile)
}


@ -54,6 +54,7 @@ const (
SSHLoginMethodKeyAndPassword = "publickey+password"
SSHLoginMethodKeyAndKeyboardInt = "publickey+keyboard-interactive"
FTPLoginMethodPassword = "ftp-password"
WebDavLoginMethodPassword = "dav-password"
)
var (
@ -222,7 +223,7 @@ func (u *User) AddVirtualDirs(list []os.FileInfo, sftpPath string) []os.FileInfo
}
for _, v := range u.VirtualFolders {
if path.Dir(v.VirtualPath) == sftpPath {
fi := vfs.NewFileInfo(path.Base(v.VirtualPath), true, 0, time.Time{})
fi := vfs.NewFileInfo(v.VirtualPath, true, 0, time.Now())
found := false
for index, f := range list {
if f.Name() == fi.Name() {


@ -39,5 +39,12 @@ EXPOSE 2022 8080
#ENV SFTPGO_FTPD__CERTIFICATE_FILE=/srv/sftpgo/config/mycert.crt
#ENV SFTPGO_FTPD__CERTIFICATE_KEY_FILE=/srv/sftpgo/config/mycert.key
# uncomment the following setting to enable WebDAV support
#ENV SFTPGO_WEBDAVD__BIND_PORT=8090
# it is a good idea to provide certificates to enable WebDAV over HTTPS
#ENV SFTPGO_WEBDAVD__CERTIFICATE_FILE=${CONFIG_DIR}/mycert.crt
#ENV SFTPGO_WEBDAVD__CERTIFICATE_KEY_FILE=${CONFIG_DIR}/mycert.key
ENTRYPOINT ["/bin/entrypoint.sh"]
CMD ["serve"]


@ -46,7 +46,7 @@ sudo docker rm sftpgo && sudo docker run --name sftpgo \
sftpgo
```
If you want to enable FTP/S you also need the publish the FTP port and the FTP passive port range, defined in your `Dockerfile`, by adding, for example, the following options to the `docker run` command `-p 2121:2121 -p 50000-50100:50000-50100`
If you want to enable FTP/S you also need to publish the FTP port and the FTP passive port range, defined in your `Dockerfile`, by adding, for example, the following options to the `docker run` command `-p 2121:2121 -p 50000-50100:50000-50100`. The same goes for WebDAV: you need to publish the configured port.
The script `entrypoint.sh` makes sure to correct the permissions of directories and start the process with the right user.


@ -82,5 +82,12 @@ ENV SFTPGO_HTTPD__BACKUPS_PATH=${BACKUPS_DIR}
#ENV SFTPGO_FTPD__CERTIFICATE_FILE=${CONFIG_DIR}/mycert.crt
#ENV SFTPGO_FTPD__CERTIFICATE_KEY_FILE=${CONFIG_DIR}/mycert.key
# uncomment the following setting to enable WebDAV support
#ENV SFTPGO_WEBDAVD__BIND_PORT=8090
# it is a good idea to provide certificates to enable WebDAV over HTTPS
#ENV SFTPGO_WEBDAVD__CERTIFICATE_FILE=${CONFIG_DIR}/mycert.crt
#ENV SFTPGO_WEBDAVD__CERTIFICATE_KEY_FILE=${CONFIG_DIR}/mycert.key
ENTRYPOINT ["sftpgo"]
CMD ["serve"]


@ -54,4 +54,4 @@ and finally you can run the image using something like this:
docker rm sftpgo && docker run --name sftpgo -p 8080:8080 -p 2022:2022 --mount type=bind,source=/srv/sftpgo/data,target=/app/data --mount type=bind,source=/srv/sftpgo/config,target=/app/config --mount type=bind,source=/srv/sftpgo/backups,target=/app/backups drakkan/sftpgo
```
If you want to enable FTP/S you also need the publish the FTP port and the FTP passive port range, defined in your `Dockerfile`, by adding, for example, the following options to the `docker run` command `-p 2121:2121 -p 50000-50100:50000-50100`
If you want to enable FTP/S you also need to publish the FTP port and the FTP passive port range, defined in your `Dockerfile`, by adding, for example, the following options to the `docker run` command `-p 2121:2121 -p 50000-50100:50000-50100`. The same goes for WebDAV: you need to publish the configured port.


@ -70,8 +70,8 @@ The configuration file contains the following sections:
- `private_key`, path to the private key file. It can be a path relative to the config dir or an absolute one.
- `host_keys`, list of strings. It contains the daemon's private host keys. Each host key can be defined as a path relative to the configuration directory or an absolute one. If empty, the daemon will search or try to generate `id_rsa` and `id_ecdsa` keys inside the configuration directory. If you configure absolute paths to files named `id_rsa` and/or `id_ecdsa` then SFTPGo will try to generate these keys using the default settings.
- `kex_algorithms`, list of strings. Available KEX (Key Exchange) algorithms in preference order. Leave empty to use default values. The supported values can be found here: [`crypto/ssh`](https://github.com/golang/crypto/blob/master/ssh/common.go#L46 "Supported kex algos")
- `ciphers`, list of strings. Allowed ciphers. Leave empty to use default values. The supported values can be found here: [`crypto/ssh`](https://github.com/golang/crypto/blob/master/ssh/common.go#L28 "Supported ciphers")
- `macs`, list of strings. Available MAC (message authentication code) algorithms in preference order. Leave empty to use default values. The supported values can be found here: [`crypto/ssh`](https://github.com/golang/crypto/blob/master/ssh/common.go#L84 "Supported MACs")
- `ciphers`, list of strings. Allowed ciphers. Leave empty to use default values. The supported values can be found here: [crypto/ssh](https://github.com/golang/crypto/blob/master/ssh/common.go#L28 "Supported ciphers")
- `macs`, list of strings. Available MAC (message authentication code) algorithms in preference order. Leave empty to use default values. The supported values can be found here: [crypto/ssh](https://github.com/golang/crypto/blob/master/ssh/common.go#L84 "Supported MACs")
- `trusted_user_ca_keys`, list of public keys paths of certificate authorities that are trusted to sign user certificates for authentication. The paths can be absolute or relative to the configuration directory.
- `login_banner_file`, path to the login banner file. The contents of the specified file, if any, are sent to the remote user before authentication is allowed. It can be a path relative to the config dir or an absolute one. Leave empty to disable login banner.
- `setstat_mode`, integer. Deprecated, please use the same key in `common` section.
@ -88,7 +88,12 @@ The configuration file contains the following sections:
- `force_passive_ip`, IP address. External IP address to expose for passive connections. Leave empty to autodetect. Default: "".
- `passive_port_range`, struct containing the key `start` and `end`. Port Range for data connections. Random if not specified. Default range is 50000-50100.
- `certificate_file`, string. Certificate for FTPS. This can be an absolute path or a path relative to the config dir.
- `certificate_key_file`, string. Private key matching the above certificate. This can be an absolute path or a path relative to the config dir. If both the certificate and the private key are provided, the server will accept both plain FTP an explicit FTP over TLS. Certificate and key files can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows.
- `certificate_key_file`, string. Private key matching the above certificate. This can be an absolute path or a path relative to the config dir. If both the certificate and the private key are provided the server will accept both plain FTP and explicit FTP over TLS. Certificate and key files can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows.
- **webdavd**, the configuration for the WebDAV server, more info [here](./webdav.md)
- `bind_port`, integer. The port used for serving WebDAV requests. 0 means disabled. Default: 0.
- `bind_address`, string. Leave blank to listen on all available network interfaces. Default: "".
- `certificate_file`, string. Certificate for WebDAV over HTTPS. This can be an absolute path or a path relative to the config dir.
- `certificate_key_file`, string. Private key matching the above certificate. This can be an absolute path or a path relative to the config dir. If both the certificate and the private key are provided the server will expect HTTPS connections. Certificate and key files can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows.
- **"data_provider"**, the configuration for the data provider
- `driver`, string. Supported drivers are `sqlite`, `mysql`, `postgresql`, `bolt`, `memory`
- `name`, string. Database name. For driver `sqlite` this can be the database name relative to the config dir or the absolute path to the SQLite database. For driver `memory` this is the (optional) path relative to the config dir or the absolute path to the users dump, obtained using the `dumpdata` REST API, to load. This dump will be loaded at startup and can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows. The `memory` provider will not modify the provided file so quota usage and last login will not be persisted
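
As an illustration of the `webdavd` settings described above, here is a minimal sketch that reads and updates them through the `config` package helpers added in this commit; the port and certificate paths below are examples only:

```go
package main

import (
	"fmt"

	"github.com/drakkan/sftpgo/config"
)

func main() {
	// Read the current WebDAV configuration.
	davConf := config.GetWebDAVDConfig()

	// Enable the WebDAV listener; the values here are just examples.
	davConf.BindPort = 8090
	davConf.BindAddress = ""
	davConf.CertificateFile = "/srv/sftpgo/config/mycert.crt"
	davConf.CertificateKeyFile = "/srv/sftpgo/config/mycert.key"

	// Store the updated configuration back into the global config.
	config.SetWebDAVDConfig(davConf)

	fmt.Printf("WebDAV will listen on port %d\n", config.GetWebDAVDConfig().BindPort)
}
```

The same settings can of course be provided via the JSON configuration file or the corresponding `SFTPGO_WEBDAVD__*` environment variables shown in the Dockerfiles above.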


@ -11,7 +11,7 @@ For system commands we have no direct control on file creation/deletion and so t
If quota is enabled and SFTPGo receives a system command, the used size and number of files are checked at the command start and not while new files are created/deleted. While the command is running the number of files is not checked; the remaining size is calculated as the difference between the max allowed quota and the used one, and it is checked against the bytes transferred via SSH. The command is aborted if it uploads more bytes than the remaining allowed size calculated at the command start. Anyway, we only see the bytes that the remote command sends to the local one via SSH. These bytes contain both protocol commands and files, and so the size of the files is different from the size transferred via SSH: for example, a command can send compressed files, or a protocol command (few bytes) could delete a big file. To mitigate these issues, quotas are recalculated at the command end with a full scan of the directory specified for the system command. This could be heavy for big directories. If you need system commands and quotas you could consider disabling quota restrictions and periodically updating quota usage yourself using the REST API.
For these reasons we should limit system commands usage as much as possibile, we currently support the following system commands:
For these reasons we should limit system commands usage as much as possible; we currently support the following system commands:
- `git-receive-pack`, `git-upload-pack`, `git-upload-archive`. These commands enable support for Git repositories over SSH. They need to be installed and in your system's `PATH`.
- `rsync`. The `rsync` command needs to be installed and in your system's `PATH`. We cannot avoid that rsync creates symlinks, so if the user has the permission to create symlinks, we add the option `--safe-links` to the received rsync command if it is not already set. This should prevent creating symlinks that point outside the home dir. If the user cannot create symlinks, we add the option `--munge-links` if it is not already set. This should make symlinks unusable (but manually recoverable).
@ -21,8 +21,8 @@ SFTPGo support the following built-in SSH commands:
- `scp`, SFTPGo implements the SCP protocol so we can support it for cloud filesystems too and we can avoid the other system commands limitations. SCP between two remote hosts is supported using the `-3` scp option.
- `md5sum`, `sha1sum`, `sha256sum`, `sha384sum`, `sha512sum`. Useful to check message digests for uploaded files.
- `cd`, `pwd`. Some SFTP clients do not support the SFTP SSH_FXP_REALPATH packet type, so they use `cd` and `pwd` SSH commands to get the initial directory. Currently `cd` does nothing and `pwd` always returns the `/` path.
- `sftpgo-copy`. This is a built-in copy implementation. It allows server side copy for files and directories. The first argument is the source file/directory and the second one is the destination file/directory, for example `sftpgo-copy <src> <dst>`. The command will fail if the destination exists. Copy for directories spanning virtual folders is not supported. Only local filesystem is supported: recursive copy for Cloud Storage filesystems requires a new request for every file in any case, so a real server side copy is not possibile.
- `sftpgo-remove`. This is a built-in remove implementation. It allows to remove single files and to recursively remove directories. The first argument is the file/directory to remove, for example `sftpgo-remove <dst>`. Only local filesystem is supported: recursive remove for Cloud Storage filesystems requires a new request for every file in any case, so a server side remove is not possibile.
- `sftpgo-copy`. This is a built-in copy implementation. It allows server side copy for files and directories. The first argument is the source file/directory and the second one is the destination file/directory, for example `sftpgo-copy <src> <dst>`. The command will fail if the destination exists. Copy for directories spanning virtual folders is not supported. Only local filesystem is supported: recursive copy for Cloud Storage filesystems requires a new request for every file in any case, so a real server side copy is not possible.
- `sftpgo-remove`. This is a built-in remove implementation. It allows to remove single files and to recursively remove directories. The first argument is the file/directory to remove, for example `sftpgo-remove <dst>`. Only local filesystem is supported: recursive remove for Cloud Storage filesystems requires a new request for every file in any case, so a server side remove is not possible.
The following SSH commands are enabled by default:

docs/webdav.md Normal file

@ -0,0 +1,22 @@
# WebDAV
The experimental `WebDAV` support can be enabled by setting a `bind_port` inside the `webdavd` configuration section.
Each user has their own path like `http/s://<SFTPGo ip>:<WebDAV port>/<username>` and must authenticate using password credentials.
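
For example, here is a minimal sketch of a client connection using the [gowebdav](https://github.com/studio-b12/gowebdav) library, which this commit also adds to the Go module dependencies; the address, username and password are placeholders and assume a WebDAV listener enabled on port 8090:

```go
package main

import (
	"fmt"
	"log"

	"github.com/studio-b12/gowebdav"
)

func main() {
	// Each user connects to its own path: http(s)://<host>:<port>/<username>
	client := gowebdav.NewClient("http://127.0.0.1:8090/someuser", "someuser", "somepassword")

	if err := client.Connect(); err != nil {
		log.Fatalf("unable to connect: %v", err)
	}

	// Upload a small file, then list the user's root directory.
	if err := client.Write("/hello.txt", []byte("hello WebDAV"), 0644); err != nil {
		log.Fatalf("upload failed: %v", err)
	}
	files, err := client.ReadDir("/")
	if err != nil {
		log.Fatalf("unable to list directory: %v", err)
	}
	for _, f := range files {
		fmt.Println(f.Name(), f.Size())
	}
}
```

Any other standard WebDAV client should work the same way.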
WebDAV should work as expected for most use cases but there are some minor issues and some missing features.
Known issues:
- removing a directory tree on Cloud Storage backends could generate a `not found` error when removing the last (virtual) directory. This happens if the client walks the directory tree itself and removes files and directories one by one instead of issuing a single remove command
- the [WebDAV library](https://pkg.go.dev/golang.org/x/net/webdav?tab=doc) in use opens a file to execute a `stat` and sometimes reads some bytes to find the content type. We are unable to distinguish a `stat` from a `download` for now, so to properly list a directory you need to grant both `list` and `download` permissions
- the WebDAV library does not always return a proper error code/message; most of the time it simply returns `Method not Allowed`. I'll try to improve the library error codes in the future
- WebDAV is quite a different protocol from SCP/FTP: there is no session concept and each command is a separate HTTP request. We could improve performance by caching the user info for a short time, so we don't need a user lookup (and therefore a data provider query) for each request. Some clients issue a lot of requests just to list a directory's contents. This needs more investigation and a design decision; in any case the protocol itself is quite heavy
- if an object within a directory cannot be accessed, for example due to OS permission issues or because it is a missing mapped path for a virtual folder, the directory listing will fail. In SFTP/FTP the directory listing will succeed and you'll only get an error if you try to access the problematic file/directory
We plan to add the following features in future releases:
- [CORS](http://www.w3.org/TR/cors/) support
- [Dead Properties](https://tools.ietf.org/html/rfc4918#section-3) support. We need a design decision here: probably the best solution is to store dead properties inside the data provider, but this could increase its size a lot. Alternatively we could store them on disk for the local filesystem and as metadata for Cloud Storage; this means we would need a separate `HEAD` request to retrieve dead properties for an S3 file. For big folders this would generate a lot of requests to the Cloud Provider, so I don't like this solution. Another option is to expose a hook and let you implement `dead properties` outside SFTPGo.
If you find any other quirks or problems please let us know by opening a GitHub issue, thank you!


@ -54,7 +54,7 @@ type Configuration struct {
// Initialize configures and starts the FTP server
func (c *Configuration) Initialize(configDir string) error {
var err error
logger.Debug(logSender, "", "initializing FTP server with config %+v", c)
logger.Debug(logSender, "", "initializing FTP server with config %+v", *c)
server, err = NewServer(c, configDir)
if err != nil {
return err


@ -61,6 +61,8 @@ UM2lmBLIXpGgBwYFK4EEACKhZANiAARCjRMqJ85rzMC998X5z761nJ+xL3bkmGVq
WvrJ51t5OxV0v25NsOgR82CANXUgvhVYs7vNFN+jxtb2aj6Xg+/2G/BNxkaFspIV
CzgWkxiz7XE4lgUwX44FCXZM3+JeUbI=
-----END EC PRIVATE KEY-----`
testFileName = "test_file_ftp.dat"
testDLFileName = "test_download_ftp.dat"
)
var (
@ -106,8 +108,8 @@ func TestMain(m *testing.M) {
}
}
certPath := filepath.Join(os.TempDir(), "test.crt")
keyPath := filepath.Join(os.TempDir(), "test.key")
certPath := filepath.Join(os.TempDir(), "test_ftpd.crt")
keyPath := filepath.Join(os.TempDir(), "test_ftpd.key")
err = ioutil.WriteFile(certPath, []byte(ftpsCert), os.ModePerm)
if err != nil {
logger.ErrorToConsole("error writing FTPS certificate: %v", err)
@ -184,7 +186,6 @@ func TestBasicFTPHandling(t *testing.T) {
client, err := getFTPClient(user, true)
if assert.NoError(t, err) {
assert.Len(t, common.Connections.GetStats(), 1)
testFileName := "test_file.dat" //nolint:goconst
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
expectedQuotaSize := user.UsedQuotaSize + testFileSize
@ -201,7 +202,7 @@ func TestBasicFTPHandling(t *testing.T) {
// overwrite an existing file
err = ftpUploadFile(testFilePath, testFileName, testFileSize, client, 0)
assert.NoError(t, err)
localDownloadPath := filepath.Join(homeBasePath, "test_download.dat")
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
err = ftpDownloadFile(testFileName, localDownloadPath, testFileSize, client, 0)
assert.NoError(t, err)
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
@ -308,6 +309,7 @@ func TestLoginExternalAuth(t *testing.T) {
assert.NoError(t, err)
if assert.Len(t, users, 1) {
user := users[0]
assert.Equal(t, defaultUsername, user.Username)
_, err = httpd.RemoveUser(user, http.StatusOK)
assert.NoError(t, err)
err = os.RemoveAll(user.GetHomeDir())
@ -505,7 +507,7 @@ func TestZeroBytesTransfers(t *testing.T) {
assert.NoError(t, err)
}
func TestDownloadsError(t *testing.T) {
func TestDownloadErrors(t *testing.T) {
u := getTestUser()
u.QuotaFiles = 1
subDir1 := "sub1"
@ -534,7 +536,7 @@ func TestDownloadsError(t *testing.T) {
assert.NoError(t, err)
err = ioutil.WriteFile(testFilePath2, []byte("file2"), os.ModePerm)
assert.NoError(t, err)
localDownloadPath := filepath.Join(homeBasePath, "test_download.dat")
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
err = ftpDownloadFile(path.Join("/", subDir1, "file.zip"), localDownloadPath, 5, client, 0)
assert.Error(t, err)
err = ftpDownloadFile(path.Join("/", subDir2, "file.zip"), localDownloadPath, 5, client, 0)
@ -571,7 +573,6 @@ func TestUploadErrors(t *testing.T) {
assert.NoError(t, err)
client, err := getFTPClient(user, true)
if assert.NoError(t, err) {
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := user.QuotaSize
err = createTestFile(testFilePath, testFileSize)
@ -609,6 +610,8 @@ func TestUploadErrors(t *testing.T) {
assert.Error(t, err)
err = client.Quit()
assert.NoError(t, err)
err = os.Remove(testFilePath)
assert.NoError(t, err)
}
_, err = httpd.RemoveUser(user, http.StatusOK)
assert.NoError(t, err)
@ -622,7 +625,6 @@ func TestResume(t *testing.T) {
assert.NoError(t, err)
client, err := getFTPClient(user, true)
if assert.NoError(t, err) {
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
data := []byte("test data")
err = ioutil.WriteFile(testFilePath, data, os.ModePerm)
@ -634,7 +636,7 @@ func TestResume(t *testing.T) {
readed, err := ioutil.ReadFile(filepath.Join(user.GetHomeDir(), testFileName))
assert.NoError(t, err)
assert.Equal(t, "test test data", string(readed))
localDownloadPath := filepath.Join(homeBasePath, "test_download.dat")
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
err = ftpDownloadFile(testFileName, localDownloadPath, int64(len(data)), client, 5)
assert.NoError(t, err)
readed, err = ioutil.ReadFile(localDownloadPath)
@ -680,7 +682,6 @@ func TestQuotaLimits(t *testing.T) {
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFileSize := int64(65535)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
@ -830,7 +831,6 @@ func TestRename(t *testing.T) {
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testDir := "adir"
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -907,7 +907,6 @@ func TestStat(t *testing.T) {
}()
subDir := "subdir"
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -954,7 +953,6 @@ func TestUploadOverwriteVfolder(t *testing.T) {
assert.NoError(t, err)
client, err := getFTPClient(user, false)
if assert.NoError(t, err) {
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -1020,7 +1018,6 @@ func TestAllocate(t *testing.T) {
assert.NoError(t, err)
client, err = getFTPClient(user, false)
if assert.NoError(t, err) {
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := user.QuotaSize - 1
err = createTestFile(testFilePath, testFileSize)
@ -1079,7 +1076,6 @@ func TestChtimes(t *testing.T) {
assert.NoError(t, err)
}()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -1118,7 +1114,6 @@ func TestChmod(t *testing.T) {
assert.NoError(t, err)
}()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(131072)
err = createTestFile(testFilePath, testFileSize)


@ -150,7 +150,7 @@ func (c *Connection) Stat(name string) (os.FileInfo, error) {
}
fi, err := c.Fs.Stat(p)
if err != nil {
c.Log(logger.LevelWarn, "error running stat on path: %+v", err)
c.Log(logger.LevelWarn, "error running stat on path %#v: %+v", p, err)
return nil, c.GetFsError(err)
}
return fi, nil


@ -96,7 +96,7 @@ func (s *Server) GetSettings() (*ftpserver.Settings, error) {
// ClientConnected is called to send the very first welcome message
func (s *Server) ClientConnected(cc ftpserver.ClientContext) (string, error) {
if err := common.Config.ExecutePostConnectHook(cc.RemoteAddr(), common.ProtocolFTP); err != nil {
if err := common.Config.ExecutePostConnectHook(cc.RemoteAddr().String(), common.ProtocolFTP); err != nil {
return common.ErrConnectionDenied.Error(), err
}
connID := fmt.Sprintf("%v", cc.ID())
@ -120,13 +120,13 @@ func (s *Server) AuthUser(cc ftpserver.ClientContext, username, password string)
remoteAddr := cc.RemoteAddr().String()
user, err := dataprovider.CheckUserAndPass(username, password, utils.GetIPFromRemoteAddress(remoteAddr))
if err != nil {
updateLoginMetrics(username, remoteAddr, dataprovider.FTPLoginMethodPassword, err)
updateLoginMetrics(username, remoteAddr, err)
return nil, err
}
connection, err := s.validateUser(user, cc)
defer updateLoginMetrics(username, remoteAddr, dataprovider.FTPLoginMethodPassword, err)
defer updateLoginMetrics(username, remoteAddr, err)
if err != nil {
return nil, err
@ -188,10 +188,11 @@ func (s *Server) validateUser(user dataprovider.User, cc ftpserver.ClientContext
return connection, nil
}
func updateLoginMetrics(username, remoteAddress, method string, err error) {
metrics.AddLoginAttempt(method)
func updateLoginMetrics(username, remoteAddress string, err error) {
metrics.AddLoginAttempt(dataprovider.FTPLoginMethodPassword)
if err != nil {
logger.ConnectionFailedLog(username, utils.GetIPFromRemoteAddress(remoteAddress), method, err.Error())
logger.ConnectionFailedLog(username, utils.GetIPFromRemoteAddress(remoteAddress),
dataprovider.FTPLoginMethodPassword, err.Error())
}
metrics.AddLoginResult(method, err)
metrics.AddLoginResult(dataprovider.FTPLoginMethodPassword, err)
}


@ -83,6 +83,7 @@ func (t *transfer) Write(p []byte) (n int, err error) {
// Seek sets the offset to resume an upload or a download
func (t *transfer) Seek(offset int64, whence int) (int64, error) {
t.Connection.UpdateLastActivity()
if t.File != nil {
ret, err := t.File.Seek(offset, whence)
if err != nil {

go.mod

@ -3,10 +3,10 @@ module github.com/drakkan/sftpgo
go 1.13
require (
cloud.google.com/go v0.62.0 // indirect
cloud.google.com/go v0.63.0 // indirect
cloud.google.com/go/storage v1.10.0
github.com/alexedwards/argon2id v0.0.0-20200522061839-9369edc04b05
github.com/aws/aws-sdk-go v1.33.16
github.com/alexedwards/argon2id v0.0.0-20200802152012-2464efd3196b
github.com/aws/aws-sdk-go v1.34.1
github.com/eikenb/pipeat v0.0.0-20200430215831-470df5986b6d
github.com/fclairamb/ftpserverlib v0.8.1-0.20200729230026-7f0ab9d81bb6
github.com/fsnotify/fsnotify v1.4.9 // indirect
@ -18,7 +18,7 @@ require (
github.com/jlaffaye/ftp v0.0.0-20200720194710-13949d38913e
github.com/lib/pq v1.8.0
github.com/mattn/go-sqlite3 v1.14.0
github.com/miekg/dns v1.1.30 // indirect
github.com/miekg/dns v1.1.31 // indirect
github.com/mitchellh/mapstructure v1.3.3 // indirect
github.com/nathanaelle/password/v2 v2.0.1
github.com/otiai10/copy v1.2.0
@ -26,28 +26,31 @@ require (
github.com/pires/go-proxyproto v0.1.3
github.com/pkg/sftp v1.11.1-0.20200731124947-b508b936bef3
github.com/prometheus/client_golang v1.7.1
github.com/prometheus/common v0.11.1 // indirect
github.com/rs/xid v1.2.1
github.com/rs/zerolog v1.19.0
github.com/spf13/afero v1.3.2
github.com/spf13/afero v1.3.4
github.com/spf13/cast v1.3.1 // indirect
github.com/spf13/cobra v1.0.0
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.7.0
github.com/spf13/viper v1.7.1
github.com/stretchr/testify v1.6.1
github.com/studio-b12/gowebdav v0.0.0-20200303150724-9380631c29a1
go.etcd.io/bbolt v1.3.5
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1
golang.org/x/tools v0.0.0-20200731060945-b5fad4ed8dd6 // indirect
google.golang.org/api v0.29.0
google.golang.org/genproto v0.0.0-20200731012542-8145dea6a485 // indirect
google.golang.org/grpc v1.31.0 // indirect
golang.org/x/net v0.0.0-20200707034311-ab3426394381
golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed
golang.org/x/tools v0.0.0-20200811032001-fd80f4dbb3ea // indirect
google.golang.org/api v0.30.0
google.golang.org/genproto v0.0.0-20200808173500-a06252235341 // indirect
gopkg.in/ini.v1 v1.57.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0
)
replace (
github.com/jlaffaye/ftp => github.com/drakkan/ftp v0.0.0-20200730125632-b21eac28818c
github.com/fclairamb/ftpserverlib => github.com/drakkan/ftpserverlib v0.0.0-20200731183125-82c4b2b9bb35
github.com/jlaffaye/ftp => github.com/drakkan/ftp v0.0.0-20200730125632-b21eac28818c
golang.org/x/crypto => github.com/drakkan/crypto v0.0.0-20200731130417-7674a892f9b1
golang.org/x/net => github.com/drakkan/net v0.0.0-20200807161257-daa5cda5ae27
)

go.sum

@ -11,8 +11,9 @@ cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0 h1:RmDygqvj27Zf3fCQjQRtLyC7KwFcHkeJitcO0OoGOcA=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.63.0 h1:A+DfAZQ/eWca7gvu42CS6FNSDX4R8cghF+XfWLn4R6g=
cloud.google.com/go v0.63.0/go.mod h1:GmezbQc7T2snqkEXWfZ0sy0VfkB/ivI2DdtJL2DEmlg=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@ -33,6 +34,7 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
@ -46,8 +48,9 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alexedwards/argon2id v0.0.0-20200522061839-9369edc04b05 h1:votg1faEmwABhCeJ4tiBrvwk4BWftQGkEtFy5iuI7rU=
github.com/alexedwards/argon2id v0.0.0-20200522061839-9369edc04b05/go.mod h1:GFtu6vaWaRJV5EvSFaVqgq/3Iq95xyYElBV/aupGzUo=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alexedwards/argon2id v0.0.0-20200802152012-2464efd3196b h1:rcCpjI1OMGtBY8nnBvExeM1pXNoaM35zqmXBGpgJR2o=
github.com/alexedwards/argon2id v0.0.0-20200802152012-2464efd3196b/go.mod h1:GFtu6vaWaRJV5EvSFaVqgq/3Iq95xyYElBV/aupGzUo=
github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
@ -58,8 +61,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.33.16 h1:h/3BL2BQMEbS67BPoEo/5jD8IPGVrKBmoa4S9mBBntw=
github.com/aws/aws-sdk-go v1.33.16/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go v1.34.1 h1:jM0mJ9JSJyhujwxBNYKrNB8Iwp8N7J2WsQxTR4yPSck=
github.com/aws/aws-sdk-go v1.34.1/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@ -107,6 +110,8 @@ github.com/drakkan/ftp v0.0.0-20200730125632-b21eac28818c h1:QSXIWohSNn0negBVSKE
github.com/drakkan/ftp v0.0.0-20200730125632-b21eac28818c/go.mod h1:2lmrmq866uF2tnje75wQHzmPXhmSWUt7Gyx2vgK1RCU=
github.com/drakkan/ftpserverlib v0.0.0-20200731183125-82c4b2b9bb35 h1:fS1f/T5ruUySzUD5m4VFhnEXjgPAUm/KwLZG+s/W83E=
github.com/drakkan/ftpserverlib v0.0.0-20200731183125-82c4b2b9bb35/go.mod h1:Jwd+zOP3T0kwiCQcgjpu3VWtc7AI6Nu4UPN2HYqaniM=
github.com/drakkan/net v0.0.0-20200807161257-daa5cda5ae27 h1:hh14GxmE3PMKL+4nvMmX7O8CUtbD/52IKDjbMTYX7IY=
github.com/drakkan/net v0.0.0-20200807161257-daa5cda5ae27/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
@ -135,13 +140,16 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@ -160,6 +168,7 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@ -183,9 +192,12 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0 h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@ -201,6 +213,7 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@ -242,28 +255,36 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg=
github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
@ -283,8 +304,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.27/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.30 h1:Qww6FseFn8PRfw07jueqIXqodm0JKiiKuK0DeXSqfyo=
github.com/miekg/dns v1.1.30/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.31 h1:sJFOl9BgwbYAWOGEwr61FU28pqsBNdpRBnhGXtO06Oo=
github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
@ -302,6 +323,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nathanaelle/password/v2 v2.0.1 h1:ItoCTdsuIWzilYmllQPa3DR3YoCXcpfxScWLqr8Ii2s=
github.com/nathanaelle/password/v2 v2.0.1/go.mod h1:eaoT+ICQEPNtikBRIAatN8ThWwMhVG+r1jTw60BvPJk=
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
@ -330,8 +352,10 @@ github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh
github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k=
github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
github.com/otiai10/curr v1.0.0 h1:TJIWdbX0B+kpNagQrjgq8bCMrbhiuX73M2XwgtDMoOI=
github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
github.com/otiai10/mint v1.3.1 h1:BCmzIS3n71sGfHB5NMNDB3lHYPz8fWSkCAErHed//qc=
github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
@ -374,8 +398,9 @@ github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.11.1 h1:0ZISXCMRuCZcxF77aT1BXY5m74mX2vrGYl1dSwBI0Jo=
github.com/prometheus/common v0.11.1/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@ -396,19 +421,24 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/secsy/goftp v0.0.0-20190720192957-f31499d7c79a h1:C6IhVTxNkhlb0tlCB6JfHOUv1f0xHPK7V8X4HlJZEJw=
github.com/secsy/goftp v0.0.0-20190720192957-f31499d7c79a/go.mod h1:MnkX001NG75g3p8bhFycnyIjeQoOjGL6CEIsdE/nKSY=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.3.2 h1:GDarE4TJQI52kYSbSAmLiId1Elfj+xgSDqrUZxFhxlU=
github.com/spf13/afero v1.3.2/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
github.com/spf13/afero v1.3.4 h1:8q6vk3hthlpb2SouZcnBVKboxWQWMDNF38bwholZrJc=
github.com/spf13/afero v1.3.4/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
@ -423,8 +453,8 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk=
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
@ -436,6 +466,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/studio-b12/gowebdav v0.0.0-20200303150724-9380631c29a1 h1:TPyHV/OgChqNcnYqCoCvIFjR9TU60gFXXBKnhOBzVEI=
github.com/studio-b12/gowebdav v0.0.0-20200303150724-9380631c29a1/go.mod h1:gCcfDlA1Y7GqOaeEKw5l9dOGx1VLdc/HuQSlQAaZ30s=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@ -490,6 +522,7 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
@ -498,44 +531,8 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -549,6 +546,7 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -588,9 +586,11 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200722175500-76b94024e4b6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1 h1:sIky/MyNRSHTrdxfsiUSS4WIAMvInbeXljJz+jDjeYE=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed h1:WBkVNH1zd9jg/dK4HCM4lNANnmd12EHC9z+LmcCG4ns=
golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -648,10 +648,15 @@ golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200731060945-b5fad4ed8dd6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200806022845-90696ccdc692/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200811032001-fd80f4dbb3ea h1:9ym67RBRK/wN50W0T3g8g1n8viM1D2ofgWufDlMfWe0=
golang.org/x/tools v0.0.0-20200811032001-fd80f4dbb3ea/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
@ -667,14 +672,16 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0 h1:yfrXXP61wVuLb0vBcG6qaOoIoqYEzOQS8jum51jkv2w=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@ -704,8 +711,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200731012542-8145dea6a485 h1:wTk5DQB3+1darAz4Ldomo0r5bUOCKX7gilxQ4sb2kno=
google.golang.org/genproto v0.0.0-20200731012542-8145dea6a485/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200808173500-a06252235341 h1:Kceb+1TNS2X7Cj/A+IUTljNerF/4wOFjlFJ0RGHYKKE=
google.golang.org/genproto v0.0.0-20200808173500-a06252235341/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@ -738,8 +747,10 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/dutchcoders/goftp.v1 v1.0.0-20170301105846-ed59a591ce14 h1:tHqNpm9sPaE6BSuMLXBzgTwukQLdBEt4OYU2coQjEQQ=
gopkg.in/dutchcoders/goftp.v1 v1.0.0-20170301105846-ed59a591ce14/go.mod h1:nzmlZQ+UqB5+55CRTV/dOaiK8OrPl6Co96Ob8lH4Wxw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=

View file

@ -2,7 +2,7 @@ openapi: 3.0.1
info:
title: SFTPGo
description: 'SFTPGo REST API'
version: 1.9.2
version: 1.9.3
servers:
- url: /api/v1
@ -1484,7 +1484,7 @@ components:
properties:
path:
type: string
description: SFTP/SCP path, if no other specific filter is defined, the filter applies to subdirectories too. For example, if filters are defined for the paths "/" and "/sub", then the filters for "/" are applied to any file outside the "/sub" directory
description: exposed SFTPGo path, if no other specific filter is defined, the filter applies to subdirectories too. For example, if filters are defined for the paths "/" and "/sub", then the filters for "/" are applied to any file outside the "/sub" directory
allowed_extensions:
type: array
items:
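
The extension filter lookup described above boils down to a longest-matching-directory rule: the deepest configured path containing the file wins, with "/" as the catch-all. A minimal, hypothetical sketch of such a lookup (illustration only, not SFTPGo's actual code; the function name and data layout are made up):

```go
package main

import (
	"fmt"
	"path"
)

// findFilter returns the configured path whose filters apply to virtualPath:
// the deepest configured parent directory wins, "/" acts as the fallback.
func findFilter(filters map[string][]string, virtualPath string) (string, []string) {
	dir := path.Dir(path.Clean(virtualPath))
	for {
		if exts, ok := filters[dir]; ok {
			return dir, exts
		}
		if dir == "/" {
			return "", nil
		}
		dir = path.Dir(dir)
	}
}

func main() {
	filters := map[string][]string{
		"/":    {".jpg"},
		"/sub": {".png"},
	}
	fmt.Println(findFilter(filters, "/other/file.txt"))  // "/" filters apply outside "/sub"
	fmt.Println(findFilter(filters, "/sub/dir/file.png")) // "/sub" filters apply
}
```
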
@ -1558,7 +1558,7 @@ components:
description: the number of parts to upload in parallel. If this value is set to zero, the default value (2) will be used
key_prefix:
type: string
description: key_prefix is similar to a chroot directory for a local filesystem. If specified, the SFTP user will only see contents that start with this prefix, so you can restrict access to a specific virtual folder. The prefix, if not empty, must not start with "/" and must end with "/". If empty, the whole bucket contents will be available
description: key_prefix is similar to a chroot directory for a local filesystem. If specified, the user will only see contents that start with this prefix, so you can restrict access to a specific virtual folder. The prefix, if not empty, must not start with "/" and must end with "/". If empty, the whole bucket contents will be available
example: folder/subfolder/
required:
- bucket
@ -1589,7 +1589,7 @@ components:
type: string
key_prefix:
type: string
description: key_prefix is similar to a chroot directory for a local filesystem. If specified, the SFTP user will only see contents that start with this prefix, so you can restrict access to a specific virtual folder. The prefix, if not empty, must not start with "/" and must end with "/". If empty, the whole bucket contents will be available
description: key_prefix is similar to a chroot directory for a local filesystem. If specified, the user will only see contents that start with this prefix, so you can restrict access to a specific virtual folder. The prefix, if not empty, must not start with "/" and must end with "/". If empty, the whole bucket contents will be available
example: folder/subfolder/
required:
- bucket
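
As the key_prefix descriptions above note, the prefix works like a chroot for the bucket: every cleaned virtual path is mapped to an object name underneath it, so nothing outside the prefix is reachable. A rough, purely illustrative sketch of that mapping (not taken from SFTPGo's S3/GCS code):

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// objectName maps an exposed virtual path to an object key under keyPrefix.
// As required above, keyPrefix must be empty or must end with "/" and must
// not start with "/".
func objectName(keyPrefix, virtualPath string) string {
	name := strings.TrimPrefix(path.Clean("/"+virtualPath), "/")
	return keyPrefix + name
}

func main() {
	// the user sees "/" but actually works inside folder/subfolder/
	fmt.Println(objectName("folder/subfolder/", "/docs/report.txt")) // folder/subfolder/docs/report.txt
	fmt.Println(objectName("folder/subfolder/", "/../escape.txt"))   // folder/subfolder/escape.txt
}
```
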
@ -1653,14 +1653,14 @@ components:
quota_size:
type: integer
format: int64
description: Quota as size in bytes. 0 means unlimited, -1 means included in user quota. Please note that quota is updated if files are added/removed via SFTP/SCP, otherwise a quota scan is needed
description: Quota as size in bytes. 0 means unlimited, -1 means included in user quota. Please note that quota is updated if files are added/removed via SFTPGo, otherwise a quota scan or a manual quota update is needed
quota_files:
type: integer
format: int32
description: Quota as number of files. 0 means unlimited, -1 means included in user quota. Please note that quota is updated if files are added/removed via SFTP/SCP, otherwise a quota scan is needed
description: Quota as number of files. 0 means unlimited, -1 means included in user quota. Please note that quota is updated if files are added/removed via SFTPGo, otherwise a quota scan or a manual quota update is needed
required:
- virtual_path
description: A virtual folder is a mapping between an SFTP/SCP virtual path and a filesystem path outside the user home directory. The specified paths must be absolute and the virtual path cannot be "/"; it must be a subdirectory. The parent directory for the specified virtual path must exist. SFTPGo will try to automatically create any missing parent directory for the configured virtual folders at user login.
description: A virtual folder is a mapping between an SFTPGo virtual path and a filesystem path outside the user home directory. The specified paths must be absolute and the virtual path cannot be "/"; it must be a subdirectory. The parent directory for the specified virtual path must exist. SFTPGo will try to automatically create any missing parent directory for the configured virtual folders at user login.
User:
type: object
properties:
@ -1702,19 +1702,19 @@ components:
items:
$ref: '#/components/schemas/VirtualFolder'
nullable: true
description: mapping between virtual SFTP/SCP paths and filesystem paths outside the user home directory. Supported for local filesystem only. If one or more of the specified folders are not inside the dataprovider, they will be automatically created. You have to create the folder on the filesystem yourself
description: mapping between virtual SFTPGo paths and filesystem paths outside the user home directory. Supported for local filesystem only. If one or more of the specified folders are not inside the dataprovider, they will be automatically created. You have to create the folder on the filesystem yourself
uid:
type: integer
format: int32
minimum: 0
maximum: 65535
description: if you run sftpgo as root user the created files and directories will be assigned to this uid. 0 means no change, the owner will be the user that runs sftpgo. Ignored on windows
description: if you run SFTPGo as root user, the created files and directories will be assigned to this uid. 0 means no change, the owner will be the user that runs SFTPGo. Ignored on windows
gid:
type: integer
format: int32
minimum: 0
maximum: 65535
description: if you run sftpgo as root user the created files and directories will be assigned to this gid. 0 means no change, the group will be the one of the user that runs sftpgo. Ignored on windows
description: if you run SFTPGo as root user, the created files and directories will be assigned to this gid. 0 means no change, the group will be the one of the user that runs SFTPGo. Ignored on windows
max_sessions:
type: integer
format: int32
@ -1722,11 +1722,11 @@ components:
quota_size:
type: integer
format: int64
description: Quota as size in bytes. 0 means unlimited. Please note that quota is updated if files are added/removed via SFTP/SCP, otherwise a quota scan is needed
description: Quota as size in bytes. 0 means unlimited. Please note that quota is updated if files are added/removed via SFTPGo, otherwise a quota scan or a manual quota update is needed
quota_files:
type: integer
format: int32
description: Quota as number of files. 0 means unlimited. Please note that quota is updated if files are added/removed via SFTP/SCP, otherwise a quota scan is needed
description: Quota as number of files. 0 means unlimited. Please note that quota is updated if files are added/removed via SFTPGo, otherwise a quota scan or a manual quota update is needed
permissions:
type: object
items:
@ -1798,10 +1798,10 @@ components:
type: integer
format: int64
description: connection time as unix timestamp in milliseconds
ssh_command:
command:
type: string
nullable: true
description: SSH command. This is not empty for protocol SSH
description: SSH command or WebDAV method
last_activity:
type: integer
format: int64
@ -1812,6 +1812,8 @@ components:
- SFTP
- SCP
- SSH
- FTP
- DAV
active_transfers:
type: array
nullable: true
@ -1868,7 +1870,7 @@ components:
type: array
items:
type: string
description: Features for the current build. Available features are "portable", "bolt", "mysql", "sqlite", "pgsql", "s3", "gcs". If a feature is available it has a "+" prefix, otherwise a "-" prefix
description: Features for the current build. Available features are "portable", "bolt", "mysql", "sqlite", "pgsql", "s3", "gcs", "metrics". If a feature is available it has a "+" prefix, otherwise a "-" prefix
securitySchemes:
BasicAuth:
type: http
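
The "+"/"-" prefix convention documented for the features list is straightforward to consume from a client. A hypothetical client-side helper, not part of SFTPGo, just to show how the convention can be parsed:

```go
package main

import (
	"fmt"
	"strings"
)

// parseFeatures turns entries like "+s3" or "-gcs" into a lookup map,
// following the "+"/"-" prefix convention described above.
func parseFeatures(features []string) map[string]bool {
	enabled := make(map[string]bool, len(features))
	for _, f := range features {
		if name := strings.TrimPrefix(f, "+"); name != f {
			enabled[name] = true
		} else {
			enabled[strings.TrimPrefix(f, "-")] = false
		}
	}
	return enabled
}

func main() {
	feats := parseFeatures([]string{"+sqlite", "+s3", "-mysql", "+metrics"})
	fmt.Println(feats["s3"], feats["mysql"]) // true false
}
```
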

View file

@ -98,6 +98,7 @@ func (s *Service) startServices() {
sftpdConf := config.GetSFTPDConfig()
ftpdConf := config.GetFTPDConfig()
httpdConf := config.GetHTTPDConfig()
webDavDConf := config.GetWebDAVDConfig()
go func() {
logger.Debug(logSender, "", "initializing SFTP server with config %+v", sftpdConf)
@ -136,6 +137,18 @@ func (s *Service) startServices() {
} else {
logger.Debug(logSender, "", "FTP server not started, disabled in config file")
}
if webDavDConf.BindPort > 0 {
go func() {
if err := webDavDConf.Initialize(s.ConfigDir); err != nil {
logger.Error(logSender, "", "could not start WebDAV server: %v", err)
logger.ErrorToConsole("could not start WebDAV server: %v", err)
s.Error = err
}
s.Shutdown <- true
}()
} else {
logger.Debug(logSender, "", "WevDAV server not started, disabled in config file")
}
}
// Wait blocks until the service exits
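
The go.sum additions earlier in this commit pull in github.com/drakkan/net, which appears to be a fork of golang.org/x/net and provides the webdav package that the new webdavd service builds on. For orientation only, a bare-bones WebDAV server using the upstream golang.org/x/net/webdav handler looks roughly like this; it is a sketch of the underlying library, not SFTPGo's webdavd implementation, and the exposed directory and port are arbitrary:

```go
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/webdav"
)

func main() {
	handler := &webdav.Handler{
		Prefix:     "/",
		FileSystem: webdav.Dir("/srv/webdav"), // directory exposed to clients
		LockSystem: webdav.NewMemLS(),
		Logger: func(r *http.Request, err error) {
			if err != nil {
				log.Printf("%s %s: %v", r.Method, r.URL.Path, err)
			}
		},
	}
	// webdavd wires authentication, quota tracking and its virtual filesystem
	// around a handler of this kind; here we just serve a plain directory.
	log.Fatal(http.ListenAndServe(":8090", handler))
}
```
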

View file

@ -15,6 +15,7 @@ import (
"github.com/drakkan/sftpgo/ftpd"
"github.com/drakkan/sftpgo/httpd"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/webdavd"
)
const (
@ -98,6 +99,10 @@ loop:
if err != nil {
logger.Warn(logSender, "", "error reloading FTPD TLS certificate: %v", err)
}
err = webdavd.ReloadTLSCertificate()
if err != nil {
logger.Warn(logSender, "", "error reloading WebDav TLS certificate: %v", err)
}
case rotateLogCmd:
logger.Debug(logSender, "", "Received log file rotation request")
err := logger.RotateLogFile()

View file

@ -11,6 +11,7 @@ import (
"github.com/drakkan/sftpgo/ftpd"
"github.com/drakkan/sftpgo/httpd"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/webdavd"
)
func registerSigHup() {
@ -31,6 +32,10 @@ func registerSigHup() {
if err != nil {
logger.Warn(logSender, "", "error reloading FTPD TLS certificate: %v", err)
}
err = webdavd.ReloadTLSCertificate()
if err != nil {
logger.Warn(logSender, "", "error reloading WebDav TLS certificate: %v", err)
}
}
}()
}
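
The hunk above extends the existing SIGHUP handler so that a reload also refreshes the WebDAV TLS certificate. For reference, the general shape of such a handler is the standard os/signal loop; this generic sketch is not the SFTPGo code and the reloader callbacks are hypothetical:

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

// registerSigHup invokes every reloader each time the process receives SIGHUP.
func registerSigHup(reloaders ...func() error) {
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGHUP)
	go func() {
		for range sig {
			for _, reload := range reloaders {
				if err := reload(); err != nil {
					log.Printf("reload error: %v", err)
				}
			}
		}
	}()
}

func main() {
	// each server would register its own TLS certificate reload function
	registerSigHup(func() error { log.Print("reloading certificates"); return nil })
	select {} // a real service would run its listeners here instead of blocking
}
```
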

View file

@ -190,7 +190,7 @@ func (c *Connection) Filelist(request *sftp.Request) (sftp.ListerAt, error) {
s, err := c.Fs.Stat(p)
if err != nil {
c.Log(logger.LevelWarn, "error running stat on path: %+v", err)
c.Log(logger.LevelWarn, "error running stat on path %#v: %+v", p, err)
return nil, c.GetFsError(err)
}

View file

@ -516,7 +516,7 @@ func TestSSHCommandErrors(t *testing.T) {
cmd = sshCommand{
command: "md5sum",
connection: &connection,
args: []string{"/../../test_file.dat"},
args: []string{"/../../test_file_ftp.dat"},
}
err = cmd.handle()
assert.Error(t, err, "ssh command must fail, we are requesting an invalid path")
@ -1020,10 +1020,10 @@ func TestGetConnectionInfo(t *testing.T) {
ClientVersion: "client",
RemoteAddress: "127.0.0.1:1234",
Protocol: common.ProtocolSSH,
SSHCommand: "sha1sum /test_file.dat",
Command: "sha1sum /test_file_ftp.dat",
}
info := c.GetConnectionInfo()
assert.Contains(t, info, "sha1sum /test_file.dat")
assert.Contains(t, info, "sha1sum /test_file_ftp.dat")
}
func TestSCPFileMode(t *testing.T) {

View file

@ -449,7 +449,7 @@ func (c *scpCommand) handleDownload(filePath string) error {
var stat os.FileInfo
if stat, err = c.connection.Fs.Stat(p); err != nil {
c.connection.Log(logger.LevelWarn, "error downloading file: %#v, err: %v", p, err)
c.connection.Log(logger.LevelWarn, "error downloading file: %#v->%#v, err: %v", filePath, p, err)
c.sendErrorMessage(c.connection.GetFsError(err))
return err
}

View file

@ -264,7 +264,7 @@ func (c Configuration) AcceptInboundConnection(conn net.Conn, config *ssh.Server
// we'll set a Deadline for handshake to complete, the default is 2 minutes as in OpenSSH
conn.SetDeadline(time.Now().Add(handshakeTimeout)) //nolint:errcheck
remoteAddr := conn.RemoteAddr()
if err := common.Config.ExecutePostConnectHook(remoteAddr, common.ProtocolSSH); err != nil {
if err := common.Config.ExecutePostConnectHook(remoteAddr.String(), common.ProtocolSSH); err != nil {
conn.Close()
return
}

View file

@ -110,6 +110,8 @@ iixITGvaNZh/tjAAAACW5pY29sYUBwMQE=
testCertExpired = "ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgU3TLP5285k20fBSsdZioI78oJUpaRXFlgx5IPg6gWg8AAAADAQABAAABgQC03jj0D+djk7pxIf/0OhrxrchJTRZklofJ1NoIu4752Sq02mdXmarMVsqJ1cAjV5LBVy3D1F5U6XW4rppkXeVtd04Pxb09ehtH0pRRPaoHHlALiJt8CoMpbKYMA8b3KXPPriGxgGomvtU2T2RMURSwOZbMtpsugfjYSWenyYX+VORYhylWnSXL961LTyC21ehd6d6QnW9G7E5hYMITMY9TuQZz3bROYzXiTsgN0+g6Hn7exFQp50p45StUMfV/SftCMdCxlxuyGny2CrN/vfjO7xxOo2uv7q1qm10Q46KPWJQv+pgZ/OfL+EDjy07n5QVSKHlbx+2nT4Q0EgOSQaCTYwn3YjtABfIxWwgAFdyj6YlPulCL22qU4MYhDcA6PSBwDdf8hvxBfvsiHdM+JcSHvv8/VeJhk6CmnZxGY0fxBupov27z3yEO8nAg8k+6PaUiW1MSUfuGMF/ktB8LOstXsEPXSszuyXiOv4DaryOXUiSn7bmRqKcEFlJusO6aZP0AAAAAAAAABAAAAAEAAAAOdGVzdF91c2VyX3NmdHAAAAASAAAADnRlc3RfdXNlcl9zZnRwAAAAAEs93LgAAAAATR8QOAAAAAAAAACCAAAAFXBlcm1pdC1YMTEtZm9yd2FyZGluZwAAAAAAAAAXcGVybWl0LWFnZW50LWZvcndhcmRpbmcAAAAAAAAAFnBlcm1pdC1wb3J0LWZvcndhcmRpbmcAAAAAAAAACnBlcm1pdC1wdHkAAAAAAAAADnBlcm1pdC11c2VyLXJjAAAAAAAAAAAAAAGXAAAAB3NzaC1yc2EAAAADAQABAAABgQDF5fcwZHiyixmnE6IlOZJpZhWXoh62gN+yadAA0GJ509SAEaZVLPDP8S5RsE8mUikR3wxynVshxHeqMhrkS+RlNbhSlOXDdNg94yTrq/xF8Z/PgKRInvef74k5i7bAIytza7jERzFJ/ujTEy3537T5k5EYQJ15ZQGuvzynSdv+6o99SjI4jFplyQOZ2QcYbEAmhHm5GgQlIiEFG/RlDtLksOulKZxOY3qPzP0AyQxtZJXn/5vG40aW9LTbwxCJqWlgrkFXMqAAVCbuU5YspwhiXmKt1PsldiXw23oloa4caCKN1jzbFiGuZNXEU2Ebx7JIvjQCPaUYwLjEbkRDxDqN/vmwZqBuKYiuG9Eafx+nFSQkr7QYb5b+mT+/1IFHnmeRGn38731kBqtH7tpzC/t+soRX9p2HtJM+9MYhblO2OqTSPGTlxihWUkyiRBekpAhaiHld16TsG+A3bOJHrojGcX+5g6oGarKGLAMcykL1X+rZqT993Mo6d2Z7q43MOXEAAAGUAAAADHJzYS1zaGEyLTUxMgAAAYAlH3hhj8J6xLyVpeLZjblzwDKrxp/MWiH30hQ965ExPrPRcoAZFEKVqOYdj6bp4Q19Q4Yzqdobg3aN5ym2iH0b2TlOY0mM901CAoHbNJyiLs+0KiFRoJ+30EDj/hcKusg6v8ln2yixPagAyQu3zyiWo4t1ZuO3I86xchGlptStxSdHAHPFCfpbhcnzWFZctiMqUutl82C4ROWyjOZcRzdVdWHeN5h8wnooXuvba2VkT8QPmjYYyRGuQ3Hg+ySdh8Tel4wiix1Dg5MX7Wjh4hKEx80No9UPy+0iyZMNc07lsWAtrY6NRxGM5CzB6mklscB8TzFrVSnIl9u3bquLfaCrFt/Mft5dR7Yy4jmF+zUhjia6h6giCZ91J+FZ4hV+WkBtPCvTfrGWoA1BgEB/iI2xOq/NPqJ7UXRoMXk/l0NPgRPT2JS1adegqnt4ddr6IlmPyZxaSEvXhanjKdfMlEFYO1wz7ouqpYUozQVy4KXBlzFlNwyD1hI+k4+/A6AIYeI= nicola@p1"
configDir = ".."
osWindows = "windows"
testFileName = "test_file_sftp.dat"
testDLFileName = "test_download_sftp.dat"
)
var (
@ -318,7 +320,6 @@ func TestBasicSFTPHandling(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat" //nolint:goconst
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
expectedQuotaSize := user.UsedQuotaSize + testFileSize
@ -329,7 +330,7 @@ func TestBasicSFTPHandling(t *testing.T) {
assert.Error(t, err)
err = sftpUploadFile(testFilePath, testFileName, testFileSize, client)
assert.NoError(t, err)
localDownloadPath := filepath.Join(homeBasePath, "test_download.dat")
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
err = sftpDownloadFile(testFileName, localDownloadPath, testFileSize, client)
assert.NoError(t, err)
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
@ -363,7 +364,6 @@ func TestConcurrency(t *testing.T) {
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
var wg sync.WaitGroup
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(262144)
err = createTestFile(testFilePath, testFileSize)
@ -474,7 +474,6 @@ func TestUploadResume(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
appendDataSize := int64(65535)
@ -486,7 +485,7 @@ func TestUploadResume(t *testing.T) {
assert.NoError(t, err)
err = sftpUploadResumeFile(testFilePath, testFileName, testFileSize+appendDataSize, false, client)
assert.NoError(t, err)
localDownloadPath := filepath.Join(homeBasePath, "test_download.dat")
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
err = sftpDownloadFile(testFileName, localDownloadPath, testFileSize+appendDataSize, client)
assert.NoError(t, err)
initialHash, err := computeHashForFile(sha256.New(), testFilePath)
@ -564,7 +563,6 @@ func TestRemove(t *testing.T) {
assert.NoError(t, err)
err = client.Mkdir("/test/test1")
assert.NoError(t, err)
testFileName := "/test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -599,7 +597,6 @@ func TestLink(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -634,7 +631,6 @@ func TestStat(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -679,7 +675,6 @@ func TestStatChownChmod(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -716,7 +711,6 @@ func TestChtimes(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
testDir := "test"
@ -767,7 +761,6 @@ func TestEscapeHomeDir(t *testing.T) {
assert.Error(t, err, "reading a symbolic link outside home dir should not succeeded")
err = os.Remove(linkPath)
assert.NoError(t, err)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -809,7 +802,6 @@ func TestHomeSpecialChars(t *testing.T) {
if assert.NoError(t, err) {
defer client.Close()
assert.NoError(t, checkBasicSFTP(client))
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -1494,7 +1486,6 @@ func TestLoginExternalAuthPwdAndPubKey(t *testing.T) {
client, err := getSftpClient(u, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
@ -1568,7 +1559,6 @@ func TestExternalAuthDifferentUsername(t *testing.T) {
client, err := getSftpClient(u, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
@ -1812,7 +1802,6 @@ func TestQuotaDisabledError(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -1871,7 +1860,6 @@ func TestQuotaFileReplace(t *testing.T) {
err = os.RemoveAll(user.GetHomeDir())
assert.NoError(t, err)
testFileSize := int64(65535)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
@ -1912,6 +1900,7 @@ func TestQuotaFileReplace(t *testing.T) {
assert.NoError(t, err)
client, err = getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
err = sftpUploadFile(testFilePath, testFileName, testFileSize, client)
assert.Error(t, err, "quota size exceeded, file upload must fail")
err = client.Remove(testFileName)
@ -1935,7 +1924,6 @@ func TestQuotaRename(t *testing.T) {
assert.NoError(t, err)
testFileSize := int64(65535)
testFileSize1 := int64(65537)
testFileName := "test_file.dat"
testFileName1 := "test_file1.dat" //nolint:goconst
testFilePath := filepath.Join(homeBasePath, testFileName)
testFilePath1 := filepath.Join(homeBasePath, testFileName1)
@ -2019,7 +2007,6 @@ func TestQuotaScan(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
@ -2035,8 +2022,13 @@ func TestQuotaScan(t *testing.T) {
assert.NoError(t, err)
_, err = httpd.StartQuotaScan(user, http.StatusCreated)
assert.NoError(t, err)
err = waitQuotaScans(1)
assert.NoError(t, err)
assert.Eventually(t, func() bool {
scans, _, err := httpd.GetQuotaScans(http.StatusOK)
if err == nil {
return len(scans) == 0
}
return false
}, 1*time.Second, 50*time.Millisecond)
user, _, err = httpd.GetUserByID(user.ID, http.StatusOK)
assert.NoError(t, err)
assert.Equal(t, expectedQuotaFiles, user.UsedQuotaFiles)
@ -2065,7 +2057,6 @@ func TestQuotaLimits(t *testing.T) {
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFileSize := int64(65535)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
@ -2142,10 +2133,10 @@ func TestQuotaLimits(t *testing.T) {
func TestBandwidthAndConnections(t *testing.T) {
usePubKey := false
testFileSize := int64(131072)
testFileSize := int64(524288)
u := getTestUser(usePubKey)
u.UploadBandwidth = 30
u.DownloadBandwidth = 25
u.UploadBandwidth = 120
u.DownloadBandwidth = 100
wantedUploadElapsed := 1000 * (testFileSize / 1000) / u.UploadBandwidth
wantedDownloadElapsed := 1000 * (testFileSize / 1000) / u.DownloadBandwidth
// 100 ms tolerance
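
With the new values the expected transfer durations used by the assertions work out as follows (the bandwidth fields are expressed in KB/s and the result is in milliseconds); a quick standalone check of the integer arithmetic, nothing more:

```go
package main

import "fmt"

func main() {
	testFileSize := int64(524288)
	uploadBandwidth := int64(120)   // KB/s
	downloadBandwidth := int64(100) // KB/s
	// same integer arithmetic as the test above, results in milliseconds
	fmt.Println(1000 * (testFileSize / 1000) / uploadBandwidth)   // 4366
	fmt.Println(1000 * (testFileSize / 1000) / downloadBandwidth) // 5240
}
```
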
@ -2156,7 +2147,6 @@ func TestBandwidthAndConnections(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
@ -2166,9 +2156,9 @@ func TestBandwidthAndConnections(t *testing.T) {
elapsed := time.Since(startTime).Nanoseconds() / 1000000
assert.GreaterOrEqual(t, elapsed, wantedUploadElapsed, "upload bandwidth throttling not respected")
startTime = time.Now()
localDownloadPath := filepath.Join(homeBasePath, "test_download.dat")
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
c := sftpDownloadNonBlocking(testFileName, localDownloadPath, testFileSize, client)
waitForActiveTransfer()
waitForActiveTransfers(t)
// wait some additional arbitrary time for transfer activity to happen
// it is needed to reach all the code in CheckIdleConnections
time.Sleep(100 * time.Millisecond)
@ -2178,15 +2168,15 @@ func TestBandwidthAndConnections(t *testing.T) {
assert.GreaterOrEqual(t, elapsed, wantedDownloadElapsed, "download bandwidth throttling not respected")
// test disconnection
c = sftpUploadNonBlocking(testFilePath, testFileName+"_partial", testFileSize, client)
waitForActiveTransfer()
waitForActiveTransfers(t)
time.Sleep(100 * time.Millisecond)
stats := common.Connections.GetStats()
for _, stat := range stats {
for _, stat := range common.Connections.GetStats() {
common.Connections.Close(stat.ConnectionID)
}
err = <-c
assert.Error(t, err, "connection closed while uploading: the upload must fail")
waitForNoActiveTransfer()
assert.Eventually(t, func() bool { return len(common.Connections.GetStats()) == 0 }, 1*time.Second, 50*time.Millisecond)
err = os.Remove(testFilePath)
assert.NoError(t, err)
err = os.Remove(localDownloadPath)
@ -2204,9 +2194,8 @@ func TestExtensionsFilters(t *testing.T) {
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFileSize := int64(131072)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
localDownloadPath := filepath.Join(homeBasePath, "test_download.dat")
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
@ -2276,11 +2265,10 @@ func TestVirtualFolders(t *testing.T) {
if assert.NoError(t, err) {
defer client.Close()
testFileSize := int64(131072)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
localDownloadPath := filepath.Join(homeBasePath, "test_download.dat")
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
err = sftpUploadFile(testFilePath, path.Join(vdirPath, testFileName), testFileSize, client)
assert.NoError(t, err)
err = sftpDownloadFile(path.Join(vdirPath, testFileName), localDownloadPath, testFileSize, client)
@ -2381,7 +2369,6 @@ func TestVirtualFoldersQuotaLimit(t *testing.T) {
QuotaSize: 0,
})
testFileSize := int64(131072)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
err := createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
@ -2472,7 +2459,6 @@ func TestVirtualFoldersQuotaLimit(t *testing.T) {
func TestVirtualFoldersQuotaRenameOverwrite(t *testing.T) {
usePubKey := true
testFileSize := int64(131072)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize1 := int64(65537)
testFileName1 := "test_file1.dat"
@ -2649,7 +2635,6 @@ func TestVirtualFoldersQuotaValues(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFileSize := int64(131072)
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
@ -2757,7 +2742,6 @@ func TestQuotaRenameInsideSameVirtualFolder(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFileName1 := "test_file1.dat"
testFileSize := int64(131072)
testFileSize1 := int64(65535)
@ -2981,7 +2965,6 @@ func TestQuotaRenameBetweenVirtualFolder(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFileName1 := "test_file1.dat"
testFileSize := int64(131072)
testFileSize1 := int64(65535)
@ -3225,7 +3208,6 @@ func TestQuotaRenameFromVirtualFolder(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFileName1 := "test_file1.dat"
testFileSize := int64(131072)
testFileSize1 := int64(65535)
@ -3472,7 +3454,6 @@ func TestQuotaRenameToVirtualFolder(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFileName1 := "test_file1.dat"
testFileSize := int64(131072)
testFileSize1 := int64(65535)
@ -3731,7 +3712,6 @@ func TestVirtualFoldersLink(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFileSize := int64(131072)
testFilePath := filepath.Join(homeBasePath, testFileName)
testDir := "adir"
@ -3830,7 +3810,6 @@ func TestOverlappedMappedFolders(t *testing.T) {
defer client.Close()
err = checkBasicSFTP(client)
assert.NoError(t, err)
testFileName := "test_file.dat"
testFileSize := int64(131072)
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
@ -3986,7 +3965,6 @@ func TestVirtualFolderQuotaScan(t *testing.T) {
err := os.MkdirAll(mappedPath, os.ModePerm)
assert.NoError(t, err)
testFileSize := int64(65535)
testFileName := "test_file.dat"
testFilePath := filepath.Join(mappedPath, testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
@ -3998,8 +3976,13 @@ func TestVirtualFolderQuotaScan(t *testing.T) {
assert.NoError(t, err)
_, err = httpd.StartFolderQuotaScan(folder, http.StatusCreated)
assert.NoError(t, err)
err = waitQuotaScans(1)
assert.NoError(t, err)
assert.Eventually(t, func() bool {
scans, _, err := httpd.GetFoldersQuotaScans(http.StatusOK)
if err == nil {
return len(scans) == 0
}
return false
}, 1*time.Second, 50*time.Millisecond)
folders, _, err := httpd.GetFolders(0, 0, mappedPath, http.StatusOK)
assert.NoError(t, err)
if assert.Len(t, folders, 1) {
@ -4058,7 +4041,6 @@ func TestVFolderQuotaSize(t *testing.T) {
assert.NoError(t, err)
err = os.MkdirAll(mappedPath2, os.ModePerm)
assert.NoError(t, err)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
@ -4160,7 +4142,7 @@ func TestMissingFile(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
localDownloadPath := filepath.Join(homeBasePath, "test_download.dat")
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
err = sftpDownloadFile("missing_file", localDownloadPath, 0, client)
assert.Error(t, err, "download missing file must fail")
err = os.Remove(localDownloadPath)
@ -4192,13 +4174,12 @@ func TestOpenError(t *testing.T) {
err = os.Chmod(user.GetHomeDir(), os.ModePerm)
assert.NoError(t, err)
testFileSize := int64(65535)
testFileName := "test_file.dat"
testFilePath := filepath.Join(user.GetHomeDir(), testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
_, err = client.Stat(testFileName)
assert.NoError(t, err)
localDownloadPath := filepath.Join(homeBasePath, "test_download.dat")
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
err = sftpDownloadFile(testFileName, localDownloadPath, testFileSize, client)
assert.NoError(t, err)
err = os.Chmod(testFilePath, 0001)
@ -4241,7 +4222,6 @@ func TestOverwriteDirWithFile(t *testing.T) {
if assert.NoError(t, err) {
defer client.Close()
testFileSize := int64(65535)
testFileName := "test_file.dat"
testDirName := "test_dir" //nolint:goconst
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
@ -4370,14 +4350,13 @@ func TestPermDownload(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
err = sftpUploadFile(testFilePath, testFileName, testFileSize, client)
assert.NoError(t, err)
localDownloadPath := filepath.Join(homeBasePath, "test_download.dat")
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
err = sftpDownloadFile(testFileName, localDownloadPath, testFileSize, client)
assert.Error(t, err, "file download without permission should not succeed")
err = client.Remove(testFileName)
@ -4404,7 +4383,6 @@ func TestPermUpload(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -4431,7 +4409,6 @@ func TestPermOverwrite(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -4460,7 +4437,6 @@ func TestPermDelete(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -4490,7 +4466,6 @@ func TestPermRename(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -4524,7 +4499,6 @@ func TestPermRenameOverwrite(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -4582,7 +4556,6 @@ func TestPermSymlink(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -4613,7 +4586,6 @@ func TestPermChmod(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -4645,7 +4617,6 @@ func TestPermChown(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -4677,7 +4648,6 @@ func TestPermChtimes(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -4709,7 +4679,6 @@ func TestSubDirsUploads(t *testing.T) {
defer client.Close()
err = client.Mkdir("subdir")
assert.NoError(t, err)
testFileName := "test_file.dat"
testFileNameSub := "/subdir/test_file_dat"
testSubFile := filepath.Join(user.GetHomeDir(), "subdir", "file.dat")
testDir := "testdir"
@ -4820,7 +4789,7 @@ func TestSubDirsDownloads(t *testing.T) {
assert.NoError(t, err)
err = sftpUploadFile(testFilePath, testFileName, testFileSize, client)
assert.NoError(t, err)
localDownloadPath := filepath.Join(homeBasePath, "test_download.dat")
localDownloadPath := filepath.Join(homeBasePath, testDLFileName)
err = sftpDownloadFile(testFileName, localDownloadPath, testFileSize, client)
if assert.Error(t, err) {
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
@ -5433,7 +5402,6 @@ func TestSSHFileHash(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -5511,7 +5479,6 @@ func TestSSHCopy(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFileSize := int64(131072)
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileName1 := "test_file1.dat"
@ -5704,7 +5671,6 @@ func TestSSHCopyPermissions(t *testing.T) {
defer client.Close()
testDir := "tDir"
testFileSize := int64(131072)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
@ -5796,7 +5762,6 @@ func TestSSHCopyQuotaLimits(t *testing.T) {
if assert.NoError(t, err) {
defer client.Close()
testDir := "testDir"
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileName1 := "test_file1.dat"
testFilePath1 := filepath.Join(homeBasePath, testFileName1)
@ -5948,7 +5913,6 @@ func TestSSHRemove(t *testing.T) {
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
testFileName := "test_file.dat"
testFileSize := int64(131072)
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileName1 := "test_file1.dat"
@ -6137,7 +6101,6 @@ func TestGitQuotaVirtualFolders(t *testing.T) {
if assert.NoError(t, err) {
// we upload a file so the user is over quota
defer client.Close()
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, u.QuotaSize)
assert.NoError(t, err)
@ -6212,7 +6175,6 @@ func TestSCPBasicHandling(t *testing.T) {
u.QuotaSize = 6553600
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(131074)
expectedQuotaSize := user.UsedQuotaSize + testFileSize
@ -6258,7 +6220,6 @@ func TestSCPUploadFileOverwrite(t *testing.T) {
assert.NoError(t, err)
err = os.RemoveAll(user.GetHomeDir())
assert.NoError(t, err)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(32760)
err = createTestFile(testFilePath, testFileSize)
@ -6319,7 +6280,6 @@ func TestSCPRecursive(t *testing.T) {
u := getTestUser(usePubKey)
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFileName := "test_file.dat"
testBaseDirName := "test_dir"
testBaseDirPath := filepath.Join(homeBasePath, testBaseDirName)
testBaseDirDownName := "test_dir_down" //nolint:goconst
@ -6380,7 +6340,6 @@ func TestSCPExtensionsFilter(t *testing.T) {
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFileSize := int64(131072)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
localPath := filepath.Join(homeBasePath, "scp_download.dat")
remoteUpPath := fmt.Sprintf("%v@127.0.0.1:%v", user.Username, "/")
@ -6434,7 +6393,6 @@ func TestSCPVirtualFolders(t *testing.T) {
assert.NoError(t, err)
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFileName := "test_file.dat"
testBaseDirName := "test_dir"
testBaseDirPath := filepath.Join(homeBasePath, testBaseDirName)
testBaseDirDownName := "test_dir_down"
@ -6446,7 +6404,7 @@ func TestSCPVirtualFolders(t *testing.T) {
assert.NoError(t, err)
err = createTestFile(testFilePath1, testFileSize)
assert.NoError(t, err)
remoteDownPath := fmt.Sprintf("%v@127.0.0.1:%v", user.Username, path.Join("/", vdirPath))
remoteDownPath := fmt.Sprintf("%v@127.0.0.1:%v", user.Username, vdirPath)
remoteUpPath := fmt.Sprintf("%v@127.0.0.1:%v", user.Username, vdirPath)
err = scpUpload(testBaseDirPath, remoteUpPath, true, false)
assert.NoError(t, err)
@ -6500,7 +6458,6 @@ func TestSCPVirtualFoldersQuota(t *testing.T) {
assert.NoError(t, err)
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFileName := "test_file.dat"
testBaseDirName := "test_dir"
testBaseDirPath := filepath.Join(homeBasePath, testBaseDirName)
testBaseDirDownName := "test_dir_down"
@ -6617,7 +6574,6 @@ func TestSCPPermCreateDirs(t *testing.T) {
u.Permissions["/"] = []string{dataprovider.PermDownload, dataprovider.PermUpload}
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(32760)
testBaseDirName := "test_dir"
@ -6652,7 +6608,6 @@ func TestSCPPermUpload(t *testing.T) {
u.Permissions["/"] = []string{dataprovider.PermDownload, dataprovider.PermCreateDirs}
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65536)
err = createTestFile(testFilePath, testFileSize)
@ -6677,7 +6632,6 @@ func TestSCPPermOverwrite(t *testing.T) {
u.Permissions["/"] = []string{dataprovider.PermUpload, dataprovider.PermCreateDirs}
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65536)
err = createTestFile(testFilePath, testFileSize)
@ -6705,7 +6659,6 @@ func TestSCPPermDownload(t *testing.T) {
u.Permissions["/"] = []string{dataprovider.PermUpload, dataprovider.PermCreateDirs}
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65537)
err = createTestFile(testFilePath, testFileSize)
@ -6737,7 +6690,6 @@ func TestSCPQuotaSize(t *testing.T) {
u.QuotaSize = testFileSize + 1
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
@ -6796,7 +6748,6 @@ func TestSCPEscapeHomeDir(t *testing.T) {
linkPath := filepath.Join(homeBasePath, defaultUsername, testDir)
err = os.Symlink(homeBasePath, linkPath)
assert.NoError(t, err)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -6827,7 +6778,6 @@ func TestSCPUploadPaths(t *testing.T) {
usePubKey := true
user, _, err := httpd.AddUser(getTestUser(usePubKey), http.StatusOK)
assert.NoError(t, err)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
testDirName := "testDir"
@ -6863,7 +6813,6 @@ func TestSCPOverwriteDirWithFile(t *testing.T) {
usePubKey := true
user, _, err := httpd.AddUser(getTestUser(usePubKey), http.StatusOK)
assert.NoError(t, err)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
testDirPath := filepath.Join(user.GetHomeDir(), testFileName)
@ -6896,7 +6845,6 @@ func TestSCPRemoteToRemote(t *testing.T) {
u.HomeDir += "1"
user1, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
testFileSize := int64(65535)
err = createTestFile(testFilePath, testFileSize)
@ -6925,7 +6873,6 @@ func TestSCPErrors(t *testing.T) {
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFileSize := int64(524288)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
@ -6943,25 +6890,25 @@ func TestSCPErrors(t *testing.T) {
err := cmd.Run()
assert.Error(t, err, "SCP download must fail")
}()
waitForActiveTransfer()
waitForActiveTransfers(t)
// wait some additional arbitrary time for transfer activity to happen
// it is needed to reach all the code paths in CheckIdleConnections
time.Sleep(100 * time.Millisecond)
err = cmd.Process.Kill()
assert.NoError(t, err)
waitForNoActiveTransfer()
assert.Eventually(t, func() bool { return len(common.Connections.GetStats()) == 0 }, 1*time.Second, 50*time.Millisecond)
cmd = getScpUploadCommand(testFilePath, remoteUpPath, false, false)
go func() {
err := cmd.Run()
assert.Error(t, err, "SCP upload must fail")
}()
waitForActiveTransfer()
waitForActiveTransfers(t)
// wait some additional arbitrary time for transfer activity to happen
// it is needed to reach all the code paths in CheckIdleConnections
time.Sleep(100 * time.Millisecond)
err = cmd.Process.Kill()
assert.NoError(t, err)
waitForNoActiveTransfer()
assert.Eventually(t, func() bool { return len(common.Connections.GetStats()) == 0 }, 1*time.Second, 50*time.Millisecond)
err = os.Remove(testFilePath)
assert.NoError(t, err)
os.Remove(localPath)
@ -7362,53 +7309,15 @@ func computeHashForFile(hasher hash.Hash, path string) (string, error) {
return hash, err
}
func waitForNoActiveTransfer() {
for len(common.Connections.GetStats()) > 0 {
time.Sleep(100 * time.Millisecond)
}
}
func waitForActiveTransfer() {
stats := common.Connections.GetStats()
for len(stats) < 1 {
stats = common.Connections.GetStats()
}
activeTransferFound := false
for !activeTransferFound {
stats = common.Connections.GetStats()
if len(stats) == 0 {
break
}
for _, stat := range stats {
func waitForActiveTransfers(t *testing.T) {
assert.Eventually(t, func() bool {
for _, stat := range common.Connections.GetStats() {
if len(stat.Transfers) > 0 {
activeTransferFound = true
return true
}
}
}
}
func waitQuotaScans(kind int) error {
for {
time.Sleep(50 * time.Millisecond)
var activeScans int
if kind == 1 {
scans, _, err := httpd.GetQuotaScans(http.StatusOK)
if err != nil {
return err
}
activeScans = len(scans)
} else {
scans, _, err := httpd.GetFoldersQuotaScans(http.StatusOK)
if err != nil {
return err
}
activeScans = len(scans)
}
if activeScans == 0 {
break
}
}
return nil
return false
}, 1*time.Second, 50*time.Millisecond)
}
func checkSystemCommands() {

View file

@ -521,7 +521,7 @@ func (c *sshCommand) getSourcePath() string {
func cleanCommandPath(name string) string {
name = strings.Trim(name, "'")
name = strings.Trim(name, "\"")
result := utils.CleanSFTPPath(name)
result := utils.CleanPath(name)
if strings.HasSuffix(name, "/") && !strings.HasSuffix(result, "/") {
result += "/"
}

View file

@ -45,6 +45,12 @@
"certificate_file": "",
"certificate_key_file": ""
},
"webdavd": {
"bind_port": 0,
"bind_address": "",
"certificate_file": "",
"certificate_key_file": ""
},
"data_provider": {
"driver": "sqlite",
"name": "sftpgo.db",

View file

@ -254,7 +254,7 @@ func GenerateECDSAKeys(file string) error {
// for example if the path is: /1/2/3/4 it returns:
// [ "/1/2/3/4", "/1/2/3", "/1/2", "/1", "/" ]
func GetDirsForSFTPPath(p string) []string {
sftpPath := CleanSFTPPath(p)
sftpPath := CleanPath(p)
dirsForPath := []string{sftpPath}
for {
if sftpPath == "/" {
@ -266,13 +266,13 @@ func GetDirsForSFTPPath(p string) []string {
return dirsForPath
}
// CleanSFTPPath returns a cleaned SFTP path
func CleanSFTPPath(p string) string {
sftpPath := filepath.ToSlash(p)
// CleanPath returns a clean POSIX (/) absolute path to work with
func CleanPath(p string) string {
p = filepath.ToSlash(p)
if !path.IsAbs(p) {
sftpPath = "/" + sftpPath
p = "/" + p
}
return path.Clean(sftpPath)
return path.Clean(p)
}
// LoadTemplate wraps a call to a function returning (*Template, error)
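Since `CleanPath` is now shared by the SSH command handling, the filesystem path resolution and the new WebDAV handlers, a quick illustration of its normalization behaviour may be useful. This is only a sketch derived from the implementation above; the inputs and the expected outputs in the comments are illustrative.

```go
// Illustrative only: expected behaviour of utils.CleanPath based on the
// implementation above (filepath.ToSlash + leading "/" + path.Clean).
package main

import (
	"fmt"

	"github.com/drakkan/sftpgo/utils"
)

func main() {
	fmt.Println(utils.CleanPath("adir/afile"))     // "/adir/afile", relative paths become absolute
	fmt.Println(utils.CleanPath("/adir/../afile")) // "/afile", ".." components are resolved
	fmt.Println(utils.CleanPath("/adir//afile/"))  // "/adir/afile", duplicate and trailing "/" removed
	fmt.Println(utils.CleanPath(""))               // "/", the empty path maps to the root
}
```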

View file

@ -2,29 +2,39 @@ package vfs
import (
"os"
"path"
"time"
)
// FileInfo implements os.FileInfo for a file in S3.
// FileContentTyper is an optional interface for vfs.FileInfo
type FileContentTyper interface {
GetContentType() string
}
// FileInfo implements os.FileInfo for a Cloud Storage file.
type FileInfo struct {
name string
sizeInBytes int64
modTime time.Time
mode os.FileMode
contentType string
}
// NewFileInfo creates file info.
func NewFileInfo(name string, isDirectory bool, sizeInBytes int64, modTime time.Time) FileInfo {
mode := os.FileMode(0644)
contentType := ""
if isDirectory {
mode = os.FileMode(0755) | os.ModeDir
contentType = "inode/directory"
}
return FileInfo{
name: name,
name: path.Base(name), // we always have Unix-style paths here
sizeInBytes: sizeInBytes,
modTime: modTime,
mode: mode,
contentType: contentType,
}
}
@ -57,3 +67,12 @@ func (fi FileInfo) IsDir() bool {
func (fi FileInfo) Sys() interface{} {
return fi.getFileInfoSys()
}
func (fi *FileInfo) setContentType(contentType string) {
fi.contentType = contentType
}
// GetContentType implements FileContentTyper interface
func (fi FileInfo) GetContentType() string {
return fi.contentType
}

View file

@ -86,10 +86,10 @@ func (fs GCSFs) Stat(name string) (os.FileInfo, error) {
if err != nil {
return result, err
}
return NewFileInfo(name, true, 0, time.Time{}), nil
return NewFileInfo(name, true, 0, time.Now()), nil
}
if fs.config.KeyPrefix == name+"/" {
return NewFileInfo(name, true, 0, time.Time{}), nil
return NewFileInfo(name, true, 0, time.Now()), nil
}
prefix := fs.getPrefixForStat(name)
query := &storage.Query{Prefix: prefix, Delimiter: "/"}
@ -108,7 +108,8 @@ func (fs GCSFs) Stat(name string) (os.FileInfo, error) {
}
if len(attrs.Prefix) > 0 {
if fs.isEqual(attrs.Prefix, name) {
result = NewFileInfo(name, true, 0, time.Time{})
result = NewFileInfo(name, true, 0, time.Now())
break
}
} else {
if !attrs.Deleted.IsZero() {
@ -117,6 +118,10 @@ func (fs GCSFs) Stat(name string) (os.FileInfo, error) {
if fs.isEqual(attrs.Name, name) {
isDir := strings.HasSuffix(attrs.Name, "/")
result = NewFileInfo(name, isDir, attrs.Size, attrs.Updated)
if !isDir {
result.setContentType(attrs.ContentType)
}
break
}
}
}
@ -134,7 +139,7 @@ func (fs GCSFs) Lstat(name string) (os.FileInfo, error) {
// Open opens the named file for reading
func (fs GCSFs) Open(name string, offset int64) (*os.File, *pipeat.PipeReaderAt, func(), error) {
r, w, err := pipeat.AsyncWriterPipeInDir(fs.localTempDir)
r, w, err := pipeat.PipeInDir(fs.localTempDir)
if err != nil {
return nil, nil, nil, err
}
@ -156,7 +161,7 @@ func (fs GCSFs) Open(name string, offset int64) (*os.File, *pipeat.PipeReaderAt,
defer cancelFn()
defer objectReader.Close()
n, err := io.Copy(w, objectReader)
w.CloseWithError(err) //nolint:errcheck // the returned error is always null
w.CloseWithError(err) //nolint:errcheck
fsLog(fs, logger.LevelDebug, "download completed, path: %#v size: %v, err: %v", name, n, err)
metrics.GCSTransferCompleted(n, 1, err)
}()
@ -181,7 +186,7 @@ func (fs GCSFs) Create(name string, flag int) (*os.File, *PipeWriter, func(), er
defer cancelFn()
defer objectWriter.Close()
n, err := io.Copy(objectWriter, r)
r.CloseWithError(err) //nolint:errcheck // the returned error is always null
r.CloseWithError(err) //nolint:errcheck
p.Done(err)
fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, read bytes: %v, err: %v", name, n, err)
metrics.GCSTransferCompleted(n, 0, err)
@ -321,7 +326,7 @@ func (fs GCSFs) ReadDir(dirname string) ([]os.FileInfo, error) {
}
if len(attrs.Prefix) > 0 {
name, _ := fs.resolve(attrs.Prefix, prefix)
result = append(result, NewFileInfo(name, true, 0, time.Time{}))
result = append(result, NewFileInfo(name, true, 0, time.Now()))
} else {
name, isDir := fs.resolve(attrs.Name, prefix)
if len(name) == 0 {
@ -330,7 +335,11 @@ func (fs GCSFs) ReadDir(dirname string) ([]os.FileInfo, error) {
if !attrs.Deleted.IsZero() {
continue
}
result = append(result, NewFileInfo(name, isDir, attrs.Size, attrs.Updated))
fi := NewFileInfo(name, isDir, attrs.Size, attrs.Updated)
if !isDir {
fi.setContentType(attrs.ContentType)
}
result = append(result, fi)
}
}
metrics.GCSListObjectsCompleted(nil)
@ -381,12 +390,11 @@ func (GCSFs) IsPermission(err error) bool {
return strings.Contains(err.Error(), "403")
}
// CheckRootPath creates the specified root directory if it does not exists
// CheckRootPath creates the specified local root directory if it does not exist
func (fs GCSFs) CheckRootPath(username string, uid int, gid int) bool {
// we need a local directory for temporary files
osFs := NewOsFs(fs.ConnectionID(), fs.localTempDir, nil)
osFs.CheckRootPath(username, uid, gid)
return fs.checkIfBucketExists() != nil
return osFs.CheckRootPath(username, uid, gid)
}
// ScanRootDirContents returns the number of files contained in the bucket,
@ -455,8 +463,53 @@ func (fs GCSFs) GetRelativePath(name string) string {
// Walk walks the file tree rooted at root, calling walkFn for each file or
// directory in the tree, including root
func (GCSFs) Walk(root string, walkFn filepath.WalkFunc) error {
return errUnsupported
func (fs GCSFs) Walk(root string, walkFn filepath.WalkFunc) error {
prefix := ""
if len(root) > 0 && root != "." {
prefix = strings.TrimPrefix(root, "/")
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
}
query := &storage.Query{Prefix: prefix}
err := query.SetAttrSelection(gcsDefaultFieldsSelection)
if err != nil {
walkFn(root, nil, err) //nolint:errcheck
return err
}
ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
defer cancelFn()
bkt := fs.svc.Bucket(fs.config.Bucket)
it := bkt.Objects(ctx, query)
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
walkFn(root, nil, err) //nolint:errcheck
metrics.GCSListObjectsCompleted(err)
return err
}
if !attrs.Deleted.IsZero() {
continue
}
isDir := strings.HasSuffix(attrs.Name, "/")
name := path.Clean(attrs.Name)
if len(name) == 0 {
continue
}
err = walkFn(attrs.Name, NewFileInfo(name, isDir, attrs.Size, attrs.Updated), nil)
if err != nil {
break
}
}
walkFn(root, NewFileInfo(root, true, 0, time.Now()), err) //nolint:errcheck
metrics.GCSListObjectsCompleted(err)
return err
}
// Join joins any number of path elements into a single path

View file

@ -49,13 +49,33 @@ func (fs OsFs) ConnectionID() string {
}
// Stat returns a FileInfo describing the named file
func (OsFs) Stat(name string) (os.FileInfo, error) {
return os.Stat(name)
func (fs OsFs) Stat(name string) (os.FileInfo, error) {
fi, err := os.Stat(name)
if err != nil {
return fi, err
}
for _, v := range fs.virtualFolders {
if v.MappedPath == name {
info := NewFileInfo(v.VirtualPath, true, fi.Size(), fi.ModTime())
return info, nil
}
}
return fi, err
}
// Lstat returns a FileInfo describing the named file
func (OsFs) Lstat(name string) (os.FileInfo, error) {
return os.Lstat(name)
func (fs OsFs) Lstat(name string) (os.FileInfo, error) {
fi, err := os.Lstat(name)
if err != nil {
return fi, err
}
for _, v := range fs.virtualFolders {
if v.MappedPath == name {
info := NewFileInfo(v.VirtualPath, true, fi.Size(), fi.ModTime())
return info, nil
}
}
return fi, err
}
// Open opens the named file for reading
@ -292,7 +312,7 @@ func (fs *OsFs) GetFsPaths(sftpPath string) (string, string) {
virtualPath, mappedPath := fs.getMappedFolderForPath(sftpPath)
if len(mappedPath) > 0 {
basePath = mappedPath
sftpPath = strings.TrimPrefix(utils.CleanSFTPPath(sftpPath), virtualPath)
sftpPath = strings.TrimPrefix(utils.CleanPath(sftpPath), virtualPath)
}
r := filepath.Clean(filepath.Join(basePath, sftpPath))
return basePath, r

View file

@ -112,10 +112,10 @@ func (fs S3Fs) Stat(name string) (os.FileInfo, error) {
if err != nil {
return result, err
}
return NewFileInfo(name, true, 0, time.Time{}), nil
return NewFileInfo(name, true, 0, time.Now()), nil
}
if "/"+fs.config.KeyPrefix == name+"/" {
return NewFileInfo(name, true, 0, time.Time{}), nil
return NewFileInfo(name, true, 0, time.Now()), nil
}
prefix := path.Dir(name)
if prefix == "/" || prefix == "." {
@ -135,7 +135,7 @@ func (fs S3Fs) Stat(name string) (os.FileInfo, error) {
}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
for _, p := range page.CommonPrefixes {
if fs.isEqual(p.Prefix, name) {
result = NewFileInfo(name, true, 0, time.Time{})
result = NewFileInfo(name, true, 0, time.Now())
return false
}
}
@ -164,7 +164,7 @@ func (fs S3Fs) Lstat(name string) (os.FileInfo, error) {
// Open opens the named file for reading
func (fs S3Fs) Open(name string, offset int64) (*os.File, *pipeat.PipeReaderAt, func(), error) {
r, w, err := pipeat.AsyncWriterPipeInDir(fs.localTempDir)
r, w, err := pipeat.PipeInDir(fs.localTempDir)
if err != nil {
return nil, nil, nil, err
}
@ -182,7 +182,7 @@ func (fs S3Fs) Open(name string, offset int64) (*os.File, *pipeat.PipeReaderAt,
Key: aws.String(name),
Range: streamRange,
})
w.CloseWithError(err) //nolint:errcheck // the returned error is always null
w.CloseWithError(err) //nolint:errcheck
fsLog(fs, logger.LevelDebug, "download completed, path: %#v size: %v, err: %v", name, n, err)
metrics.S3TransferCompleted(n, 1, err)
}()
@ -210,7 +210,7 @@ func (fs S3Fs) Create(name string, flag int) (*os.File, *PipeWriter, func(), err
u.Concurrency = fs.config.UploadConcurrency
u.PartSize = fs.config.UploadPartSize
})
r.CloseWithError(err) //nolint:errcheck // the returned error is always null
r.CloseWithError(err) //nolint:errcheck
p.Done(err)
fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, response: %v, read bytes: %v, err: %+v",
name, response, r.GetReadedBytes(), err)
@ -351,7 +351,7 @@ func (fs S3Fs) ReadDir(dirname string) ([]os.FileInfo, error) {
}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
for _, p := range page.CommonPrefixes {
name, isDir := fs.resolve(p.Prefix, prefix)
result = append(result, NewFileInfo(name, isDir, 0, time.Time{}))
result = append(result, NewFileInfo(name, isDir, 0, time.Now()))
}
for _, fileObject := range page.Contents {
objectSize := *fileObject.Size
@ -415,12 +415,11 @@ func (S3Fs) IsPermission(err error) bool {
return strings.Contains(err.Error(), "403")
}
// CheckRootPath creates the specified root directory if it does not exists
// CheckRootPath creates the specified local root directory if it does not exist
func (fs S3Fs) CheckRootPath(username string, uid int, gid int) bool {
// we need a local directory for temporary files
osFs := NewOsFs(fs.ConnectionID(), fs.localTempDir, nil)
osFs.CheckRootPath(username, uid, gid)
return fs.checkIfBucketExists() != nil
return osFs.CheckRootPath(username, uid, gid)
}
// ScanRootDirContents returns the number of files contained in the bucket,
@ -476,9 +475,40 @@ func (fs S3Fs) GetRelativePath(name string) string {
}
// Walk walks the file tree rooted at root, calling walkFn for each file or
// directory in the tree, including root
func (S3Fs) Walk(root string, walkFn filepath.WalkFunc) error {
return errUnsupported
// directory in the tree, including root. The results are unordered
func (fs S3Fs) Walk(root string, walkFn filepath.WalkFunc) error {
prefix := ""
if root != "/" && root != "." {
prefix = strings.TrimPrefix(root, "/")
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
}
ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
defer cancelFn()
err := fs.svc.ListObjectsV2PagesWithContext(ctx, &s3.ListObjectsV2Input{
Bucket: aws.String(fs.config.Bucket),
Prefix: aws.String(prefix),
}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
for _, fileObject := range page.Contents {
objectSize := *fileObject.Size
objectModTime := *fileObject.LastModified
isDir := strings.HasSuffix(*fileObject.Key, "/")
name := path.Clean(*fileObject.Key)
if len(name) == 0 {
continue
}
err := walkFn(fs.Join("/", *fileObject.Key), NewFileInfo(name, isDir, objectSize, objectModTime), nil)
if err != nil {
return false
}
}
return true
})
metrics.S3ListObjectsCompleted(err)
walkFn(root, NewFileInfo(root, true, 0, time.Now()), err) //nolint:errcheck
return err
}
// Join joins any number of path elements into a single path
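As the updated doc comment notes, this cloud Walk visits objects in listing order, so the results are unordered. A minimal sketch of a caller, in the spirit of the removeDirTree helper added later in this commit (the `listFiles` helper and the `example` package name are made up for illustration):

```go
package example // illustrative sketch, not part of the diff

import (
	"os"

	"github.com/drakkan/sftpgo/vfs"
)

// listFiles collects the paths of all regular files visited by fs.Walk.
// "fs" is assumed to be an already configured vfs.Fs (e.g. an S3Fs or GCSFs).
func listFiles(fs vfs.Fs, root string) ([]string, error) {
	var files []string
	err := fs.Walk(root, func(walkedPath string, info os.FileInfo, err error) error {
		if err != nil {
			return err // propagate listing errors, as a filepath.WalkFunc normally does
		}
		if !info.IsDir() {
			files = append(files, walkedPath)
		}
		return nil
	})
	return files, err
}
```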
@ -535,3 +565,17 @@ func (fs *S3Fs) checkIfBucketExists() error {
metrics.S3HeadBucketCompleted(err)
return err
}
// GetMimeType implements MimeTyper interface
func (fs S3Fs) GetMimeType(name string) (string, error) {
ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
defer cancelFn()
obj, err := fs.svc.HeadObjectWithContext(ctx, &s3.HeadObjectInput{
Bucket: aws.String(fs.config.Bucket),
Key: aws.String(name),
})
if err != nil {
return "", err
}
return *obj.ContentType, err
}

View file

@ -47,6 +47,11 @@ type Fs interface {
HasVirtualFolders() bool
}
// MimeTyper defines an optional interface to get the content type
type MimeTyper interface {
GetMimeType(name string) (string, error)
}
var errUnsupported = errors.New("Not supported")
// QuotaCheckResult defines the result for a quota check
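MimeTyper, like the FileContentTyper interface above, is optional, so callers are expected to probe for it with a type assertion; the WebDAV ContentType method added later in this commit does exactly that. Below is a minimal sketch of the pattern (the `detectContentType` helper and the `example` package name are hypothetical):

```go
package example // illustrative sketch, not part of the diff

import (
	"mime"
	"path"

	"github.com/drakkan/sftpgo/vfs"
)

// detectContentType resolves a content type for a file on any vfs.Fs.
// S3Fs implements MimeTyper in this diff; filesystems that don't simply
// fall back to extension based detection here.
func detectContentType(fs vfs.Fs, fsPath, virtualPath string) string {
	if mt, ok := fs.(vfs.MimeTyper); ok {
		if ct, err := mt.GetMimeType(fsPath); err == nil && ct != "" {
			return ct
		}
	}
	return mime.TypeByExtension(path.Ext(virtualPath))
}
```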

304
webdavd/file.go Normal file
View file

@ -0,0 +1,304 @@
package webdavd
import (
"context"
"errors"
"io"
"mime"
"os"
"path"
"sync/atomic"
"time"
"github.com/eikenb/pipeat"
"golang.org/x/net/webdav"
"github.com/drakkan/sftpgo/common"
"github.com/drakkan/sftpgo/vfs"
)
var errTransferAborted = errors.New("transfer aborted")
type webDavFile struct {
*common.BaseTransfer
writer io.WriteCloser
reader io.ReadCloser
isFinished bool
maxWriteSize int64
startOffset int64
info os.FileInfo
fs vfs.Fs
}
func newWebDavFile(baseTransfer *common.BaseTransfer, pipeWriter *vfs.PipeWriter, pipeReader *pipeat.PipeReaderAt,
maxWriteSize int64, info os.FileInfo, fs vfs.Fs) *webDavFile {
var writer io.WriteCloser
var reader io.ReadCloser
if baseTransfer.File != nil {
writer = baseTransfer.File
reader = baseTransfer.File
} else if pipeWriter != nil {
writer = pipeWriter
} else if pipeReader != nil {
reader = pipeReader
}
return &webDavFile{
BaseTransfer: baseTransfer,
writer: writer,
reader: reader,
isFinished: false,
maxWriteSize: maxWriteSize,
startOffset: 0,
info: info,
fs: fs,
}
}
type webDavFileInfo struct {
os.FileInfo
file *webDavFile
}
// ContentType implements webdav.ContentTyper interface
func (fi webDavFileInfo) ContentType(ctx context.Context) (string, error) {
var contentType string
if c, ok := fi.FileInfo.(vfs.FileContentTyper); ok {
contentType = c.GetContentType()
}
if len(contentType) > 0 {
return contentType, nil
}
contentType = mime.TypeByExtension(path.Ext(fi.file.GetVirtualPath()))
if len(contentType) > 0 {
return contentType, nil
}
if c, ok := fi.file.fs.(vfs.MimeTyper); ok {
contentType, err := c.GetMimeType(fi.file.GetFsPath())
return contentType, err
}
return contentType, webdav.ErrNotImplemented
}
// Readdir reads directory entries from the handle
func (f *webDavFile) Readdir(count int) ([]os.FileInfo, error) {
if f.isDir() {
return f.Connection.ListDir(f.GetFsPath(), f.GetVirtualPath())
}
return nil, errors.New("we can only list directory contents, this is not a directory")
}
// Stat the handle
func (f *webDavFile) Stat() (os.FileInfo, error) {
if f.info != nil {
fi := webDavFileInfo{
FileInfo: f.info,
file: f,
}
return fi, nil
}
f.Lock()
closed := f.isFinished
errUpload := f.ErrTransfer
f.Unlock()
if f.GetType() == common.TransferUpload && closed && errUpload == nil {
info := webDavFileInfo{
FileInfo: vfs.NewFileInfo(f.GetFsPath(), false, atomic.LoadInt64(&f.BytesReceived), time.Now()),
file: f,
}
return info, nil
}
info, err := f.fs.Stat(f.GetFsPath())
if err != nil {
return info, err
}
fi := webDavFileInfo{
FileInfo: info,
file: f,
}
return fi, err
}
// Read reads the file contents for a download.
func (f *webDavFile) Read(p []byte) (n int, err error) {
if atomic.LoadInt32(&f.AbortTransfer) == 1 {
return 0, errTransferAborted
}
f.Connection.UpdateLastActivity()
// the file is read sequentially, so we don't need to check for concurrent reads;
// we only lock the transfer while opening the remote file
if f.reader == nil {
if f.GetType() != common.TransferDownload {
f.TransferError(common.ErrOpUnsupported)
return 0, common.ErrOpUnsupported
}
_, r, cancelFn, err := f.fs.Open(f.GetFsPath(), 0)
f.Lock()
f.reader = r
f.ErrTransfer = err
f.BaseTransfer.SetCancelFn(cancelFn)
f.startOffset = 0
f.Unlock()
if err != nil {
return 0, err
}
}
var readed int
var e error
readed, e = f.reader.Read(p)
atomic.AddInt64(&f.BytesSent, int64(readed))
if e != nil && e != io.EOF {
f.TransferError(e)
return readed, e
}
f.HandleThrottle()
return readed, e
}
// Write writes the uploaded contents.
func (f *webDavFile) Write(p []byte) (n int, err error) {
if atomic.LoadInt32(&f.AbortTransfer) == 1 {
return 0, errTransferAborted
}
f.Connection.UpdateLastActivity()
var written int
var e error
written, e = f.writer.Write(p)
atomic.AddInt64(&f.BytesReceived, int64(written))
if f.maxWriteSize > 0 && e == nil && atomic.LoadInt64(&f.BytesReceived) > f.maxWriteSize {
e = common.ErrQuotaExceeded
}
if e != nil {
f.TransferError(e)
return written, e
}
f.HandleThrottle()
return written, e
}
// Seek sets the offset for the next Read or Write on the writer to offset,
// interpreted according to whence: 0 means relative to the origin of the file,
// 1 means relative to the current offset, and 2 means relative to the end.
// It returns the new offset and an error, if any.
func (f *webDavFile) Seek(offset int64, whence int) (int64, error) {
f.Connection.UpdateLastActivity()
if f.File != nil {
ret, err := f.File.Seek(offset, whence)
if err != nil {
f.TransferError(err)
}
return ret, err
}
if f.GetType() == common.TransferDownload {
readOffset := f.startOffset + atomic.LoadInt64(&f.BytesSent)
if offset == 0 && readOffset == 0 {
if whence == io.SeekStart {
return 0, nil
} else if whence == io.SeekEnd && f.info != nil {
return f.info.Size(), nil
}
}
// close the reader and create a new one at startByte
if f.reader != nil {
f.reader.Close() //nolint:errcheck
}
startByte := int64(0)
atomic.StoreInt64(&f.BytesReceived, 0)
atomic.StoreInt64(&f.BytesSent, 0)
switch whence {
case io.SeekStart:
startByte = offset
case io.SeekCurrent:
startByte = readOffset + offset
case io.SeekEnd:
if f.info != nil {
startByte = f.info.Size() - offset
} else {
err := errors.New("unable to get file size, seek from end not possible")
f.TransferError(err)
return 0, err
}
}
_, r, cancelFn, err := f.fs.Open(f.GetFsPath(), startByte)
f.Lock()
if err == nil {
f.startOffset = startByte
f.reader = r
}
f.ErrTransfer = err
f.BaseTransfer.SetCancelFn(cancelFn)
f.Unlock()
return startByte, err
}
return 0, common.ErrOpUnsupported
}
// Close closes the open directory or the current transfer
func (f *webDavFile) Close() error {
if err := f.setFinished(); err != nil {
return err
}
err := f.closeIO()
if f.isTransfer() {
errBaseClose := f.BaseTransfer.Close()
if errBaseClose != nil {
err = errBaseClose
}
} else {
f.Connection.RemoveTransfer(f.BaseTransfer)
}
return f.Connection.GetFsError(err)
}
func (f *webDavFile) closeIO() error {
var err error
if f.File != nil {
err = f.File.Close()
} else if f.writer != nil {
err = f.writer.Close()
f.Lock()
// we set ErrTransfer here so the quota is not updated; in this case the uploads are atomic
if err != nil && f.ErrTransfer == nil {
f.ErrTransfer = err
}
f.Unlock()
} else if f.reader != nil {
err = f.reader.Close()
}
return err
}
func (f *webDavFile) setFinished() error {
f.Lock()
defer f.Unlock()
if f.isFinished {
return common.ErrTransferClosed
}
f.isFinished = true
return nil
}
func (f *webDavFile) isDir() bool {
if f.info == nil {
return false
}
return f.info.IsDir()
}
func (f *webDavFile) isTransfer() bool {
if f.GetType() == common.TransferDownload {
return (f.reader != nil)
}
return true
}

425
webdavd/handler.go Normal file
View file

@ -0,0 +1,425 @@
package webdavd
import (
"context"
"net/http"
"os"
"path"
"strings"
"github.com/eikenb/pipeat"
"golang.org/x/net/webdav"
"github.com/drakkan/sftpgo/common"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/utils"
"github.com/drakkan/sftpgo/vfs"
)
// Connection details for a WebDAV connection.
type Connection struct {
*common.BaseConnection
request *http.Request
}
// GetClientVersion returns the connected client's version.
func (c *Connection) GetClientVersion() string {
if c.request != nil {
return c.request.UserAgent()
}
return ""
}
// GetRemoteAddress returns the connected client's address
func (c *Connection) GetRemoteAddress() string {
if c.request != nil {
return c.request.RemoteAddr
}
return ""
}
// SetConnDeadline does nothing
func (c *Connection) SetConnDeadline() {}
// Disconnect closes the active transfer
func (c *Connection) Disconnect() error {
return c.SignalTransfersAbort()
}
// GetCommand returns the request method
func (c *Connection) GetCommand() string {
if c.request != nil {
return strings.ToUpper(c.request.Method)
}
return ""
}
// Mkdir creates a directory using the connection filesystem
func (c *Connection) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
c.UpdateLastActivity()
name = utils.CleanPath(name)
p, err := c.Fs.ResolvePath(name)
if err != nil {
return c.GetFsError(err)
}
return c.CreateDir(p, name)
}
// Rename renames a file or a directory
func (c *Connection) Rename(ctx context.Context, oldName, newName string) error {
c.UpdateLastActivity()
oldName = utils.CleanPath(oldName)
newName = utils.CleanPath(newName)
p, err := c.Fs.ResolvePath(oldName)
if err != nil {
return c.GetFsError(err)
}
t, err := c.Fs.ResolvePath(newName)
if err != nil {
return c.GetFsError(err)
}
if err = c.BaseConnection.Rename(p, t, oldName, newName); err != nil {
return err
}
vfs.SetPathPermissions(c.Fs, t, c.User.GetUID(), c.User.GetGID())
return nil
}
// Stat returns a FileInfo describing the named file/directory, or an error,
// if any
func (c *Connection) Stat(ctx context.Context, name string) (os.FileInfo, error) {
c.UpdateLastActivity()
name = utils.CleanPath(name)
if !c.User.HasPerm(dataprovider.PermListItems, path.Dir(name)) {
return nil, c.GetPermissionDeniedError()
}
p, err := c.Fs.ResolvePath(name)
if err != nil {
return nil, c.GetFsError(err)
}
fi, err := c.Fs.Stat(p)
if err != nil {
c.Log(logger.LevelWarn, "error running stat on path %#v: %+v", p, err)
return nil, c.GetFsError(err)
}
return fi, err
}
// RemoveAll removes path and any children it contains.
// If the path does not exist, RemoveAll returns nil (no error).
func (c *Connection) RemoveAll(ctx context.Context, name string) error {
c.UpdateLastActivity()
name = utils.CleanPath(name)
p, err := c.Fs.ResolvePath(name)
if err != nil {
return c.GetFsError(err)
}
var fi os.FileInfo
if fi, err = c.Fs.Lstat(p); err != nil {
c.Log(logger.LevelWarn, "failed to remove a file %#v: stat error: %+v", p, err)
return c.GetFsError(err)
}
if fi.IsDir() && fi.Mode()&os.ModeSymlink != os.ModeSymlink {
return c.removeDirTree(p, name)
}
return c.RemoveFile(p, name, fi)
}
// OpenFile opens the named file with the specified flag.
// This method is used for uploads and downloads but also for Stat and Readdir
func (c *Connection) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (webdav.File, error) {
c.UpdateLastActivity()
name = utils.CleanPath(name)
p, err := c.Fs.ResolvePath(name)
if err != nil {
return nil, c.GetFsError(err)
}
if flag == os.O_RDONLY {
// Download, Stat or Readdir
fi, err := c.Fs.Lstat(p)
if err != nil {
return nil, c.GetFsError(err)
}
return c.getFile(p, name, fi)
}
return c.putFile(p, name)
}
func (c *Connection) getFile(fsPath, virtualPath string, info os.FileInfo) (webdav.File, error) {
var err error
if info.IsDir() {
if !c.User.HasPerm(dataprovider.PermListItems, virtualPath) {
return nil, c.GetPermissionDeniedError()
}
var file *os.File
if vfs.IsLocalOsFs(c.Fs) {
file, _, _, err = c.Fs.Open(fsPath, 0)
if err != nil {
c.Log(logger.LevelWarn, "could not open directory %#v for reading: %+v", fsPath, err)
return nil, c.GetFsError(err)
}
}
baseTransfer := common.NewBaseTransfer(file, c.BaseConnection, nil, fsPath, virtualPath, common.TransferDownload,
0, 0, false)
return newWebDavFile(baseTransfer, nil, nil, 0, info, c.Fs), nil
}
// we don't know if the file will be downloaded or only opened to get its properties, so we check both permissions
if !c.User.HasPerms([]string{dataprovider.PermDownload, dataprovider.PermListItems}, path.Dir(virtualPath)) {
return nil, c.GetPermissionDeniedError()
}
if !c.User.IsFileAllowed(virtualPath) {
c.Log(logger.LevelWarn, "reading file %#v is not allowed", virtualPath)
return nil, c.GetPermissionDeniedError()
}
var file *os.File
var r *pipeat.PipeReaderAt
var cancelFn func()
// for cloud fs we open the file when we receive the first read, to avoid downloading the first part of
// the file if it was opened only to get stats and not for a real download
if vfs.IsLocalOsFs(c.Fs) {
file, r, cancelFn, err = c.Fs.Open(fsPath, 0)
if err != nil {
c.Log(logger.LevelWarn, "could not open file %#v for reading: %+v", fsPath, err)
return nil, c.GetFsError(err)
}
}
baseTransfer := common.NewBaseTransfer(file, c.BaseConnection, cancelFn, fsPath, virtualPath, common.TransferDownload,
0, 0, false)
return newWebDavFile(baseTransfer, nil, r, 0, info, c.Fs), nil
}
func (c *Connection) putFile(fsPath, virtualPath string) (webdav.File, error) {
if !c.User.IsFileAllowed(virtualPath) {
c.Log(logger.LevelWarn, "writing file %#v is not allowed", virtualPath)
return nil, c.GetPermissionDeniedError()
}
filePath := fsPath
if common.Config.IsAtomicUploadEnabled() && c.Fs.IsAtomicUploadSupported() {
filePath = c.Fs.GetAtomicUploadPath(fsPath)
}
stat, statErr := c.Fs.Lstat(fsPath)
if (statErr == nil && stat.Mode()&os.ModeSymlink == os.ModeSymlink) || c.Fs.IsNotExist(statErr) {
if !c.User.HasPerm(dataprovider.PermUpload, path.Dir(virtualPath)) {
return nil, c.GetPermissionDeniedError()
}
return c.handleUploadToNewFile(fsPath, filePath, virtualPath)
}
if statErr != nil {
c.Log(logger.LevelError, "error performing file stat %#v: %+v", fsPath, statErr)
return nil, c.GetFsError(statErr)
}
// This happens if we upload a file with the same name as an existing directory
if stat.IsDir() {
c.Log(logger.LevelWarn, "attempted to open a directory for writing to: %#v", fsPath)
return nil, c.GetOpUnsupportedError()
}
if !c.User.HasPerm(dataprovider.PermOverwrite, path.Dir(virtualPath)) {
return nil, c.GetPermissionDeniedError()
}
return c.handleUploadToExistingFile(fsPath, filePath, stat.Size(), virtualPath)
}
func (c *Connection) handleUploadToNewFile(resolvedPath, filePath, requestPath string) (webdav.File, error) {
quotaResult := c.HasSpace(true, requestPath)
if !quotaResult.HasSpace {
c.Log(logger.LevelInfo, "denying file write due to quota limits")
return nil, common.ErrQuotaExceeded
}
file, w, cancelFn, err := c.Fs.Create(filePath, 0)
if err != nil {
c.Log(logger.LevelWarn, "error creating file %#v: %+v", resolvedPath, err)
return nil, c.GetFsError(err)
}
vfs.SetPathPermissions(c.Fs, filePath, c.User.GetUID(), c.User.GetGID())
baseTransfer := common.NewBaseTransfer(file, c.BaseConnection, cancelFn, resolvedPath, requestPath,
common.TransferUpload, 0, 0, true)
return newWebDavFile(baseTransfer, w, nil, quotaResult.GetRemainingSize(), nil, c.Fs), nil
}
func (c *Connection) handleUploadToExistingFile(resolvedPath, filePath string, fileSize int64,
requestPath string) (webdav.File, error) {
var err error
quotaResult := c.HasSpace(false, requestPath)
if !quotaResult.HasSpace {
c.Log(logger.LevelInfo, "denying file write due to quota limits")
return nil, common.ErrQuotaExceeded
}
if common.Config.IsAtomicUploadEnabled() && c.Fs.IsAtomicUploadSupported() {
err = c.Fs.Rename(resolvedPath, filePath)
if err != nil {
c.Log(logger.LevelWarn, "error renaming existing file for atomic upload, source: %#v, dest: %#v, err: %+v",
resolvedPath, filePath, err)
return nil, c.GetFsError(err)
}
}
file, w, cancelFn, err := c.Fs.Create(filePath, 0)
if err != nil {
c.Log(logger.LevelWarn, "error creating file %#v: %+v", resolvedPath, err)
return nil, c.GetFsError(err)
}
initialSize := int64(0)
// if there is a size limit the remaining size cannot be 0 here, since in that case quotaResult.HasSpace
// would have returned false and we would have denied the upload above
maxWriteSize := quotaResult.GetRemainingSize()
if vfs.IsLocalOsFs(c.Fs) {
vfolder, err := c.User.GetVirtualFolderForPath(path.Dir(requestPath))
if err == nil {
dataprovider.UpdateVirtualFolderQuota(vfolder.BaseVirtualFolder, 0, -fileSize, false) //nolint:errcheck
if vfolder.IsIncludedInUserQuota() {
dataprovider.UpdateUserQuota(c.User, 0, -fileSize, false) //nolint:errcheck
}
} else {
dataprovider.UpdateUserQuota(c.User, 0, -fileSize, false) //nolint:errcheck
}
} else {
initialSize = fileSize
}
if maxWriteSize > 0 {
maxWriteSize += fileSize
}
vfs.SetPathPermissions(c.Fs, filePath, c.User.GetUID(), c.User.GetGID())
baseTransfer := common.NewBaseTransfer(file, c.BaseConnection, cancelFn, resolvedPath, requestPath,
common.TransferUpload, 0, initialSize, false)
return newWebDavFile(baseTransfer, w, nil, maxWriteSize, nil, c.Fs), nil
}
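To make the quota bookkeeping above concrete, here is a worked example with illustrative numbers, assuming a local filesystem and a user-level size quota: if a user has 1 MiB of quota remaining and overwrites an existing 2 MiB file, HasSpace reports the pre-overwrite remaining size (1 MiB), the existing 2 MiB are credited back via UpdateUserQuota/UpdateVirtualFolderQuota, and maxWriteSize becomes 1 MiB + 2 MiB = 3 MiB, so the replacement upload can grow up to 3 MiB before Write returns ErrQuotaExceeded.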
type objectMapping struct {
fsPath string
virtualPath string
info os.FileInfo
}
func (c *Connection) removeDirTree(fsPath, virtualPath string) error {
var dirsToRemove []objectMapping
var filesToRemove []objectMapping
err := c.Fs.Walk(fsPath, func(walkedPath string, info os.FileInfo, err error) error {
if err != nil {
return err
}
obj := objectMapping{
fsPath: walkedPath,
virtualPath: c.Fs.GetRelativePath(walkedPath),
info: info,
}
if info.IsDir() {
err = c.IsRemoveDirAllowed(obj.fsPath, obj.virtualPath)
isDuplicated := false
for _, d := range dirsToRemove {
if d.fsPath == obj.fsPath {
isDuplicated = true
break
}
}
if !isDuplicated {
dirsToRemove = append(dirsToRemove, obj)
}
} else {
err = c.IsRemoveFileAllowed(obj.fsPath, obj.virtualPath)
filesToRemove = append(filesToRemove, obj)
}
if err != nil {
c.Log(logger.LevelDebug, "unable to remove dir tree, object %#v->%#v cannot be removed: %v",
virtualPath, fsPath, err)
return err
}
return nil
})
if err != nil {
c.Log(logger.LevelWarn, "failed to remove dir tree %#v->%#v: error: %+v", virtualPath, fsPath, err)
return err
}
for _, fileObj := range filesToRemove {
err = c.RemoveFile(fileObj.fsPath, fileObj.virtualPath, fileObj.info)
if err != nil {
c.Log(logger.LevelDebug, "unable to remove dir tree, error removing file %#v->%#v: %v",
fileObj.virtualPath, fileObj.fsPath, err)
return err
}
}
for _, dirObj := range c.orderDirsToRemove(dirsToRemove) {
err = c.RemoveDir(dirObj.fsPath, dirObj.virtualPath)
if err != nil {
c.Log(logger.LevelDebug, "unable to remove dir tree, error removing directory %#v->%#v: %v",
dirObj.virtualPath, dirObj.fsPath, err)
return err
}
}
return err
}
// order directories so that the empty ones (those with no child directories left to remove) are at the start of the slice
func (c *Connection) orderDirsToRemove(dirsToRemove []objectMapping) []objectMapping {
orderedDirs := make([]objectMapping, 0, len(dirsToRemove))
removedDirs := make([]string, 0, len(dirsToRemove))
pathSeparator := "/"
if vfs.IsLocalOsFs(c.Fs) {
pathSeparator = string(os.PathSeparator)
}
for len(orderedDirs) < len(dirsToRemove) {
for idx, d := range dirsToRemove {
if utils.IsStringInSlice(d.fsPath, removedDirs) {
continue
}
isEmpty := true
for idx1, d1 := range dirsToRemove {
if idx == idx1 {
continue
}
if utils.IsStringInSlice(d1.fsPath, removedDirs) {
continue
}
if strings.HasPrefix(d1.fsPath, d.fsPath+pathSeparator) {
isEmpty = false
break
}
}
if isEmpty {
orderedDirs = append(orderedDirs, d)
removedDirs = append(removedDirs, d.fsPath)
}
}
}
return orderedDirs
}

612
webdavd/internal_test.go Normal file
View file

@ -0,0 +1,612 @@
package webdavd
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"path/filepath"
"runtime"
"testing"
"time"
"github.com/eikenb/pipeat"
"github.com/stretchr/testify/assert"
"github.com/drakkan/sftpgo/common"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/vfs"
)
const (
configDir = ".."
testFile = "test_dav_file"
)
var (
errWalkDir = errors.New("err walk dir")
errWalkFile = errors.New("err walk file")
)
// MockOsFs mockable OsFs
type MockOsFs struct {
vfs.Fs
err error
isAtomicUploadSupported bool
}
// Name returns the name for the Fs implementation
func (fs MockOsFs) Name() string {
return "mockOsFs"
}
// Open returns nil
func (MockOsFs) Open(name string, offset int64) (*os.File, *pipeat.PipeReaderAt, func(), error) {
return nil, nil, nil, nil
}
// IsUploadResumeSupported returns true if upload resume is supported
func (MockOsFs) IsUploadResumeSupported() bool {
return false
}
// IsAtomicUploadSupported returns true if atomic upload is supported
func (fs MockOsFs) IsAtomicUploadSupported() bool {
return fs.isAtomicUploadSupported
}
// Remove removes the named file or (empty) directory.
func (fs MockOsFs) Remove(name string, isDir bool) error {
if fs.err != nil {
return fs.err
}
return os.Remove(name)
}
// Rename renames (moves) source to target
func (fs MockOsFs) Rename(source, target string) error {
if fs.err != nil {
return fs.err
}
return os.Rename(source, target)
}
// Walk returns a duplicate path for testing
func (fs MockOsFs) Walk(root string, walkFn filepath.WalkFunc) error {
if fs.err == errWalkDir {
walkFn("fsdpath", vfs.NewFileInfo("dpath", true, 0, time.Now()), nil) //nolint:errcheck
walkFn("fsdpath", vfs.NewFileInfo("dpath", true, 0, time.Now()), nil) //nolint:errcheck
return nil
}
walkFn("fsfpath", vfs.NewFileInfo("fpath", false, 0, time.Now()), nil) //nolint:errcheck
return fs.err
}
// GetMimeType implements vfs.MimeTyper
func (fs MockOsFs) GetMimeType(name string) (string, error) {
return "application/octet-stream", nil
}
func newMockOsFs(err error, atomicUpload bool, connectionID, rootDir string) vfs.Fs {
return &MockOsFs{
Fs: vfs.NewOsFs(connectionID, rootDir, nil),
err: err,
isAtomicUploadSupported: atomicUpload,
}
}
func TestOrderDirsToRemove(t *testing.T) {
user := dataprovider.User{}
fs := vfs.NewOsFs("id", os.TempDir(), nil)
connection := &Connection{
BaseConnection: common.NewBaseConnection(fs.ConnectionID(), common.ProtocolWebDAV, user, fs),
request: nil,
}
dirsToRemove := []objectMapping{}
orderedDirs := connection.orderDirsToRemove(dirsToRemove)
assert.Equal(t, len(dirsToRemove), len(orderedDirs))
dirsToRemove = []objectMapping{
{
fsPath: "dir1",
virtualPath: "",
},
}
orderedDirs = connection.orderDirsToRemove(dirsToRemove)
assert.Equal(t, len(dirsToRemove), len(orderedDirs))
dirsToRemove = []objectMapping{
{
fsPath: "dir1",
virtualPath: "",
},
{
fsPath: "dir12",
virtualPath: "",
},
{
fsPath: filepath.Join("dir1", "a", "b"),
virtualPath: "",
},
{
fsPath: filepath.Join("dir1", "a"),
virtualPath: "",
},
}
orderedDirs = connection.orderDirsToRemove(dirsToRemove)
if assert.Equal(t, len(dirsToRemove), len(orderedDirs)) {
assert.Equal(t, "dir12", orderedDirs[0].fsPath)
assert.Equal(t, filepath.Join("dir1", "a", "b"), orderedDirs[1].fsPath)
assert.Equal(t, filepath.Join("dir1", "a"), orderedDirs[2].fsPath)
assert.Equal(t, "dir1", orderedDirs[3].fsPath)
}
}
func TestUserInvalidParams(t *testing.T) {
u := dataprovider.User{
Username: "username",
HomeDir: "invalid",
}
c := &Configuration{
BindPort: 9000,
}
server, err := newServer(c, configDir)
assert.NoError(t, err)
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("/%v", u.Username), nil)
assert.NoError(t, err)
_, err = server.validateUser(u, req)
if assert.Error(t, err) {
assert.EqualError(t, err, fmt.Sprintf("cannot login user with invalid home dir: %#v", u.HomeDir))
}
u.HomeDir = filepath.Clean(os.TempDir())
subDir := "subdir"
mappedPath1 := filepath.Join(os.TempDir(), "vdir1")
vdirPath1 := "/vdir1"
mappedPath2 := filepath.Join(os.TempDir(), "vdir1", subDir)
vdirPath2 := "/vdir2"
u.VirtualFolders = append(u.VirtualFolders, vfs.VirtualFolder{
BaseVirtualFolder: vfs.BaseVirtualFolder{
MappedPath: mappedPath1,
},
VirtualPath: vdirPath1,
})
u.VirtualFolders = append(u.VirtualFolders, vfs.VirtualFolder{
BaseVirtualFolder: vfs.BaseVirtualFolder{
MappedPath: mappedPath2,
},
VirtualPath: vdirPath2,
})
_, err = server.validateUser(u, req)
if assert.Error(t, err) {
assert.EqualError(t, err, "overlapping mapped folders are allowed only with quota tracking disabled")
}
req.TLS = &tls.ConnectionState{}
writeLog(req, nil)
}
func TestRemoteAddress(t *testing.T) {
req, err := http.NewRequest(http.MethodGet, "/username", nil)
assert.NoError(t, err)
assert.Empty(t, req.RemoteAddr)
remoteAddr1 := "100.100.100.100"
remoteAddr2 := "172.172.172.172"
req.Header.Set("X-Forwarded-For", remoteAddr1)
checkRemoteAddress(req)
assert.Equal(t, remoteAddr1, req.RemoteAddr)
req.RemoteAddr = ""
req.Header.Set("X-Forwarded-For", fmt.Sprintf("%v, %v", remoteAddr2, remoteAddr1))
checkRemoteAddress(req)
assert.Equal(t, remoteAddr2, req.RemoteAddr)
req.Header.Del("X-Forwarded-For")
req.RemoteAddr = ""
req.Header.Set("X-Real-IP", remoteAddr1)
checkRemoteAddress(req)
assert.Equal(t, remoteAddr1, req.RemoteAddr)
req.RemoteAddr = ""
oldValue := common.Config.ProxyProtocol
common.Config.ProxyProtocol = 1
checkRemoteAddress(req)
assert.Empty(t, req.RemoteAddr)
common.Config.ProxyProtocol = oldValue
}
func TestConnWithNilRequest(t *testing.T) {
c := &Connection{}
assert.Empty(t, c.GetClientVersion())
assert.Empty(t, c.GetCommand())
assert.Empty(t, c.GetRemoteAddress())
}
func TestResolvePathErrors(t *testing.T) {
ctx := context.Background()
user := dataprovider.User{
HomeDir: "invalid",
}
user.Permissions = make(map[string][]string)
user.Permissions["/"] = []string{dataprovider.PermAny}
fs := vfs.NewOsFs("connID", user.HomeDir, nil)
connection := &Connection{
BaseConnection: common.NewBaseConnection(fs.ConnectionID(), common.ProtocolWebDAV, user, fs),
}
err := connection.Mkdir(ctx, "", os.ModePerm)
if assert.Error(t, err) {
assert.EqualError(t, err, common.ErrGenericFailure.Error())
}
err = connection.Rename(ctx, "oldName", "newName")
if assert.Error(t, err) {
assert.EqualError(t, err, common.ErrGenericFailure.Error())
}
_, err = connection.Stat(ctx, "name")
if assert.Error(t, err) {
assert.EqualError(t, err, common.ErrGenericFailure.Error())
}
err = connection.RemoveAll(ctx, "")
if assert.Error(t, err) {
assert.EqualError(t, err, common.ErrGenericFailure.Error())
}
_, err = connection.OpenFile(ctx, "", 0, os.ModePerm)
if assert.Error(t, err) {
assert.EqualError(t, err, common.ErrGenericFailure.Error())
}
if runtime.GOOS != "windows" {
connection.User.HomeDir = filepath.Clean(os.TempDir())
connection.Fs = vfs.NewOsFs("connID", connection.User.HomeDir, nil)
subDir := "sub"
testTxtFile := "file.txt"
err = os.MkdirAll(filepath.Join(os.TempDir(), subDir, subDir), os.ModePerm)
assert.NoError(t, err)
err = ioutil.WriteFile(filepath.Join(os.TempDir(), subDir, subDir, testTxtFile), []byte("content"), os.ModePerm)
assert.NoError(t, err)
err = os.Chmod(filepath.Join(os.TempDir(), subDir, subDir), 0001)
assert.NoError(t, err)
err = connection.Rename(ctx, testTxtFile, path.Join(subDir, subDir, testTxtFile))
if assert.Error(t, err) {
assert.EqualError(t, err, common.ErrPermissionDenied.Error())
}
_, err = connection.putFile(filepath.Join(connection.User.HomeDir, subDir, subDir, testTxtFile),
path.Join(subDir, subDir, testTxtFile))
if assert.Error(t, err) {
assert.EqualError(t, err, common.ErrPermissionDenied.Error())
}
err = os.Chmod(filepath.Join(os.TempDir(), subDir, subDir), os.ModePerm)
assert.NoError(t, err)
err = os.RemoveAll(filepath.Join(os.TempDir(), subDir))
assert.NoError(t, err)
}
}
func TestFileAccessErrors(t *testing.T) {
ctx := context.Background()
user := dataprovider.User{
HomeDir: filepath.Clean(os.TempDir()),
}
user.Permissions = make(map[string][]string)
user.Permissions["/"] = []string{dataprovider.PermAny}
fs := vfs.NewOsFs("connID", user.HomeDir, nil)
connection := &Connection{
BaseConnection: common.NewBaseConnection(fs.ConnectionID(), common.ProtocolWebDAV, user, fs),
}
missingPath := "missing path"
fsMissingPath := filepath.Join(user.HomeDir, missingPath)
err := connection.RemoveAll(ctx, missingPath)
if assert.Error(t, err) {
assert.EqualError(t, err, os.ErrNotExist.Error())
}
info := vfs.NewFileInfo(missingPath, true, 0, time.Now())
_, err = connection.getFile(fsMissingPath, missingPath, info)
if assert.Error(t, err) {
assert.EqualError(t, err, os.ErrNotExist.Error())
}
info = vfs.NewFileInfo(missingPath, false, 123, time.Now())
_, err = connection.getFile(fsMissingPath, missingPath, info)
if assert.Error(t, err) {
assert.EqualError(t, err, os.ErrNotExist.Error())
}
p := filepath.Join(user.HomeDir, "adir", missingPath)
_, err = connection.handleUploadToNewFile(p, p, path.Join("adir", missingPath))
if assert.Error(t, err) {
assert.EqualError(t, err, os.ErrNotExist.Error())
}
_, err = connection.handleUploadToExistingFile(p, p, 0, path.Join("adir", missingPath))
if assert.Error(t, err) {
assert.EqualError(t, err, os.ErrNotExist.Error())
}
connection.Fs = newMockOsFs(nil, false, fs.ConnectionID(), user.HomeDir)
_, err = connection.handleUploadToExistingFile(p, p, 0, path.Join("adir", missingPath))
if assert.Error(t, err) {
assert.EqualError(t, err, os.ErrNotExist.Error())
}
f, err := ioutil.TempFile("", "temp")
assert.NoError(t, err)
err = f.Close()
assert.NoError(t, err)
davFile, err := connection.handleUploadToExistingFile(f.Name(), f.Name(), 123, f.Name())
if assert.NoError(t, err) {
transfer := davFile.(*webDavFile)
transfers := connection.GetTransfers()
if assert.Equal(t, 1, len(transfers)) {
assert.Equal(t, transfers[0].ID, transfer.GetID())
assert.Equal(t, int64(123), transfer.InitialSize)
err = transfer.Close()
assert.NoError(t, err)
assert.Equal(t, 0, len(connection.GetTransfers()))
}
}
err = os.Remove(f.Name())
assert.NoError(t, err)
}
func TestRemoveDirTree(t *testing.T) {
user := dataprovider.User{
HomeDir: filepath.Clean(os.TempDir()),
}
user.Permissions = make(map[string][]string)
user.Permissions["/"] = []string{dataprovider.PermAny}
fs := vfs.NewOsFs("connID", user.HomeDir, nil)
connection := &Connection{
BaseConnection: common.NewBaseConnection(fs.ConnectionID(), common.ProtocolWebDAV, user, fs),
}
vpath := path.Join("adir", "missing")
p := filepath.Join(user.HomeDir, "adir", "missing")
err := connection.removeDirTree(p, vpath)
if assert.Error(t, err) {
assert.True(t, os.IsNotExist(err))
}
connection.Fs = newMockOsFs(nil, false, "mockID", user.HomeDir)
err = connection.removeDirTree(p, vpath)
if assert.Error(t, err) {
assert.True(t, os.IsNotExist(err))
}
errFake := errors.New("fake err")
connection.Fs = newMockOsFs(errFake, false, "mockID", user.HomeDir)
err = connection.removeDirTree(p, vpath)
if assert.Error(t, err) {
assert.EqualError(t, err, errFake.Error())
}
connection.Fs = newMockOsFs(errWalkDir, true, "mockID", user.HomeDir)
err = connection.removeDirTree(p, vpath)
if assert.Error(t, err) {
assert.True(t, os.IsNotExist(err))
}
connection.Fs = newMockOsFs(errWalkFile, false, "mockID", user.HomeDir)
err = connection.removeDirTree(p, vpath)
if assert.Error(t, err) {
assert.EqualError(t, err, errWalkFile.Error())
}
connection.User.Permissions["/"] = []string{dataprovider.PermListItems}
connection.Fs = newMockOsFs(nil, false, "mockID", user.HomeDir)
err = connection.removeDirTree(p, vpath)
if assert.Error(t, err) {
assert.EqualError(t, err, common.ErrPermissionDenied.Error())
}
}
func TestContentType(t *testing.T) {
user := dataprovider.User{
HomeDir: filepath.Clean(os.TempDir()),
}
user.Permissions = make(map[string][]string)
user.Permissions["/"] = []string{dataprovider.PermAny}
fs := vfs.NewOsFs("connID", user.HomeDir, nil)
connection := &Connection{
BaseConnection: common.NewBaseConnection(fs.ConnectionID(), common.ProtocolWebDAV, user, fs),
}
testFilePath := filepath.Join(user.HomeDir, testFile)
ctx := context.Background()
baseTransfer := common.NewBaseTransfer(nil, connection.BaseConnection, nil, testFilePath, testFile,
common.TransferDownload, 0, 0, false)
info := vfs.NewFileInfo(testFilePath, true, 0, time.Now())
davFile := newWebDavFile(baseTransfer, nil, nil, 0, info, fs)
fi, err := davFile.Stat()
if assert.NoError(t, err) {
ctype, err := fi.(webDavFileInfo).ContentType(ctx)
assert.NoError(t, err)
assert.Equal(t, "inode/directory", ctype)
}
err = davFile.Close()
assert.NoError(t, err)
fs = newMockOsFs(nil, false, fs.ConnectionID(), user.GetHomeDir())
err = ioutil.WriteFile(testFilePath, []byte(""), os.ModePerm)
assert.NoError(t, err)
fi, err = os.Stat(testFilePath)
assert.NoError(t, err)
davFile = newWebDavFile(baseTransfer, nil, nil, 0, fi, fs)
fi, err = davFile.Stat()
if assert.NoError(t, err) {
ctype, err := fi.(webDavFileInfo).ContentType(ctx)
assert.NoError(t, err)
assert.Equal(t, "application/octet-stream", ctype)
}
_, err = davFile.Readdir(-1)
assert.Error(t, err)
err = davFile.Close()
assert.NoError(t, err)
err = os.Remove(testFilePath)
assert.NoError(t, err)
}
func TestTransferReadWriteErrors(t *testing.T) {
user := dataprovider.User{
HomeDir: filepath.Clean(os.TempDir()),
}
user.Permissions = make(map[string][]string)
user.Permissions["/"] = []string{dataprovider.PermAny}
fs := vfs.NewOsFs("connID", user.HomeDir, nil)
connection := &Connection{
BaseConnection: common.NewBaseConnection(fs.ConnectionID(), common.ProtocolWebDAV, user, fs),
}
testFilePath := filepath.Join(user.HomeDir, testFile)
baseTransfer := common.NewBaseTransfer(nil, connection.BaseConnection, nil, testFilePath, testFile,
common.TransferUpload, 0, 0, false)
davFile := newWebDavFile(baseTransfer, nil, nil, 0, nil, fs)
assert.False(t, davFile.isDir())
p := make([]byte, 1)
_, err := davFile.Read(p)
assert.EqualError(t, err, common.ErrOpUnsupported.Error())
r, w, err := pipeat.Pipe()
assert.NoError(t, err)
davFile = newWebDavFile(baseTransfer, nil, r, 0, nil, fs)
davFile.Connection.RemoveTransfer(davFile.BaseTransfer)
davFile = newWebDavFile(baseTransfer, vfs.NewPipeWriter(w), nil, 0, nil, fs)
davFile.Connection.RemoveTransfer(davFile.BaseTransfer)
err = r.Close()
assert.NoError(t, err)
err = w.Close()
assert.NoError(t, err)
baseTransfer = common.NewBaseTransfer(nil, connection.BaseConnection, nil, testFilePath, testFile,
common.TransferDownload, 0, 0, false)
davFile = newWebDavFile(baseTransfer, nil, nil, 0, nil, fs)
_, err = davFile.Read(p)
assert.True(t, os.IsNotExist(err))
_, err = davFile.Stat()
assert.True(t, os.IsNotExist(err))
baseTransfer = common.NewBaseTransfer(nil, connection.BaseConnection, nil, testFilePath, testFile,
common.TransferDownload, 0, 0, false)
err = ioutil.WriteFile(testFilePath, []byte(""), os.ModePerm)
assert.NoError(t, err)
f, err := os.Open(testFilePath)
if assert.NoError(t, err) {
err = f.Close()
assert.NoError(t, err)
}
davFile = newWebDavFile(baseTransfer, nil, nil, 0, nil, fs)
davFile.reader = f
err = davFile.Close()
assert.EqualError(t, err, common.ErrGenericFailure.Error())
err = davFile.Close()
assert.EqualError(t, err, common.ErrTransferClosed.Error())
_, err = davFile.Read(p)
assert.Error(t, err)
info, err := davFile.Stat()
if assert.NoError(t, err) {
assert.Equal(t, int64(0), info.Size())
}
baseTransfer = common.NewBaseTransfer(nil, connection.BaseConnection, nil, testFilePath, testFile,
common.TransferDownload, 0, 0, false)
davFile = newWebDavFile(baseTransfer, nil, nil, 0, nil, fs)
davFile.writer = f
err = davFile.Close()
assert.EqualError(t, err, common.ErrGenericFailure.Error())
err = os.Remove(testFilePath)
assert.NoError(t, err)
}
func TestTransferSeek(t *testing.T) {
user := dataprovider.User{
HomeDir: filepath.Clean(os.TempDir()),
}
user.Permissions = make(map[string][]string)
user.Permissions["/"] = []string{dataprovider.PermAny}
fs := vfs.NewOsFs("connID", user.HomeDir, nil)
connection := &Connection{
BaseConnection: common.NewBaseConnection(fs.ConnectionID(), common.ProtocolWebDAV, user, fs),
}
testFilePath := filepath.Join(user.HomeDir, testFile)
baseTransfer := common.NewBaseTransfer(nil, connection.BaseConnection, nil, testFilePath, testFile,
common.TransferUpload, 0, 0, false)
davFile := newWebDavFile(baseTransfer, nil, nil, 0, nil, fs)
_, err := davFile.Seek(0, io.SeekStart)
assert.EqualError(t, err, common.ErrOpUnsupported.Error())
err = davFile.Close()
assert.NoError(t, err)
baseTransfer = common.NewBaseTransfer(nil, connection.BaseConnection, nil, testFilePath, testFile,
common.TransferDownload, 0, 0, false)
davFile = newWebDavFile(baseTransfer, nil, nil, 0, nil, fs)
_, err = davFile.Seek(0, io.SeekCurrent)
assert.True(t, os.IsNotExist(err))
davFile.Connection.RemoveTransfer(davFile.BaseTransfer)
err = ioutil.WriteFile(testFilePath, []byte("content"), os.ModePerm)
assert.NoError(t, err)
f, err := os.Open(testFilePath)
if assert.NoError(t, err) {
err = f.Close()
assert.NoError(t, err)
}
baseTransfer = common.NewBaseTransfer(f, connection.BaseConnection, nil, testFilePath, testFile,
common.TransferDownload, 0, 0, false)
davFile = newWebDavFile(baseTransfer, nil, nil, 0, nil, fs)
_, err = davFile.Seek(0, io.SeekStart)
assert.Error(t, err)
davFile.Connection.RemoveTransfer(davFile.BaseTransfer)
baseTransfer = common.NewBaseTransfer(nil, connection.BaseConnection, nil, testFilePath, testFile,
common.TransferDownload, 0, 0, false)
davFile = newWebDavFile(baseTransfer, nil, nil, 0, nil, fs)
davFile.reader = f
res, err := davFile.Seek(0, io.SeekStart)
assert.NoError(t, err)
assert.Equal(t, int64(0), res)
davFile.Connection.RemoveTransfer(davFile.BaseTransfer)
info, err := os.Stat(testFilePath)
assert.NoError(t, err)
davFile = newWebDavFile(baseTransfer, nil, nil, 0, info, fs)
davFile.reader = f
res, err = davFile.Seek(0, io.SeekEnd)
assert.NoError(t, err)
assert.Equal(t, int64(7), res)
davFile = newWebDavFile(baseTransfer, nil, nil, 0, info, fs)
davFile.reader = f
davFile.fs = newMockOsFs(nil, true, fs.ConnectionID(), user.GetHomeDir())
res, err = davFile.Seek(2, io.SeekStart)
assert.NoError(t, err)
assert.Equal(t, int64(2), res)
davFile = newWebDavFile(baseTransfer, nil, nil, 0, info, fs)
davFile.fs = newMockOsFs(nil, true, fs.ConnectionID(), user.GetHomeDir())
res, err = davFile.Seek(2, io.SeekEnd)
assert.NoError(t, err)
assert.Equal(t, int64(5), res)
davFile = newWebDavFile(baseTransfer, nil, nil, 0, nil, fs)
res, err = davFile.Seek(2, io.SeekEnd)
assert.EqualError(t, err, "unable to get file size, seek from end not possible")
assert.Equal(t, int64(0), res)
assert.Len(t, common.Connections.GetStats(), 0)
err = os.Remove(testFilePath)
assert.NoError(t, err)
}

241
webdavd/server.go Normal file
View file

@ -0,0 +1,241 @@
package webdavd
import (
"context"
"crypto/tls"
"errors"
"fmt"
"net/http"
"path"
"path/filepath"
"strings"
"time"
"github.com/rs/xid"
"golang.org/x/net/webdav"
"github.com/drakkan/sftpgo/common"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/metrics"
"github.com/drakkan/sftpgo/utils"
)
var (
err401 = errors.New("Unauthorized")
err403 = errors.New("Forbidden")
xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
xRealIP = http.CanonicalHeaderKey("X-Real-IP")
)
type webDavServer struct {
config *Configuration
certMgr *common.CertManager
}
func newServer(config *Configuration, configDir string) (*webDavServer, error) {
var err error
server := &webDavServer{
config: config,
certMgr: nil,
}
certificateFile := getConfigPath(config.CertificateFile, configDir)
certificateKeyFile := getConfigPath(config.CertificateKeyFile, configDir)
if len(certificateFile) > 0 && len(certificateKeyFile) > 0 {
server.certMgr, err = common.NewCertManager(certificateFile, certificateKeyFile, logSender)
if err != nil {
return server, err
}
}
return server, nil
}
func (s *webDavServer) listenAndServe() error {
httpServer := &http.Server{
Addr: fmt.Sprintf("%s:%d", s.config.BindAddress, s.config.BindPort),
Handler: server,
ReadHeaderTimeout: 30 * time.Second,
IdleTimeout: 120 * time.Second,
MaxHeaderBytes: 1 << 16, // 64KB
}
if s.certMgr != nil {
httpServer.TLSConfig = &tls.Config{
GetCertificate: s.certMgr.GetCertificateFunc(),
}
return httpServer.ListenAndServeTLS("", "")
}
return httpServer.ListenAndServe()
}
// ServeHTTP implements the http.Handler interface
func (s *webDavServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
checkRemoteAddress(r)
if err := common.Config.ExecutePostConnectHook(r.RemoteAddr, common.ProtocolWebDAV); err != nil {
http.Error(w, common.ErrConnectionDenied.Error(), http.StatusForbidden)
return
}
user, err := s.authenticate(r)
if err != nil {
w.Header().Set("WWW-Authenticate", "Basic realm=\"SFTPGo WebDAV\"")
http.Error(w, err401.Error(), http.StatusUnauthorized)
return
}
connectionID, err := s.validateUser(user, r)
if err != nil {
updateLoginMetrics(user.Username, r.RemoteAddr, err)
http.Error(w, err.Error(), http.StatusForbidden)
return
}
fs, err := user.GetFilesystem(connectionID)
if err != nil {
updateLoginMetrics(user.Username, r.RemoteAddr, err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
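// err is nil at this point, so the call below records a successful login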
updateLoginMetrics(user.Username, r.RemoteAddr, err)
ctx := context.WithValue(r.Context(), requestIDKey, connectionID)
ctx = context.WithValue(ctx, requestStartKey, time.Now())
connection := &Connection{
BaseConnection: common.NewBaseConnection(connectionID, common.ProtocolWebDAV, user, fs),
request: r,
}
common.Connections.Add(connection)
defer common.Connections.Remove(connection.GetID())
connection.Fs.CheckRootPath(connection.GetUsername(), user.GetUID(), user.GetGID())
connection.Log(logger.LevelInfo, "User id: %d, logged in with WebDAV, method: %v, username: %#v, home_dir: %#v, remote addr: %#v",
user.ID, r.Method, user.Username, user.HomeDir, r.RemoteAddr)
dataprovider.UpdateLastLogin(user) //nolint:errcheck
prefix := path.Join("/", user.Username)
// see RFC 4918, section 9.4: GET on a collection is left to the server, so we answer it as a PROPFIND (Depth 1) to return the directory listing
if r.Method == "GET" {
p := strings.TrimPrefix(path.Clean(r.URL.Path), prefix)
info, err := connection.Stat(ctx, p)
if err == nil && info.IsDir() {
r.Method = "PROPFIND"
if r.Header.Get("Depth") == "" {
r.Header.Add("Depth", "1")
}
}
}
handler := webdav.Handler{
Prefix: prefix,
FileSystem: connection,
LockSystem: webdav.NewMemLS(),
Logger: writeLog,
}
handler.ServeHTTP(w, r.WithContext(ctx))
}
func (s *webDavServer) authenticate(r *http.Request) (dataprovider.User, error) {
var user dataprovider.User
var err error
username, password, ok := r.BasicAuth()
if !ok {
return user, err401
}
user, err = dataprovider.CheckUserAndPass(username, password, utils.GetIPFromRemoteAddress(r.RemoteAddr))
if err != nil {
updateLoginMetrics(username, r.RemoteAddr, err)
return user, err
}
return user, err
}
func (s *webDavServer) validateUser(user dataprovider.User, r *http.Request) (string, error) {
connID := xid.New().String()
connectionID := fmt.Sprintf("%v_%v", common.ProtocolWebDAV, connID)
uriSegments := strings.Split(path.Clean(r.URL.Path), "/")
if len(uriSegments) < 2 || uriSegments[1] != user.Username {
logger.Debug(logSender, connectionID, "URI %#v not allowed for user %#v", r.URL.Path, user.Username)
return connID, err403
}
if !filepath.IsAbs(user.HomeDir) {
logger.Warn(logSender, connectionID, "user %#v has an invalid home dir: %#v. Home dir must be an absolute path, login not allowed",
user.Username, user.HomeDir)
return connID, fmt.Errorf("cannot login user with invalid home dir: %#v", user.HomeDir)
}
if user.MaxSessions > 0 {
activeSessions := common.Connections.GetActiveSessions(user.Username)
if activeSessions >= user.MaxSessions {
logger.Debug(logSender, connID, "authentication refused for user: %#v, too many open sessions: %v/%v", user.Username,
activeSessions, user.MaxSessions)
return connID, fmt.Errorf("too many open sessions: %v", activeSessions)
}
}
if dataprovider.GetQuotaTracking() > 0 && user.HasOverlappedMappedPaths() {
logger.Debug(logSender, connectionID, "cannot login user %#v, overlapping mapped folders are allowed only with quota tracking disabled",
user.Username)
return connID, errors.New("overlapping mapped folders are allowed only with quota tracking disabled")
}
if !user.IsLoginFromAddrAllowed(r.RemoteAddr) {
logger.Debug(logSender, connectionID, "cannot login user %#v, remote address is not allowed: %v", user.Username, r.RemoteAddr)
return connID, fmt.Errorf("Login for user %#v is not allowed from this address: %v", user.Username, r.RemoteAddr)
}
return connID, nil
}
func writeLog(r *http.Request, err error) {
scheme := "http"
if r.TLS != nil {
scheme = "https"
}
fields := map[string]interface{}{
"remote_addr": r.RemoteAddr,
"proto": r.Proto,
"method": r.Method,
"user_agent": r.UserAgent(),
"uri": fmt.Sprintf("%s://%s%s", scheme, r.Host, r.RequestURI)}
if reqID, ok := r.Context().Value(requestIDKey).(string); ok {
fields["request_id"] = reqID
}
if reqStart, ok := r.Context().Value(requestStartKey).(time.Time); ok {
fields["elapsed_ms"] = time.Since(reqStart).Nanoseconds() / 1000000
}
logger.GetLogger().Info().
Timestamp().
Str("sender", logSender).
Fields(fields).
Err(err).
Msg("")
}
func checkRemoteAddress(r *http.Request) {
if common.Config.ProxyProtocol != 0 {
return
}
var ip string
if xrip := r.Header.Get(xRealIP); xrip != "" {
ip = xrip
} else if xff := r.Header.Get(xForwardedFor); xff != "" {
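// X-Forwarded-For may hold a comma-separated list; keep only the first entry, the original client IP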
i := strings.Index(xff, ", ")
if i == -1 {
i = len(xff)
}
ip = strings.TrimSpace(xff[:i])
}
if len(ip) > 0 {
r.RemoteAddr = ip
}
}
func updateLoginMetrics(username, remoteAddress string, err error) {
metrics.AddLoginAttempt(dataprovider.WebDavLoginMethodPassword)
if err != nil {
logger.ConnectionFailedLog(username, utils.GetIPFromRemoteAddress(remoteAddress),
dataprovider.WebDavLoginMethodPassword, err.Error())
}
metrics.AddLoginResult(dataprovider.WebDavLoginMethodPassword, err)
}
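
To see the request flow above end to end, here is a minimal client-side sketch (not part of this commit): it issues a PROPFIND with Basic auth against the per-user prefix that validateUser and the webdav.Handler Prefix enforce. The address, port and credentials are placeholders; on success a 207 Multi-Status response with the listing is expected, and thanks to the GET-to-PROPFIND translation above a plain GET on a collection returns the same listing.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	// Placeholder server address and credentials, for illustration only.
	const baseURL = "http://127.0.0.1:8120"
	const username, password = "user1", "secret"

	// PROPFIND on the user's root: the handler is mounted under "/<username>".
	body := strings.NewReader(`<?xml version="1.0" encoding="utf-8"?><propfind xmlns="DAV:"><allprop/></propfind>`)
	req, err := http.NewRequest("PROPFIND", baseURL+"/"+username+"/", body)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth(username, password)
	req.Header.Set("Depth", "1") // list only the collection's direct children
	req.Header.Set("Content-Type", "application/xml")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	data, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status) // expect "207 Multi-Status" on success
	fmt.Println(string(data))
}
```

Any standard WebDAV client (cadaver, davfs2, the Windows or macOS built-in clients) speaks the same protocol, as long as it is pointed at the "/username" prefix.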

67
webdavd/webdavd.go Normal file
View file

@ -0,0 +1,67 @@
// Package webdavd implements the WebDAV protocol
package webdavd
import (
"path/filepath"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/utils"
)
type ctxReqParams int
const (
requestIDKey ctxReqParams = iota
requestStartKey
)
const (
logSender = "webdavd"
)
var (
server *webDavServer
)
// Configuration defines the configuration for the WebDAV server
type Configuration struct {
// The port used for serving WebDAV requests
BindPort int `json:"bind_port" mapstructure:"bind_port"`
// The address to listen on. A blank value means listen on all available network interfaces.
BindAddress string `json:"bind_address" mapstructure:"bind_address"`
// If files containing a certificate and matching private key for the server are provided the server will expect
// HTTPS connections.
// Certificate and key files can be reloaded on demand sending a "SIGHUP" signal on Unix based systems and a
// "paramchange" request to the running service on Windows.
CertificateFile string `json:"certificate_file" mapstructure:"certificate_file"`
CertificateKeyFile string `json:"certificate_key_file" mapstructure:"certificate_key_file"`
}
// Initialize configures and starts the WebDAV server
func (c *Configuration) Initialize(configDir string) error {
var err error
logger.Debug(logSender, "", "initializing WebDAV server with config %+v", *c)
server, err = newServer(c, configDir)
if err != nil {
return err
}
return server.listenAndServe()
}
// ReloadTLSCertificate reloads the TLS certificate and key from the configured paths
func ReloadTLSCertificate() error {
if server != nil && server.certMgr != nil {
return server.certMgr.LoadCertificate(logSender)
}
return nil
}
func getConfigPath(name, configDir string) string {
if !utils.IsFileInputValid(name) {
return ""
}
if len(name) > 0 && !filepath.IsAbs(name) {
return filepath.Join(configDir, name)
}
return name
}
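
As a rough bootstrap sketch (not how SFTPGo itself wires the service, which goes through its config and service packages), the exported surface above could be used as follows. The port, address and config directory are placeholder values, and Initialize blocks because it calls listenAndServe; in the full application the data provider and the common package are set up first, otherwise the authentication performed in ServeHTTP has nothing to check against.

```go
package main

import (
	"log"

	"github.com/drakkan/sftpgo/webdavd"
)

func main() {
	cfg := webdavd.Configuration{
		BindPort:    8120,        // placeholder port
		BindAddress: "127.0.0.1", // listen on localhost only; empty means all interfaces
		// CertificateFile/CertificateKeyFile left empty: serve plain HTTP.
	}
	// Initialize starts listenAndServe and only returns on error,
	// so it blocks the calling goroutine.
	if err := cfg.Initialize("/etc/sftpgo"); err != nil { // placeholder config dir
		log.Fatalf("webdavd terminated: %v", err)
	}
}
```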

1114
webdavd/webdavd_test.go Normal file

File diff suppressed because it is too large