refactoring: add common package
The common package defines the interfaces that a protocol must implement and contains code that can be shared among the supported protocols. This should make it easier to support new protocols.
This commit is contained in:
parent
ded8fad5e4
commit
4e41a5583d
62 changed files with 4893 additions and 3140 deletions
1
.github/workflows/development.yml
vendored
@@ -58,6 +58,7 @@ jobs:
       - name: Run test cases using bolt provider
         run: |
           go test -v ./config -covermode=atomic
+          go test -v ./common -covermode=atomic
           go test -v ./httpd -covermode=atomic
           go test -v ./sftpd -covermode=atomic
         env:
@@ -29,14 +29,14 @@ Zsh:
 $ source <(sftpgo gen completion zsh)

 # To load completions for each session, execute once:
-$ sftpgo completion zsh > "${fpath[1]}/_sftpgo"
+$ sftpgo gen completion zsh > "${fpath[1]}/_sftpgo"

 Fish:

 $ sftpgo gen completion fish | source

 # To load completions for each session, execute once:
-$ sftpgo completion fish > ~/.config/fish/completions/sftpgo.fish
+$ sftpgo gen completion fish > ~/.config/fish/completions/sftpgo.fish
 `,
 DisableFlagsInUseLine: true,
 ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
157
common/actions.go
Normal file
@@ -0,0 +1,157 @@
package common

import (
    "bytes"
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "net/http"
    "net/url"
    "os"
    "os/exec"
    "path/filepath"
    "strings"
    "time"

    "github.com/drakkan/sftpgo/dataprovider"
    "github.com/drakkan/sftpgo/httpclient"
    "github.com/drakkan/sftpgo/logger"
    "github.com/drakkan/sftpgo/utils"
)

var (
    errUnconfiguredAction    = errors.New("no hook is configured for this action")
    errNoHook                = errors.New("unable to execute action, no hook defined")
    errUnexpectedHTTResponse = errors.New("unexpected HTTP response code")
)

// ProtocolActions defines the action to execute on file operations and SSH commands
type ProtocolActions struct {
    // Valid values are download, upload, pre-delete, delete, rename, ssh_cmd. Empty slice to disable
    ExecuteOn []string `json:"execute_on" mapstructure:"execute_on"`
    // Absolute path to an external program or an HTTP URL
    Hook string `json:"hook" mapstructure:"hook"`
}

// actionNotification defines a notification for a Protocol Action
type actionNotification struct {
    Action     string `json:"action"`
    Username   string `json:"username"`
    Path       string `json:"path"`
    TargetPath string `json:"target_path,omitempty"`
    SSHCmd     string `json:"ssh_cmd,omitempty"`
    FileSize   int64  `json:"file_size,omitempty"`
    FsProvider int    `json:"fs_provider"`
    Bucket     string `json:"bucket,omitempty"`
    Endpoint   string `json:"endpoint,omitempty"`
    Status     int    `json:"status"`
    Protocol   string `json:"protocol"`
}

// SSHCommandActionNotification executes the defined action for the specified SSH command
func SSHCommandActionNotification(user *dataprovider.User, filePath, target, sshCmd string, err error) {
    action := newActionNotification(user, operationSSHCmd, filePath, target, sshCmd, ProtocolSSH, 0, err)
    go action.execute() //nolint:errcheck
}

func newActionNotification(user *dataprovider.User, operation, filePath, target, sshCmd, protocol string, fileSize int64,
    err error) actionNotification {
    bucket := ""
    endpoint := ""
    status := 1
    if user.FsConfig.Provider == 1 {
        bucket = user.FsConfig.S3Config.Bucket
        endpoint = user.FsConfig.S3Config.Endpoint
    } else if user.FsConfig.Provider == 2 {
        bucket = user.FsConfig.GCSConfig.Bucket
    }
    if err == ErrQuotaExceeded {
        status = 2
    } else if err != nil {
        status = 0
    }
    return actionNotification{
        Action:     operation,
        Username:   user.Username,
        Path:       filePath,
        TargetPath: target,
        SSHCmd:     sshCmd,
        FileSize:   fileSize,
        FsProvider: user.FsConfig.Provider,
        Bucket:     bucket,
        Endpoint:   endpoint,
        Status:     status,
        Protocol:   protocol,
    }
}

func (a *actionNotification) asJSON() []byte {
    res, _ := json.Marshal(a)
    return res
}

func (a *actionNotification) asEnvVars() []string {
    return []string{fmt.Sprintf("SFTPGO_ACTION=%v", a.Action),
        fmt.Sprintf("SFTPGO_ACTION_USERNAME=%v", a.Username),
        fmt.Sprintf("SFTPGO_ACTION_PATH=%v", a.Path),
        fmt.Sprintf("SFTPGO_ACTION_TARGET=%v", a.TargetPath),
        fmt.Sprintf("SFTPGO_ACTION_SSH_CMD=%v", a.SSHCmd),
        fmt.Sprintf("SFTPGO_ACTION_FILE_SIZE=%v", a.FileSize),
        fmt.Sprintf("SFTPGO_ACTION_FS_PROVIDER=%v", a.FsProvider),
        fmt.Sprintf("SFTPGO_ACTION_BUCKET=%v", a.Bucket),
        fmt.Sprintf("SFTPGO_ACTION_ENDPOINT=%v", a.Endpoint),
        fmt.Sprintf("SFTPGO_ACTION_STATUS=%v", a.Status),
        fmt.Sprintf("SFTPGO_ACTION_PROTOCOL=%v", a.Protocol),
    }
}

func (a *actionNotification) executeNotificationCommand() error {
    if !filepath.IsAbs(Config.Actions.Hook) {
        err := fmt.Errorf("invalid notification command %#v", Config.Actions.Hook)
        logger.Warn(a.Protocol, "", "unable to execute notification command: %v", err)
        return err
    }
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()
    cmd := exec.CommandContext(ctx, Config.Actions.Hook, a.Action, a.Username, a.Path, a.TargetPath, a.SSHCmd)
    cmd.Env = append(os.Environ(), a.asEnvVars()...)
    startTime := time.Now()
    err := cmd.Run()
    logger.Debug(a.Protocol, "", "executed command %#v with arguments: %#v, %#v, %#v, %#v, %#v, elapsed: %v, error: %v",
        Config.Actions.Hook, a.Action, a.Username, a.Path, a.TargetPath, a.SSHCmd, time.Since(startTime), err)
    return err
}

func (a *actionNotification) execute() error {
    if !utils.IsStringInSlice(a.Action, Config.Actions.ExecuteOn) {
        return errUnconfiguredAction
    }
    if len(Config.Actions.Hook) == 0 {
        logger.Warn(a.Protocol, "", "Unable to send notification, no hook is defined")
        return errNoHook
    }
    if strings.HasPrefix(Config.Actions.Hook, "http") {
        var url *url.URL
        url, err := url.Parse(Config.Actions.Hook)
        if err != nil {
            logger.Warn(a.Protocol, "", "Invalid hook %#v for operation %#v: %v", Config.Actions.Hook, a.Action, err)
            return err
        }
        startTime := time.Now()
        httpClient := httpclient.GetHTTPClient()
        resp, err := httpClient.Post(url.String(), "application/json", bytes.NewBuffer(a.asJSON()))
        respCode := 0
        if err == nil {
            respCode = resp.StatusCode
            resp.Body.Close()
            if respCode != http.StatusOK {
                err = errUnexpectedHTTResponse
            }
        }
        logger.Debug(a.Protocol, "", "notified operation %#v to URL: %v status code: %v, elapsed: %v err: %v",
            a.Action, url.String(), respCode, time.Since(startTime), err)
        return err
    }
    return a.executeNotificationCommand()
}
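For context, when Config.Actions.Hook is an HTTP URL the notification above is POSTed as JSON and any response other than 200 is reported as errUnexpectedHTTResponse. A minimal receiver might look like the sketch below; it is not part of this commit, the listen address is arbitrary, and the struct simply mirrors the JSON tags of actionNotification (status 1 means success, 2 quota exceeded, 0 a generic error).

package main

import (
    "encoding/json"
    "log"
    "net/http"
)

// notification mirrors the actionNotification JSON fields defined in common/actions.go.
type notification struct {
    Action     string `json:"action"`
    Username   string `json:"username"`
    Path       string `json:"path"`
    TargetPath string `json:"target_path,omitempty"`
    SSHCmd     string `json:"ssh_cmd,omitempty"`
    FileSize   int64  `json:"file_size,omitempty"`
    FsProvider int    `json:"fs_provider"`
    Bucket     string `json:"bucket,omitempty"`
    Endpoint   string `json:"endpoint,omitempty"`
    Status     int    `json:"status"`
    Protocol   string `json:"protocol"`
}

func main() {
    http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        var n notification
        if err := json.NewDecoder(r.Body).Decode(&n); err != nil {
            // any non-200 reply is logged by SFTPGo as errUnexpectedHTTResponse
            http.Error(w, err.Error(), http.StatusBadRequest)
            return
        }
        log.Printf("action %q by %q on %q (status %d)", n.Action, n.Username, n.Path, n.Status)
        w.WriteHeader(http.StatusOK)
    })
    log.Fatal(http.ListenAndServe("127.0.0.1:8000", nil)) // example address only
}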
181
common/actions_test.go
Normal file
@@ -0,0 +1,181 @@
package common

import (
    "errors"
    "fmt"
    "io/ioutil"
    "os"
    "os/exec"
    "path/filepath"
    "runtime"
    "testing"

    "github.com/stretchr/testify/assert"

    "github.com/drakkan/sftpgo/dataprovider"
    "github.com/drakkan/sftpgo/vfs"
)

func TestNewActionNotification(t *testing.T) {
    user := &dataprovider.User{
        Username: "username",
    }
    user.FsConfig.Provider = 0
    user.FsConfig.S3Config = vfs.S3FsConfig{
        Bucket:   "s3bucket",
        Endpoint: "endpoint",
    }
    user.FsConfig.GCSConfig = vfs.GCSFsConfig{
        Bucket: "gcsbucket",
    }
    a := newActionNotification(user, operationDownload, "path", "target", "", ProtocolSFTP, 123, errors.New("fake error"))
    assert.Equal(t, user.Username, a.Username)
    assert.Equal(t, 0, len(a.Bucket))
    assert.Equal(t, 0, len(a.Endpoint))
    assert.Equal(t, 0, a.Status)

    user.FsConfig.Provider = 1
    a = newActionNotification(user, operationDownload, "path", "target", "", ProtocolSSH, 123, nil)
    assert.Equal(t, "s3bucket", a.Bucket)
    assert.Equal(t, "endpoint", a.Endpoint)
    assert.Equal(t, 1, a.Status)

    user.FsConfig.Provider = 2
    a = newActionNotification(user, operationDownload, "path", "target", "", ProtocolSCP, 123, ErrQuotaExceeded)
    assert.Equal(t, "gcsbucket", a.Bucket)
    assert.Equal(t, 0, len(a.Endpoint))
    assert.Equal(t, 2, a.Status)
}

func TestActionHTTP(t *testing.T) {
    actionsCopy := Config.Actions

    Config.Actions = ProtocolActions{
        ExecuteOn: []string{operationDownload},
        Hook:      fmt.Sprintf("http://%v", httpAddr),
    }
    user := &dataprovider.User{
        Username: "username",
    }
    a := newActionNotification(user, operationDownload, "path", "target", "", ProtocolSFTP, 123, nil)
    err := a.execute()
    assert.NoError(t, err)

    Config.Actions.Hook = "http://invalid:1234"
    err = a.execute()
    assert.Error(t, err)

    Config.Actions.Hook = fmt.Sprintf("http://%v/404", httpAddr)
    err = a.execute()
    if assert.Error(t, err) {
        assert.EqualError(t, err, errUnexpectedHTTResponse.Error())
    }

    Config.Actions = actionsCopy
}

func TestActionCMD(t *testing.T) {
    if runtime.GOOS == osWindows {
        t.Skip("this test is not available on Windows")
    }
    actionsCopy := Config.Actions

    hookCmd, err := exec.LookPath("true")
    assert.NoError(t, err)

    Config.Actions = ProtocolActions{
        ExecuteOn: []string{operationDownload},
        Hook:      hookCmd,
    }
    user := &dataprovider.User{
        Username: "username",
    }
    a := newActionNotification(user, operationDownload, "path", "target", "", ProtocolSFTP, 123, nil)
    err = a.execute()
    assert.NoError(t, err)

    SSHCommandActionNotification(user, "path", "target", "sha1sum", nil)

    Config.Actions = actionsCopy
}

func TestWrongActions(t *testing.T) {
    actionsCopy := Config.Actions

    badCommand := "/bad/command"
    if runtime.GOOS == osWindows {
        badCommand = "C:\\bad\\command"
    }
    Config.Actions = ProtocolActions{
        ExecuteOn: []string{operationUpload},
        Hook:      badCommand,
    }
    user := &dataprovider.User{
        Username: "username",
    }

    a := newActionNotification(user, operationUpload, "", "", "", ProtocolSFTP, 123, nil)
    err := a.execute()
    assert.Error(t, err, "action with bad command must fail")

    a.Action = operationDelete
    err = a.execute()
    assert.EqualError(t, err, errUnconfiguredAction.Error())

    Config.Actions.Hook = "http://foo\x7f.com/"
    a.Action = operationUpload
    err = a.execute()
    assert.Error(t, err, "action with bad url must fail")

    Config.Actions.Hook = ""
    err = a.execute()
    if assert.Error(t, err) {
        assert.EqualError(t, err, errNoHook.Error())
    }

    Config.Actions.Hook = "relative path"
    err = a.execute()
    if assert.Error(t, err) {
        assert.EqualError(t, err, fmt.Sprintf("invalid notification command %#v", Config.Actions.Hook))
    }

    Config.Actions = actionsCopy
}

func TestPreDeleteAction(t *testing.T) {
    if runtime.GOOS == osWindows {
        t.Skip("this test is not available on Windows")
    }
    actionsCopy := Config.Actions

    hookCmd, err := exec.LookPath("true")
    assert.NoError(t, err)
    Config.Actions = ProtocolActions{
        ExecuteOn: []string{operationPreDelete},
        Hook:      hookCmd,
    }
    homeDir := filepath.Join(os.TempDir(), "test_user")
    err = os.MkdirAll(homeDir, os.ModePerm)
    assert.NoError(t, err)
    user := dataprovider.User{
        Username: "username",
        HomeDir:  homeDir,
    }
    user.Permissions = make(map[string][]string)
    user.Permissions["/"] = []string{dataprovider.PermAny}
    fs := vfs.NewOsFs("id", homeDir, nil)
    c := NewBaseConnection("id", ProtocolSFTP, user, fs)

    testfile := filepath.Join(user.HomeDir, "testfile")
    err = ioutil.WriteFile(testfile, []byte("test"), 0666)
    assert.NoError(t, err)
    info, err := os.Stat(testfile)
    assert.NoError(t, err)
    err = c.RemoveFile(testfile, "testfile", info)
    assert.NoError(t, err)
    assert.FileExists(t, testfile)

    os.RemoveAll(homeDir)

    Config.Actions = actionsCopy
}
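When the hook is an executable rather than a URL, executeNotificationCommand passes the action details both as command line arguments and as SFTPGO_ACTION_* environment variables, with a 30-second timeout. A minimal hook program is sketched below; it only assumes the environment variable names set in asEnvVars above, and the log path is an arbitrary example.

package main

import (
    "fmt"
    "os"
)

func main() {
    // these variable names come from actionNotification.asEnvVars in common/actions.go
    action := os.Getenv("SFTPGO_ACTION")
    username := os.Getenv("SFTPGO_ACTION_USERNAME")
    path := os.Getenv("SFTPGO_ACTION_PATH")
    status := os.Getenv("SFTPGO_ACTION_STATUS") // 1 = ok, 2 = quota exceeded, 0 = generic error

    // append a line to a local audit log; a non-zero exit marks the hook as failed
    f, err := os.OpenFile("/tmp/sftpgo-actions.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
    if err != nil {
        os.Exit(1)
    }
    defer f.Close()
    fmt.Fprintf(f, "%s %s %s status=%s\n", action, username, path, status)
}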
554
common/common.go
Normal file
@@ -0,0 +1,554 @@
// Package common defines code shared among file transfer packages and protocols
package common

import (
    "errors"
    "fmt"
    "net"
    "os"
    "sync"
    "time"

    "github.com/pires/go-proxyproto"

    "github.com/drakkan/sftpgo/logger"
    "github.com/drakkan/sftpgo/metrics"
    "github.com/drakkan/sftpgo/utils"
)

// constants
const (
    uploadLogSender          = "Upload"
    downloadLogSender        = "Download"
    renameLogSender          = "Rename"
    rmdirLogSender           = "Rmdir"
    mkdirLogSender           = "Mkdir"
    symlinkLogSender         = "Symlink"
    removeLogSender          = "Remove"
    chownLogSender           = "Chown"
    chmodLogSender           = "Chmod"
    chtimesLogSender         = "Chtimes"
    operationDownload        = "download"
    operationUpload          = "upload"
    operationDelete          = "delete"
    operationPreDelete       = "pre-delete"
    operationRename          = "rename"
    operationSSHCmd          = "ssh_cmd"
    chtimesFormat            = "2006-01-02T15:04:05" // YYYY-MM-DDTHH:MM:SS
    idleTimeoutCheckInterval = 5 * time.Minute
)

// Stat flags
const (
    StatAttrUIDGID = 1
    StatAttrPerms  = 2
    StatAttrTimes  = 4
)

// Transfer types
const (
    TransferUpload = iota
    TransferDownload
)

// Supported protocols
const (
    ProtocolSFTP = "SFTP"
    ProtocolSCP  = "SCP"
    ProtocolSSH  = "SSH"
)

// Upload modes
const (
    UploadModeStandard = iota
    UploadModeAtomic
    UploadModeAtomicWithResume
)

// errors definitions
var (
    ErrPermissionDenied     = errors.New("permission denied")
    ErrNotExist             = errors.New("no such file or directory")
    ErrOpUnsupported        = errors.New("operation unsupported")
    ErrGenericFailure       = errors.New("failure")
    ErrQuotaExceeded        = errors.New("denying write due to space limit")
    ErrSkipPermissionsCheck = errors.New("permission check skipped")
)

var (
    // Config is the configuration for the supported protocols
    Config Configuration
    // Connections is the list of active connections
    Connections ActiveConnections
    // QuotaScans is the list of active quota scans
    QuotaScans            ActiveScans
    idleTimeoutTicker     *time.Ticker
    idleTimeoutTickerDone chan bool
    supportedProcols      = []string{ProtocolSFTP, ProtocolSCP, ProtocolSSH}
)

// Initialize sets the common configuration
func Initialize(c Configuration) {
    Config = c
    Config.idleTimeoutAsDuration = time.Duration(Config.IdleTimeout) * time.Minute
    if Config.IdleTimeout > 0 {
        startIdleTimeoutTicker(idleTimeoutCheckInterval)
    }
}

func startIdleTimeoutTicker(duration time.Duration) {
    stopIdleTimeoutTicker()
    idleTimeoutTicker = time.NewTicker(duration)
    idleTimeoutTickerDone = make(chan bool)
    go func() {
        for {
            select {
            case <-idleTimeoutTickerDone:
                return
            case <-idleTimeoutTicker.C:
                Connections.checkIdleConnections()
            }
        }
    }()
}

func stopIdleTimeoutTicker() {
    if idleTimeoutTicker != nil {
        idleTimeoutTicker.Stop()
        idleTimeoutTickerDone <- true
        idleTimeoutTicker = nil
    }
}
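Initialize is the entry point a protocol service is expected to call once at startup. The sketch below illustrates such a call; it uses only exported fields of the Configuration type defined later in this file, and the values are arbitrary examples, not SFTPGo defaults.

package main

import "github.com/drakkan/sftpgo/common"

func main() {
    // Example values only; these are not SFTPGo defaults.
    cfg := common.Configuration{
        IdleTimeout: 15,                      // minutes; 0 disables the idle connection checker
        UploadMode:  common.UploadModeAtomic, // upload to a temporary path, rename when done
        Actions: common.ProtocolActions{
            ExecuteOn: []string{"upload", "delete"},
            Hook:      "/usr/local/bin/sftpgo-hook", // absolute path or HTTP URL
        },
    }
    // stores the config in common.Config and starts the idle timeout ticker if IdleTimeout > 0
    common.Initialize(cfg)
}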
// ActiveTransfer defines the interface for the current active transfers
type ActiveTransfer interface {
    GetID() uint64
    GetType() int
    GetSize() int64
    GetVirtualPath() string
    GetStartTime() time.Time
}

// ActiveConnection defines the interface for the current active connections
type ActiveConnection interface {
    GetID() string
    GetUsername() string
    GetRemoteAddress() string
    GetClientVersion() string
    GetProtocol() string
    GetConnectionTime() time.Time
    GetLastActivity() time.Time
    GetCommand() string
    Disconnect() error
    SetConnDeadline()
    AddTransfer(t ActiveTransfer)
    RemoveTransfer(t ActiveTransfer)
    GetTransfers() []ConnectionTransfer
}

// StatAttributes defines the attributes for set stat commands
type StatAttributes struct {
    Mode  os.FileMode
    Atime time.Time
    Mtime time.Time
    UID   int
    GID   int
    Flags int
}

// ConnectionTransfer defines the transfer details to expose
type ConnectionTransfer struct {
    ID            uint64 `json:"-"`
    OperationType string `json:"operation_type"`
    StartTime     int64  `json:"start_time"`
    Size          int64  `json:"size"`
    VirtualPath   string `json:"path"`
}

func (t *ConnectionTransfer) getConnectionTransferAsString() string {
    result := ""
    if t.OperationType == operationUpload {
        result += "UL"
    } else {
        result += "DL"
    }
    result += fmt.Sprintf(" %#v ", t.VirtualPath)
    if t.Size > 0 {
        elapsed := time.Since(utils.GetTimeFromMsecSinceEpoch(t.StartTime))
        speed := float64(t.Size) / float64(utils.GetTimeAsMsSinceEpoch(time.Now())-t.StartTime)
        result += fmt.Sprintf("Size: %#v Elapsed: %#v Speed: \"%.1f KB/s\"", utils.ByteCountSI(t.Size),
            utils.GetDurationAsString(elapsed), speed)
    }
    return result
}

// Configuration defines configuration parameters common to all supported protocols
type Configuration struct {
    // Maximum idle timeout as minutes. If a client is idle for a time that exceeds this setting it will be disconnected.
    // 0 means disabled
    IdleTimeout int `json:"idle_timeout" mapstructure:"idle_timeout"`
    // UploadMode 0 means standard: the files are uploaded directly to the requested path.
    // 1 means atomic: the files are uploaded to a temporary path and renamed to the requested path
    // when the client ends the upload. Atomic mode avoids problems such as a web server that
    // serves partial files while they are still being uploaded.
    // In atomic mode, if there is an upload error the temporary file is deleted, so the requested
    // upload path will not contain a partial file.
    // 2 means atomic with resume support: as atomic, but if there is an upload error the temporary
    // file is renamed to the requested path and not deleted. This way a client can reconnect and resume
    // the upload.
    UploadMode int `json:"upload_mode" mapstructure:"upload_mode"`
    // Actions to execute for SFTP file operations and SSH commands
    Actions ProtocolActions `json:"actions" mapstructure:"actions"`
    // SetstatMode 0 means "normal mode": requests for changing permissions and owner/group are executed.
    // 1 means "ignore mode": requests for changing permissions and owner/group are silently ignored.
    SetstatMode int `json:"setstat_mode" mapstructure:"setstat_mode"`
    // Support for HAProxy PROXY protocol.
    // If you are running SFTPGo behind a proxy server such as HAProxy, AWS ELB or NGINX, you can enable
    // the proxy protocol. It provides a convenient way to safely transport connection information
    // such as a client's address across multiple layers of NAT or TCP proxies to get the real
    // client IP address instead of the proxy IP. Both protocol versions 1 and 2 are supported.
    // - 0 means disabled
    // - 1 means proxy protocol enabled. The proxy header will be used and requests without a proxy header will be accepted.
    // - 2 means proxy protocol required. The proxy header will be used and requests without a proxy header will be rejected.
    // If the proxy protocol is enabled in SFTPGo then you have to enable the protocol in your proxy configuration too,
    // for example for HAProxy add "send-proxy" or "send-proxy-v2" to each server configuration line.
    ProxyProtocol int `json:"proxy_protocol" mapstructure:"proxy_protocol"`
    // List of IP addresses and IP ranges allowed to send the proxy header.
    // If proxy protocol is set to 1 and we receive a proxy header from an IP that is not in the list then the
    // connection will be accepted and the header will be ignored.
    // If proxy protocol is set to 2 and we receive a proxy header from an IP that is not in the list then the
    // connection will be rejected.
    ProxyAllowed          []string `json:"proxy_allowed" mapstructure:"proxy_allowed"`
    idleTimeoutAsDuration time.Duration
}

// IsAtomicUploadEnabled returns true if atomic upload is enabled
func (c *Configuration) IsAtomicUploadEnabled() bool {
    return c.UploadMode == UploadModeAtomic || c.UploadMode == UploadModeAtomicWithResume
}

// GetProxyListener returns a wrapper for the given listener that supports the
// HAProxy Proxy Protocol, or nil if the proxy protocol is not configured
func (c *Configuration) GetProxyListener(listener net.Listener) (*proxyproto.Listener, error) {
    var proxyListener *proxyproto.Listener
    var err error
    if c.ProxyProtocol > 0 {
        var policyFunc func(upstream net.Addr) (proxyproto.Policy, error)
        if c.ProxyProtocol == 1 && len(c.ProxyAllowed) > 0 {
            policyFunc, err = proxyproto.LaxWhiteListPolicy(c.ProxyAllowed)
            if err != nil {
                return nil, err
            }
        }
        if c.ProxyProtocol == 2 {
            if len(c.ProxyAllowed) == 0 {
                policyFunc = func(upstream net.Addr) (proxyproto.Policy, error) {
                    return proxyproto.REQUIRE, nil
                }
            } else {
                policyFunc, err = proxyproto.StrictWhiteListPolicy(c.ProxyAllowed)
                if err != nil {
                    return nil, err
                }
            }
        }
        proxyListener = &proxyproto.Listener{
            Listener: listener,
            Policy:   policyFunc,
        }
    }
    return proxyListener, nil
}
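A protocol server would wrap its TCP listener with GetProxyListener before accepting connections. The sketch below is illustrative only: the listen address is an example, and the fall back to the plain listener when nil is returned (proxy protocol disabled) is an assumption about how a caller would use the method.

package main

import (
    "log"
    "net"

    "github.com/drakkan/sftpgo/common"
)

func main() {
    var l net.Listener
    l, err := net.Listen("tcp", "0.0.0.0:2022") // example address
    if err != nil {
        log.Fatal(err)
    }
    // wrap the listener if the PROXY protocol is enabled (Config.ProxyProtocol > 0)
    proxyListener, err := common.Config.GetProxyListener(l)
    if err != nil {
        log.Fatalf("invalid proxy protocol configuration: %v", err)
    }
    if proxyListener != nil {
        l = proxyListener // Accept() now strips the PROXY header and reports the real client address
    }
    conn, err := l.Accept()
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("client address: %v", conn.RemoteAddr())
    conn.Close()
}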
// ActiveConnections holds the current active connections with the associated transfers
type ActiveConnections struct {
    sync.RWMutex
    connections []ActiveConnection
}

// GetActiveSessions returns the number of active sessions for the given username.
// We return the open sessions for any protocol
func (conns *ActiveConnections) GetActiveSessions(username string) int {
    conns.RLock()
    defer conns.RUnlock()

    numSessions := 0
    for _, c := range conns.connections {
        if c.GetUsername() == username {
            numSessions++
        }
    }
    return numSessions
}

// Add adds a new connection to the active ones
func (conns *ActiveConnections) Add(c ActiveConnection) {
    conns.Lock()
    defer conns.Unlock()

    conns.connections = append(conns.connections, c)
    metrics.UpdateActiveConnectionsSize(len(conns.connections))
    logger.Debug(c.GetProtocol(), c.GetID(), "connection added, num open connections: %v", len(conns.connections))
}

// Remove removes a connection from the active ones
func (conns *ActiveConnections) Remove(c ActiveConnection) {
    conns.Lock()
    defer conns.Unlock()

    indexToRemove := -1
    for i, v := range conns.connections {
        if v.GetID() == c.GetID() {
            indexToRemove = i
            break
        }
    }
    if indexToRemove >= 0 {
        conns.connections[indexToRemove] = conns.connections[len(conns.connections)-1]
        conns.connections[len(conns.connections)-1] = nil
        conns.connections = conns.connections[:len(conns.connections)-1]
        logger.Debug(c.GetProtocol(), c.GetID(), "connection removed, num open connections: %v",
            len(conns.connections))
    } else {
        logger.Warn(c.GetProtocol(), c.GetID(), "connection to remove not found!")
    }
    // We have finished sending data here and most of the time the underlying network connection
    // is already closed. Sometimes a client can still be reading the last sent data, so we set
    // a deadline instead of directly closing the network connection.
    // Setting a deadline on an already closed connection has no effect.
    // We only need to ensure that a connection will not remain indefinitely open and so the
    // underlying file descriptor is not released.
    // This should protect us against buggy clients and edge cases.
    c.SetConnDeadline()
}

// Close closes an active connection.
// It returns true on success
func (conns *ActiveConnections) Close(connectionID string) bool {
    conns.RLock()
    result := false

    for _, c := range conns.connections {
        if c.GetID() == connectionID {
            defer func() {
                err := c.Disconnect()
                logger.Debug(c.GetProtocol(), c.GetID(), "close connection requested, close err: %v", err)
            }()
            result = true
            break
        }
    }

    conns.RUnlock()
    return result
}

func (conns *ActiveConnections) checkIdleConnections() {
    conns.RLock()

    for _, c := range conns.connections {
        idleTime := time.Since(c.GetLastActivity())
        if idleTime > Config.idleTimeoutAsDuration {
            defer func() {
                err := c.Disconnect()
                logger.Debug(c.GetProtocol(), c.GetID(), "close idle connection, idle time: %v, close err: %v", idleTime, err)
            }()
        }
    }

    conns.RUnlock()
}

// GetStats returns stats for active connections
func (conns *ActiveConnections) GetStats() []ConnectionStatus {
    conns.RLock()
    defer conns.RUnlock()

    stats := make([]ConnectionStatus, 0, len(conns.connections))
    for _, c := range conns.connections {
        stat := ConnectionStatus{
            Username:       c.GetUsername(),
            ConnectionID:   c.GetID(),
            ClientVersion:  c.GetClientVersion(),
            RemoteAddress:  c.GetRemoteAddress(),
            ConnectionTime: utils.GetTimeAsMsSinceEpoch(c.GetConnectionTime()),
            LastActivity:   utils.GetTimeAsMsSinceEpoch(c.GetLastActivity()),
            Protocol:       c.GetProtocol(),
            SSHCommand:     c.GetCommand(),
            Transfers:      c.GetTransfers(),
        }
        stats = append(stats, stat)
    }
    return stats
}

// ConnectionStatus returns the status for an active connection
type ConnectionStatus struct {
    // Logged in username
    Username string `json:"username"`
    // Unique identifier for the connection
    ConnectionID string `json:"connection_id"`
    // client's version string
    ClientVersion string `json:"client_version,omitempty"`
    // Remote address for this connection
    RemoteAddress string `json:"remote_address"`
    // Connection time as unix timestamp in milliseconds
    ConnectionTime int64 `json:"connection_time"`
    // Last activity as unix timestamp in milliseconds
    LastActivity int64 `json:"last_activity"`
    // Protocol for this connection: SFTP, SCP, SSH
    Protocol string `json:"protocol"`
    // active uploads/downloads
    Transfers []ConnectionTransfer `json:"active_transfers,omitempty"`
    // for the SSH protocol this is the issued command
    SSHCommand string `json:"ssh_command,omitempty"`
}

// GetConnectionDuration returns the connection duration as string
func (c ConnectionStatus) GetConnectionDuration() string {
    elapsed := time.Since(utils.GetTimeFromMsecSinceEpoch(c.ConnectionTime))
    return utils.GetDurationAsString(elapsed)
}

// GetConnectionInfo returns connection info.
// Protocol, Client Version and RemoteAddress are returned.
// For SSH commands the issued command is returned too.
func (c ConnectionStatus) GetConnectionInfo() string {
    result := fmt.Sprintf("%v. Client: %#v From: %#v", c.Protocol, c.ClientVersion, c.RemoteAddress)
    if c.Protocol == ProtocolSSH && len(c.SSHCommand) > 0 {
        result += fmt.Sprintf(". Command: %#v", c.SSHCommand)
    }
    return result
}

// GetTransfersAsString returns the active transfers as string
func (c ConnectionStatus) GetTransfersAsString() string {
    result := ""
    for _, t := range c.Transfers {
        if len(result) > 0 {
            result += ". "
        }
        result += t.getConnectionTransferAsString()
    }
    return result
}
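GetStats and Close are the building blocks a management layer can use to expose and terminate sessions. The handler below is only an illustration of that pairing, not the actual SFTPGo REST API; the route, query parameter and listen address are hypothetical.

package main

import (
    "encoding/json"
    "log"
    "net/http"

    "github.com/drakkan/sftpgo/common"
)

func main() {
    // GET returns the active connections as JSON, DELETE closes one by its connection id.
    http.HandleFunc("/connections", func(w http.ResponseWriter, r *http.Request) {
        switch r.Method {
        case http.MethodGet:
            w.Header().Set("Content-Type", "application/json")
            json.NewEncoder(w).Encode(common.Connections.GetStats()) //nolint:errcheck
        case http.MethodDelete:
            if !common.Connections.Close(r.URL.Query().Get("id")) {
                http.Error(w, "connection not found", http.StatusNotFound)
            }
        default:
            http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
        }
    })
    log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil)) // example admin address
}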
// ActiveQuotaScan defines an active quota scan for a user home dir
type ActiveQuotaScan struct {
    // Username to which the quota scan refers
    Username string `json:"username"`
    // quota scan start time as unix timestamp in milliseconds
    StartTime int64 `json:"start_time"`
}

// ActiveVirtualFolderQuotaScan defines an active quota scan for a virtual folder
type ActiveVirtualFolderQuotaScan struct {
    // folder path to which the quota scan refers
    MappedPath string `json:"mapped_path"`
    // quota scan start time as unix timestamp in milliseconds
    StartTime int64 `json:"start_time"`
}

// ActiveScans holds the active quota scans
type ActiveScans struct {
    sync.RWMutex
    UserHomeScans []ActiveQuotaScan
    FolderScans   []ActiveVirtualFolderQuotaScan
}

// GetUsersQuotaScans returns the active quota scans for users home directories
func (s *ActiveScans) GetUsersQuotaScans() []ActiveQuotaScan {
    s.RLock()
    defer s.RUnlock()

    scans := make([]ActiveQuotaScan, len(s.UserHomeScans))
    copy(scans, s.UserHomeScans)
    return scans
}

// AddUserQuotaScan adds a user to the ones with active quota scans.
// Returns false if the user has a quota scan already running
func (s *ActiveScans) AddUserQuotaScan(username string) bool {
    s.Lock()
    defer s.Unlock()

    for _, scan := range s.UserHomeScans {
        if scan.Username == username {
            return false
        }
    }
    s.UserHomeScans = append(s.UserHomeScans, ActiveQuotaScan{
        Username:  username,
        StartTime: utils.GetTimeAsMsSinceEpoch(time.Now()),
    })
    return true
}

// RemoveUserQuotaScan removes a user from the ones with active quota scans.
// Returns false if the user has no active quota scans
func (s *ActiveScans) RemoveUserQuotaScan(username string) bool {
    s.Lock()
    defer s.Unlock()

    indexToRemove := -1
    for i, scan := range s.UserHomeScans {
        if scan.Username == username {
            indexToRemove = i
            break
        }
    }
    if indexToRemove >= 0 {
        s.UserHomeScans[indexToRemove] = s.UserHomeScans[len(s.UserHomeScans)-1]
        s.UserHomeScans = s.UserHomeScans[:len(s.UserHomeScans)-1]
        return true
    }
    return false
}

// GetVFoldersQuotaScans returns the active quota scans for virtual folders
func (s *ActiveScans) GetVFoldersQuotaScans() []ActiveVirtualFolderQuotaScan {
    s.RLock()
    defer s.RUnlock()
    scans := make([]ActiveVirtualFolderQuotaScan, len(s.FolderScans))
    copy(scans, s.FolderScans)
    return scans
}

// AddVFolderQuotaScan adds a virtual folder to the ones with active quota scans.
// Returns false if the folder has a quota scan already running
func (s *ActiveScans) AddVFolderQuotaScan(folderPath string) bool {
    s.Lock()
    defer s.Unlock()

    for _, scan := range s.FolderScans {
        if scan.MappedPath == folderPath {
            return false
        }
    }
    s.FolderScans = append(s.FolderScans, ActiveVirtualFolderQuotaScan{
        MappedPath: folderPath,
        StartTime:  utils.GetTimeAsMsSinceEpoch(time.Now()),
    })
    return true
}

// RemoveVFolderQuotaScan removes a folder from the ones with active quota scans.
// Returns false if the folder has no active quota scans
func (s *ActiveScans) RemoveVFolderQuotaScan(folderPath string) bool {
    s.Lock()
    defer s.Unlock()

    indexToRemove := -1
    for i, scan := range s.FolderScans {
        if scan.MappedPath == folderPath {
            indexToRemove = i
            break
        }
    }
    if indexToRemove >= 0 {
        s.FolderScans[indexToRemove] = s.FolderScans[len(s.FolderScans)-1]
        s.FolderScans = s.FolderScans[:len(s.FolderScans)-1]
        return true
    }
    return false
}
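ActiveScans is meant to be used as a guard around a quota scan so the same user (or folder) is never scanned twice concurrently. A minimal sketch of the Add/Remove pairing follows; the scan callback and helper function are hypothetical, shown only to illustrate the intended usage.

package main

import (
    "fmt"

    "github.com/drakkan/sftpgo/common"
)

// runUserQuotaScan is a hypothetical helper showing the intended Add/Remove pairing.
func runUserQuotaScan(username string, scan func() error) error {
    if !common.QuotaScans.AddUserQuotaScan(username) {
        return fmt.Errorf("a quota scan is already running for user %#v", username)
    }
    // always release the guard, even if the scan fails
    defer common.QuotaScans.RemoveUserQuotaScan(username)
    return scan()
}

func main() {
    err := runUserQuotaScan("username", func() error {
        // walk the user's home directory and update the data provider here
        return nil
    })
    fmt.Println(err)
}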
318
common/common_test.go
Normal file
@@ -0,0 +1,318 @@
package common

import (
    "fmt"
    "net"
    "net/http"
    "os"
    "strings"
    "testing"
    "time"

    "github.com/rs/zerolog"
    "github.com/spf13/viper"
    "github.com/stretchr/testify/assert"

    "github.com/drakkan/sftpgo/dataprovider"
    "github.com/drakkan/sftpgo/httpclient"
    "github.com/drakkan/sftpgo/logger"
)

const (
    logSender        = "common_test"
    httpAddr         = "127.0.0.1:9999"
    httpProxyAddr    = "127.0.0.1:7777"
    configDir        = ".."
    osWindows        = "windows"
    userTestUsername = "common_test_username"
    userTestPwd      = "common_test_pwd"
)

type providerConf struct {
    Config dataprovider.Config `json:"data_provider" mapstructure:"data_provider"`
}

type fakeConnection struct {
    *BaseConnection
    sshCommand string
}

func (c *fakeConnection) Disconnect() error {
    Connections.Remove(c)
    return nil
}

func (c *fakeConnection) GetClientVersion() string {
    return ""
}

func (c *fakeConnection) GetCommand() string {
    return c.sshCommand
}

func (c *fakeConnection) GetRemoteAddress() string {
    return ""
}

func (c *fakeConnection) SetConnDeadline() {}

func TestMain(m *testing.M) {
    logfilePath := "common_test.log"
    logger.InitLogger(logfilePath, 5, 1, 28, false, zerolog.DebugLevel)

    viper.SetEnvPrefix("sftpgo")
    replacer := strings.NewReplacer(".", "__")
    viper.SetEnvKeyReplacer(replacer)
    viper.SetConfigName("sftpgo")
    viper.AutomaticEnv()
    viper.AllowEmptyEnv(true)

    driver, err := initializeDataprovider(-1)
    if err != nil {
        logger.WarnToConsole("error initializing data provider: %v", err)
        os.Exit(1)
    }
    logger.InfoToConsole("Starting COMMON tests, provider: %v", driver)
    Initialize(Configuration{})
    httpConfig := httpclient.Config{
        Timeout: 5,
    }
    httpConfig.Initialize(configDir)

    go func() {
        // start a test HTTP server to receive action notifications
        http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprintf(w, "OK\n")
        })
        http.HandleFunc("/404", func(w http.ResponseWriter, r *http.Request) {
            w.WriteHeader(http.StatusNotFound)
            fmt.Fprintf(w, "Not found\n")
        })
        if err := http.ListenAndServe(httpAddr, nil); err != nil {
            logger.ErrorToConsole("could not start HTTP notification server: %v", err)
            os.Exit(1)
        }
    }()

    go func() {
        Config.ProxyProtocol = 2
        listener, err := net.Listen("tcp", httpProxyAddr)
        if err != nil {
            logger.ErrorToConsole("error creating listener for proxy protocol server: %v", err)
            os.Exit(1)
        }
        proxyListener, err := Config.GetProxyListener(listener)
        if err != nil {
            logger.ErrorToConsole("error creating proxy protocol listener: %v", err)
            os.Exit(1)
        }
        Config.ProxyProtocol = 0

        s := &http.Server{}
        if err := s.Serve(proxyListener); err != nil {
            logger.ErrorToConsole("could not start HTTP proxy protocol server: %v", err)
            os.Exit(1)
        }
    }()

    waitTCPListening(httpAddr)
    waitTCPListening(httpProxyAddr)
    exitCode := m.Run()
    os.Remove(logfilePath) //nolint:errcheck
    os.Exit(exitCode)
}

func waitTCPListening(address string) {
    for {
        conn, err := net.Dial("tcp", address)
        if err != nil {
            logger.WarnToConsole("tcp server %v not listening: %v\n", address, err)
            time.Sleep(100 * time.Millisecond)
            continue
        }
        logger.InfoToConsole("tcp server %v now listening\n", address)
        conn.Close()
        break
    }
}

func initializeDataprovider(trackQuota int) (string, error) {
    configDir := ".."
    viper.AddConfigPath(configDir)
    if err := viper.ReadInConfig(); err != nil {
        return "", err
    }
    var cfg providerConf
    if err := viper.Unmarshal(&cfg); err != nil {
        return "", err
    }
    if trackQuota >= 0 && trackQuota <= 2 {
        cfg.Config.TrackQuota = trackQuota
    }
    return cfg.Config.Driver, dataprovider.Initialize(cfg.Config, configDir)
}

func closeDataprovider() error {
    return dataprovider.Close()
}

func TestIdleConnections(t *testing.T) {
    configCopy := Config

    Config.IdleTimeout = 1
    Initialize(Config)

    username := "test_user"
    user := dataprovider.User{
        Username: username,
    }
    c := NewBaseConnection("id", ProtocolSFTP, user, nil)
    c.lastActivity = time.Now().Add(-24 * time.Hour).UnixNano()
    fakeConn := &fakeConnection{
        BaseConnection: c,
    }
    Connections.Add(fakeConn)
    assert.Equal(t, Connections.GetActiveSessions(username), 1)
    startIdleTimeoutTicker(100 * time.Millisecond)
    assert.Eventually(t, func() bool { return Connections.GetActiveSessions(username) == 0 }, 1*time.Second, 200*time.Millisecond)
    stopIdleTimeoutTicker()

    Config = configCopy
}

func TestCloseConnection(t *testing.T) {
    c := NewBaseConnection("id", ProtocolSFTP, dataprovider.User{}, nil)
    fakeConn := &fakeConnection{
        BaseConnection: c,
    }
    Connections.Add(fakeConn)
    assert.Len(t, Connections.GetStats(), 1)
    res := Connections.Close(fakeConn.GetID())
    assert.True(t, res)
    assert.Eventually(t, func() bool { return len(Connections.GetStats()) == 0 }, 300*time.Millisecond, 50*time.Millisecond)
    res = Connections.Close(fakeConn.GetID())
    assert.False(t, res)
    Connections.Remove(fakeConn)
}

func TestAtomicUpload(t *testing.T) {
    configCopy := Config

    Config.UploadMode = UploadModeStandard
    assert.False(t, Config.IsAtomicUploadEnabled())
    Config.UploadMode = UploadModeAtomic
    assert.True(t, Config.IsAtomicUploadEnabled())
    Config.UploadMode = UploadModeAtomicWithResume
    assert.True(t, Config.IsAtomicUploadEnabled())

    Config = configCopy
}

func TestConnectionStatus(t *testing.T) {
    username := "test_user"
    user := dataprovider.User{
        Username: username,
    }
    c1 := NewBaseConnection("id1", ProtocolSFTP, user, nil)
    fakeConn1 := &fakeConnection{
        BaseConnection: c1,
    }
    t1 := NewBaseTransfer(nil, c1, nil, "/p1", "/r1", TransferUpload, 0, 0, true)
    t1.BytesReceived = 123
    t2 := NewBaseTransfer(nil, c1, nil, "/p2", "/r2", TransferDownload, 0, 0, true)
    t2.BytesSent = 456
    c2 := NewBaseConnection("id2", ProtocolSSH, user, nil)
    fakeConn2 := &fakeConnection{
        BaseConnection: c2,
        sshCommand:     "md5sum",
    }
    Connections.Add(fakeConn1)
    Connections.Add(fakeConn2)

    stats := Connections.GetStats()
    assert.Len(t, stats, 2)
    for _, stat := range stats {
        assert.Equal(t, stat.Username, username)
        assert.True(t, strings.HasPrefix(stat.GetConnectionInfo(), stat.Protocol))
        assert.True(t, strings.HasPrefix(stat.GetConnectionDuration(), "00:"))
        if stat.ConnectionID == "SFTP_id1" {
            assert.Len(t, stat.Transfers, 2)
            assert.Greater(t, len(stat.GetTransfersAsString()), 0)
            for _, tr := range stat.Transfers {
                if tr.OperationType == operationDownload {
                    assert.True(t, strings.HasPrefix(tr.getConnectionTransferAsString(), "DL"))
                } else if tr.OperationType == operationUpload {
                    assert.True(t, strings.HasPrefix(tr.getConnectionTransferAsString(), "UL"))
                }
            }
        } else {
            assert.Equal(t, 0, len(stat.GetTransfersAsString()))
        }
    }

    err := t1.Close()
    assert.NoError(t, err)
    err = t2.Close()
    assert.NoError(t, err)

    Connections.Remove(fakeConn1)
    Connections.Remove(fakeConn2)
    stats = Connections.GetStats()
    assert.Len(t, stats, 0)
}

func TestQuotaScans(t *testing.T) {
    username := "username"
    assert.True(t, QuotaScans.AddUserQuotaScan(username))
    assert.False(t, QuotaScans.AddUserQuotaScan(username))
    if assert.Len(t, QuotaScans.GetUsersQuotaScans(), 1) {
        assert.Equal(t, QuotaScans.GetUsersQuotaScans()[0].Username, username)
    }

    assert.True(t, QuotaScans.RemoveUserQuotaScan(username))
    assert.False(t, QuotaScans.RemoveUserQuotaScan(username))
    assert.Len(t, QuotaScans.GetUsersQuotaScans(), 0)

    folderName := "/folder"
    assert.True(t, QuotaScans.AddVFolderQuotaScan(folderName))
    assert.False(t, QuotaScans.AddVFolderQuotaScan(folderName))
    if assert.Len(t, QuotaScans.GetVFoldersQuotaScans(), 1) {
        assert.Equal(t, QuotaScans.GetVFoldersQuotaScans()[0].MappedPath, folderName)
    }

    assert.True(t, QuotaScans.RemoveVFolderQuotaScan(folderName))
    assert.False(t, QuotaScans.RemoveVFolderQuotaScan(folderName))
    assert.Len(t, QuotaScans.GetVFoldersQuotaScans(), 0)
}

func TestProxyProtocolVersion(t *testing.T) {
    c := Configuration{
        ProxyProtocol: 1,
    }
    proxyListener, err := c.GetProxyListener(nil)
    assert.NoError(t, err)
    assert.Nil(t, proxyListener.Policy)

    c.ProxyProtocol = 2
    proxyListener, err = c.GetProxyListener(nil)
    assert.NoError(t, err)
    assert.NotNil(t, proxyListener.Policy)

    c.ProxyProtocol = 1
    c.ProxyAllowed = []string{"invalid"}
    _, err = c.GetProxyListener(nil)
    assert.Error(t, err)

    c.ProxyProtocol = 2
    _, err = c.GetProxyListener(nil)
    assert.Error(t, err)
}

func TestProxyProtocol(t *testing.T) {
    httpClient := httpclient.GetHTTPClient()
    resp, err := httpClient.Get(fmt.Sprintf("http://%v", httpProxyAddr))
    if assert.NoError(t, err) {
        defer resp.Body.Close()
        assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
    }
}
800
common/connection.go
Normal file
@@ -0,0 +1,800 @@
package common

import (
    "fmt"
    "os"
    "path"
    "strings"
    "sync"
    "sync/atomic"
    "time"

    "github.com/pkg/sftp"

    "github.com/drakkan/sftpgo/dataprovider"
    "github.com/drakkan/sftpgo/logger"
    "github.com/drakkan/sftpgo/utils"
    "github.com/drakkan/sftpgo/vfs"
)

// BaseConnection defines common fields for a connection using any supported protocol
type BaseConnection struct {
    // Unique identifier for the connection
    ID string
    // user associated with this connection if any
    User dataprovider.User
    // start time for this connection
    startTime time.Time
    protocol  string
    Fs        vfs.Fs
    sync.RWMutex
    // last activity for this connection
    lastActivity    int64
    transferID      uint64
    activeTransfers []ActiveTransfer
}

// NewBaseConnection returns a new BaseConnection
func NewBaseConnection(ID, protocol string, user dataprovider.User, fs vfs.Fs) *BaseConnection {
    connID := ID
    if utils.IsStringInSlice(protocol, supportedProcols) {
        connID = fmt.Sprintf("%v_%v", protocol, ID)
    }
    return &BaseConnection{
        ID:           connID,
        User:         user,
        startTime:    time.Now(),
        protocol:     protocol,
        Fs:           fs,
        lastActivity: time.Now().UnixNano(),
        transferID:   0,
    }
}

// Log outputs a log entry to the configured logger
func (c *BaseConnection) Log(level logger.LogLevel, format string, v ...interface{}) {
    logger.Log(level, c.protocol, c.ID, format, v...)
}

// GetTransferID returns a unique transfer ID for this connection
func (c *BaseConnection) GetTransferID() uint64 {
    return atomic.AddUint64(&c.transferID, 1)
}

// GetID returns the connection ID
func (c *BaseConnection) GetID() string {
    return c.ID
}

// GetUsername returns the authenticated username associated with this connection if any
func (c *BaseConnection) GetUsername() string {
    return c.User.Username
}

// GetProtocol returns the protocol for the connection
func (c *BaseConnection) GetProtocol() string {
    return c.protocol
}

// SetProtocol sets the protocol for this connection
func (c *BaseConnection) SetProtocol(protocol string) {
    c.protocol = protocol
    if utils.IsStringInSlice(c.protocol, supportedProcols) {
        c.ID = fmt.Sprintf("%v_%v", c.protocol, c.ID)
    }
}

// GetConnectionTime returns the initial connection time
func (c *BaseConnection) GetConnectionTime() time.Time {
    return c.startTime
}

// UpdateLastActivity updates last activity for this connection
func (c *BaseConnection) UpdateLastActivity() {
    atomic.StoreInt64(&c.lastActivity, time.Now().UnixNano())
}

// GetLastActivity returns the last connection activity
func (c *BaseConnection) GetLastActivity() time.Time {
    return time.Unix(0, atomic.LoadInt64(&c.lastActivity))
}

// AddTransfer associates a new transfer to this connection
func (c *BaseConnection) AddTransfer(t ActiveTransfer) {
    c.Lock()
    defer c.Unlock()

    c.activeTransfers = append(c.activeTransfers, t)
    c.Log(logger.LevelDebug, "transfer added, id: %v, active transfers: %v", t.GetID(), len(c.activeTransfers))
}

// RemoveTransfer removes the specified transfer from the active ones
func (c *BaseConnection) RemoveTransfer(t ActiveTransfer) {
    c.Lock()
    defer c.Unlock()

    indexToRemove := -1
    for i, v := range c.activeTransfers {
        if v.GetID() == t.GetID() {
            indexToRemove = i
            break
        }
    }
    if indexToRemove >= 0 {
        c.activeTransfers[indexToRemove] = c.activeTransfers[len(c.activeTransfers)-1]
        c.activeTransfers[len(c.activeTransfers)-1] = nil
        c.activeTransfers = c.activeTransfers[:len(c.activeTransfers)-1]
        c.Log(logger.LevelDebug, "transfer removed, id: %v active transfers: %v", t.GetID(), len(c.activeTransfers))
    } else {
        c.Log(logger.LevelWarn, "transfer to remove not found!")
    }
}

// GetTransfers returns the active transfers
func (c *BaseConnection) GetTransfers() []ConnectionTransfer {
    c.RLock()
    defer c.RUnlock()

    transfers := make([]ConnectionTransfer, 0, len(c.activeTransfers))
    for _, t := range c.activeTransfers {
        var operationType string
        if t.GetType() == TransferUpload {
            operationType = operationUpload
        } else {
            operationType = operationDownload
        }
        transfers = append(transfers, ConnectionTransfer{
            ID:            t.GetID(),
            OperationType: operationType,
            StartTime:     utils.GetTimeAsMsSinceEpoch(t.GetStartTime()),
            Size:          t.GetSize(),
            VirtualPath:   t.GetVirtualPath(),
        })
    }

    return transfers
}
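A protocol package builds on BaseConnection by embedding it, implementing the few ActiveConnection methods the common package cannot provide, and registering the connection. The sketch below mirrors the fakeConnection used in the tests; the scpConnection type, its field values and the home directory are hypothetical.

package main

import (
    "github.com/drakkan/sftpgo/common"
    "github.com/drakkan/sftpgo/dataprovider"
    "github.com/drakkan/sftpgo/vfs"
)

// scpConnection is a hypothetical protocol-specific connection: it embeds
// BaseConnection and adds only what the common package cannot know about.
type scpConnection struct {
    *common.BaseConnection
    remoteAddr string
}

func (c *scpConnection) GetClientVersion() string { return "" }
func (c *scpConnection) GetCommand() string       { return "" }
func (c *scpConnection) GetRemoteAddress() string { return c.remoteAddr }
func (c *scpConnection) Disconnect() error        { return nil }
func (c *scpConnection) SetConnDeadline()         {}

func main() {
    user := dataprovider.User{Username: "username", HomeDir: "/tmp/username"}
    fs := vfs.NewOsFs("connID", user.HomeDir, nil)
    conn := &scpConnection{
        BaseConnection: common.NewBaseConnection("connID", common.ProtocolSCP, user, fs),
        remoteAddr:     "127.0.0.1:2022",
    }
    // register the connection so it shows up in stats and is subject to the idle timeout
    common.Connections.Add(conn)
    defer common.Connections.Remove(conn)

    conn.UpdateLastActivity() // call this on every client operation
}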
// ListDir reads the directory named by fsPath and returns a list of directory entries
|
||||
func (c *BaseConnection) ListDir(fsPath, virtualPath string) ([]os.FileInfo, error) {
|
||||
if !c.User.HasPerm(dataprovider.PermListItems, virtualPath) {
|
||||
return nil, c.GetPermissionDeniedError()
|
||||
}
|
||||
files, err := c.Fs.ReadDir(fsPath)
|
||||
if err != nil {
|
||||
c.Log(logger.LevelWarn, "error listing directory: %+v", err)
|
||||
return nil, c.GetFsError(err)
|
||||
}
|
||||
return c.User.AddVirtualDirs(files, virtualPath), nil
|
||||
}
|
||||
|
||||
// CreateDir creates a new directory at the specified fsPath
|
||||
func (c *BaseConnection) CreateDir(fsPath, virtualPath string) error {
|
||||
if !c.User.HasPerm(dataprovider.PermCreateDirs, path.Dir(virtualPath)) {
|
||||
return c.GetPermissionDeniedError()
|
||||
}
|
||||
if c.User.IsVirtualFolder(virtualPath) {
|
||||
c.Log(logger.LevelWarn, "mkdir not allowed %#v is a virtual folder", virtualPath)
|
||||
return c.GetPermissionDeniedError()
|
||||
}
|
||||
if err := c.Fs.Mkdir(fsPath); err != nil {
|
||||
c.Log(logger.LevelWarn, "error creating dir: %#v error: %+v", fsPath, err)
|
||||
return c.GetFsError(err)
|
||||
}
|
||||
vfs.SetPathPermissions(c.Fs, fsPath, c.User.GetUID(), c.User.GetGID())
|
||||
|
||||
logger.CommandLog(mkdirLogSender, fsPath, "", c.User.Username, "", c.ID, c.protocol, -1, -1, "", "", "")
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveFile removes a file at the specified fsPath
|
||||
func (c *BaseConnection) RemoveFile(fsPath, virtualPath string, info os.FileInfo) error {
|
||||
if !c.User.HasPerm(dataprovider.PermDelete, path.Dir(virtualPath)) {
|
||||
return c.GetPermissionDeniedError()
|
||||
}
|
||||
if !c.User.IsFileAllowed(virtualPath) {
|
||||
c.Log(logger.LevelDebug, "removing file %#v is not allowed", fsPath)
|
||||
return c.GetPermissionDeniedError()
|
||||
}
|
||||
size := info.Size()
|
||||
action := newActionNotification(&c.User, operationPreDelete, fsPath, "", "", c.protocol, size, nil)
|
||||
actionErr := action.execute()
|
||||
if actionErr == nil {
|
||||
c.Log(logger.LevelDebug, "remove for path %#v handled by pre-delete action", fsPath)
|
||||
} else {
|
||||
if err := c.Fs.Remove(fsPath, false); err != nil {
|
||||
c.Log(logger.LevelWarn, "failed to remove a file/symlink %#v: %+v", fsPath, err)
|
||||
return c.GetFsError(err)
|
||||
}
|
||||
}
|
||||
|
||||
logger.CommandLog(removeLogSender, fsPath, "", c.User.Username, "", c.ID, c.protocol, -1, -1, "", "", "")
|
||||
if info.Mode()&os.ModeSymlink != os.ModeSymlink {
|
||||
vfolder, err := c.User.GetVirtualFolderForPath(path.Dir(virtualPath))
|
||||
if err == nil {
|
||||
dataprovider.UpdateVirtualFolderQuota(vfolder.BaseVirtualFolder, -1, -size, false) //nolint:errcheck
|
||||
if vfolder.IsIncludedInUserQuota() {
|
||||
dataprovider.UpdateUserQuota(c.User, -1, -size, false) //nolint:errcheck
|
||||
}
|
||||
} else {
|
||||
dataprovider.UpdateUserQuota(c.User, -1, -size, false) //nolint:errcheck
|
||||
}
|
||||
}
|
||||
if actionErr != nil {
|
||||
action := newActionNotification(&c.User, operationDelete, fsPath, "", "", c.protocol, size, nil)
|
||||
go action.execute() //nolint:errcheck
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveDir removes a directory at the specified fsPath
|
||||
func (c *BaseConnection) RemoveDir(fsPath, virtualPath string) error {
|
||||
if c.Fs.GetRelativePath(fsPath) == "/" {
|
||||
c.Log(logger.LevelWarn, "removing root dir is not allowed")
|
||||
return c.GetPermissionDeniedError()
|
||||
}
|
||||
if c.User.IsVirtualFolder(virtualPath) {
|
||||
c.Log(logger.LevelWarn, "removing a virtual folder is not allowed: %#v", virtualPath)
|
||||
return c.GetPermissionDeniedError()
|
||||
}
|
||||
if c.User.HasVirtualFoldersInside(virtualPath) {
|
||||
c.Log(logger.LevelWarn, "removing a directory with a virtual folder inside is not allowed: %#v", virtualPath)
|
||||
return c.GetOpUnsupportedError()
|
||||
}
|
||||
if c.User.IsMappedPath(fsPath) {
|
||||
c.Log(logger.LevelWarn, "removing a directory mapped as virtual folder is not allowed: %#v", fsPath)
|
||||
return c.GetPermissionDeniedError()
|
||||
}
|
||||
if !c.User.HasPerm(dataprovider.PermDelete, path.Dir(virtualPath)) {
|
||||
return c.GetPermissionDeniedError()
|
||||
}
|
||||
|
||||
var fi os.FileInfo
|
||||
var err error
|
||||
if fi, err = c.Fs.Lstat(fsPath); err != nil {
|
||||
c.Log(logger.LevelWarn, "failed to remove a dir %#v: stat error: %+v", fsPath, err)
|
||||
return c.GetFsError(err)
|
||||
}
|
||||
if !fi.IsDir() || fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||
c.Log(logger.LevelDebug, "cannot remove %#v is not a directory", fsPath)
|
||||
return c.GetGenericError()
|
||||
}
|
||||
|
||||
if err := c.Fs.Remove(fsPath, true); err != nil {
|
||||
c.Log(logger.LevelWarn, "failed to remove directory %#v: %+v", fsPath, err)
|
||||
return c.GetFsError(err)
|
||||
}
|
||||
|
||||
logger.CommandLog(rmdirLogSender, fsPath, "", c.User.Username, "", c.ID, c.protocol, -1, -1, "", "", "")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Rename renames (moves) fsSourcePath to fsTargetPath
func (c *BaseConnection) Rename(fsSourcePath, fsTargetPath, virtualSourcePath, virtualTargetPath string) error {
    if c.User.IsMappedPath(fsSourcePath) {
        c.Log(logger.LevelWarn, "renaming a directory mapped as virtual folder is not allowed: %#v", fsSourcePath)
        return c.GetPermissionDeniedError()
    }
    if c.User.IsMappedPath(fsTargetPath) {
        c.Log(logger.LevelWarn, "renaming to a directory mapped as virtual folder is not allowed: %#v", fsTargetPath)
        return c.GetPermissionDeniedError()
    }
    srcInfo, err := c.Fs.Lstat(fsSourcePath)
    if err != nil {
        return c.GetFsError(err)
    }
    if !c.isRenamePermitted(fsSourcePath, virtualSourcePath, virtualTargetPath, srcInfo) {
        return c.GetPermissionDeniedError()
    }
    initialSize := int64(-1)
    if dstInfo, err := c.Fs.Lstat(fsTargetPath); err == nil {
        if dstInfo.IsDir() {
            c.Log(logger.LevelWarn, "attempted to rename %#v overwriting an existing directory %#v",
                fsSourcePath, fsTargetPath)
            return c.GetOpUnsupportedError()
        }
        // we are overwriting an existing file/symlink
        if dstInfo.Mode().IsRegular() {
            initialSize = dstInfo.Size()
        }
        if !c.User.HasPerm(dataprovider.PermOverwrite, path.Dir(virtualTargetPath)) {
            c.Log(logger.LevelDebug, "renaming is not allowed, %#v -> %#v. Target exists but the user "+
                "has no overwrite permission", virtualSourcePath, virtualTargetPath)
            return c.GetPermissionDeniedError()
        }
    }
    if srcInfo.IsDir() {
        if c.User.HasVirtualFoldersInside(virtualSourcePath) {
            c.Log(logger.LevelDebug, "renaming the folder %#v is not supported: it has virtual folders inside it",
                virtualSourcePath)
            return c.GetOpUnsupportedError()
        }
        if err = c.checkRecursiveRenameDirPermissions(fsSourcePath, fsTargetPath); err != nil {
            c.Log(logger.LevelDebug, "error checking recursive permissions before renaming %#v: %+v", fsSourcePath, err)
            return c.GetFsError(err)
        }
    }
    if !c.hasSpaceForRename(virtualSourcePath, virtualTargetPath, initialSize, fsSourcePath) {
        c.Log(logger.LevelInfo, "denying cross rename due to space limit")
        return c.GetGenericError()
    }
    if err := c.Fs.Rename(fsSourcePath, fsTargetPath); err != nil {
        c.Log(logger.LevelWarn, "failed to rename %#v -> %#v: %+v", fsSourcePath, fsTargetPath, err)
        return c.GetFsError(err)
    }
    if dataprovider.GetQuotaTracking() > 0 {
        c.updateQuotaAfterRename(virtualSourcePath, virtualTargetPath, fsTargetPath, initialSize) //nolint:errcheck
    }
    logger.CommandLog(renameLogSender, fsSourcePath, fsTargetPath, c.User.Username, "", c.ID, c.protocol, -1, -1,
        "", "", "")
    action := newActionNotification(&c.User, operationRename, fsSourcePath, fsTargetPath, "", c.protocol, 0, nil)
    // the returned error is used in test cases only, we already log the error inside action.execute
    go action.execute() //nolint:errcheck

    return nil
}

// CreateSymlink creates fsTargetPath as a symbolic link to fsSourcePath
func (c *BaseConnection) CreateSymlink(fsSourcePath, fsTargetPath, virtualSourcePath, virtualTargetPath string) error {
    if c.Fs.GetRelativePath(fsSourcePath) == "/" {
        c.Log(logger.LevelWarn, "symlinking root dir is not allowed")
        return c.GetPermissionDeniedError()
    }
    if c.User.IsVirtualFolder(virtualTargetPath) {
        c.Log(logger.LevelWarn, "symlinking a virtual folder is not allowed")
        return c.GetPermissionDeniedError()
    }
    if !c.User.HasPerm(dataprovider.PermCreateSymlinks, path.Dir(virtualTargetPath)) {
        return c.GetPermissionDeniedError()
    }
    if c.isCrossFoldersRequest(virtualSourcePath, virtualTargetPath) {
        c.Log(logger.LevelWarn, "cross folder symlink is not supported, src: %v dst: %v", virtualSourcePath, virtualTargetPath)
        return c.GetGenericError()
    }
    if c.User.IsMappedPath(fsSourcePath) {
        c.Log(logger.LevelWarn, "symlinking a directory mapped as virtual folder is not allowed: %#v", fsSourcePath)
        return c.GetPermissionDeniedError()
    }
    if c.User.IsMappedPath(fsTargetPath) {
        c.Log(logger.LevelWarn, "symlinking to a directory mapped as virtual folder is not allowed: %#v", fsTargetPath)
        return c.GetPermissionDeniedError()
    }
    if err := c.Fs.Symlink(fsSourcePath, fsTargetPath); err != nil {
        c.Log(logger.LevelWarn, "failed to create symlink %#v -> %#v: %+v", fsSourcePath, fsTargetPath, err)
        return c.GetFsError(err)
    }
    logger.CommandLog(symlinkLogSender, fsSourcePath, fsTargetPath, c.User.Username, "", c.ID, c.protocol, -1, -1, "", "", "")
    return nil
}

// SetStat sets the StatAttributes for the specified fsPath
func (c *BaseConnection) SetStat(fsPath, virtualPath string, attributes *StatAttributes) error {
    if Config.SetstatMode == 1 {
        return nil
    }
    pathForPerms := virtualPath
    if fi, err := c.Fs.Lstat(fsPath); err == nil {
        if fi.IsDir() {
            pathForPerms = path.Dir(virtualPath)
        }
    }
    if attributes.Flags&StatAttrPerms != 0 {
        if !c.User.HasPerm(dataprovider.PermChmod, pathForPerms) {
            return c.GetPermissionDeniedError()
        }
        if err := c.Fs.Chmod(fsPath, attributes.Mode); err != nil {
            c.Log(logger.LevelWarn, "failed to chmod path %#v, mode: %v, err: %+v", fsPath, attributes.Mode.String(), err)
            return c.GetFsError(err)
        }
        logger.CommandLog(chmodLogSender, fsPath, "", c.User.Username, attributes.Mode.String(), c.ID, c.protocol,
            -1, -1, "", "", "")
    }

    if attributes.Flags&StatAttrUIDGID != 0 {
        if !c.User.HasPerm(dataprovider.PermChown, pathForPerms) {
            return c.GetPermissionDeniedError()
        }
        if err := c.Fs.Chown(fsPath, attributes.UID, attributes.GID); err != nil {
            c.Log(logger.LevelWarn, "failed to chown path %#v, uid: %v, gid: %v, err: %+v", fsPath, attributes.UID,
                attributes.GID, err)
            return c.GetFsError(err)
        }
        logger.CommandLog(chownLogSender, fsPath, "", c.User.Username, "", c.ID, c.protocol, attributes.UID, attributes.GID,
            "", "", "")
    }

    if attributes.Flags&StatAttrTimes != 0 {
        if !c.User.HasPerm(dataprovider.PermChtimes, pathForPerms) {
            return sftp.ErrSSHFxPermissionDenied
        }

        if err := c.Fs.Chtimes(fsPath, attributes.Atime, attributes.Mtime); err != nil {
            c.Log(logger.LevelWarn, "failed to chtimes for path %#v, access time: %v, modification time: %v, err: %+v",
                fsPath, attributes.Atime, attributes.Mtime, err)
            return c.GetFsError(err)
        }
        accessTimeString := attributes.Atime.Format(chtimesFormat)
        modificationTimeString := attributes.Mtime.Format(chtimesFormat)
        logger.CommandLog(chtimesLogSender, fsPath, "", c.User.Username, "", c.ID, c.protocol, -1, -1,
            accessTimeString, modificationTimeString, "")
    }

    return nil
}

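The StatAttributes flags above let a protocol layer apply only the attributes the client actually sent. As a rough, hypothetical caller sketch (the function name and values below are illustrative, not part of this commit), a setstat-style request could be forwarded like this, with SetStat enforcing the per-directory permissions and returning a protocol-aware error:

// illustrative sketch: assumes imports "os" and "time" in the calling package
func applyClientSetstat(c *BaseConnection, fsPath, virtualPath string) error {
    attrs := &StatAttributes{
        Flags: StatAttrPerms | StatAttrTimes, // only chmod and chtimes were requested
        Mode:  os.FileMode(0644),
        Atime: time.Now(),
        Mtime: time.Now(),
    }
    return c.SetStat(fsPath, virtualPath, attrs)
}
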
func (c *BaseConnection) checkRecursiveRenameDirPermissions(sourcePath, targetPath string) error {
    dstPerms := []string{
        dataprovider.PermCreateDirs,
        dataprovider.PermUpload,
        dataprovider.PermCreateSymlinks,
    }

    err := c.Fs.Walk(sourcePath, func(walkedPath string, info os.FileInfo, err error) error {
        if err != nil {
            return err
        }
        dstPath := strings.Replace(walkedPath, sourcePath, targetPath, 1)
        virtualSrcPath := c.Fs.GetRelativePath(walkedPath)
        virtualDstPath := c.Fs.GetRelativePath(dstPath)
        // Walk scans the directory tree in order: once the parent directory permissions have been checked,
        // we know that everything inside the parent path has already been checked. If the current dir has no
        // subdirs with specific permissions defined inside it and it has all the required permissions, we can
        // stop scanning.
        if !c.User.HasPermissionsInside(path.Dir(virtualSrcPath)) && !c.User.HasPermissionsInside(path.Dir(virtualDstPath)) {
            if c.User.HasPerm(dataprovider.PermRename, path.Dir(virtualSrcPath)) &&
                c.User.HasPerm(dataprovider.PermRename, path.Dir(virtualDstPath)) {
                return ErrSkipPermissionsCheck
            }
            if c.User.HasPerm(dataprovider.PermDelete, path.Dir(virtualSrcPath)) &&
                c.User.HasPerms(dstPerms, path.Dir(virtualDstPath)) {
                return ErrSkipPermissionsCheck
            }
        }
        if !c.isRenamePermitted(walkedPath, virtualSrcPath, virtualDstPath, info) {
            c.Log(logger.LevelInfo, "rename %#v -> %#v is not allowed, virtual destination path: %#v",
                walkedPath, dstPath, virtualDstPath)
            return os.ErrPermission
        }
        return nil
    })
    if err == ErrSkipPermissionsCheck {
        err = nil
    }
    return err
}

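The early exit above relies on a sentinel error: returning ErrSkipPermissionsCheck from the walk function stops the walk, and the caller then downgrades that specific error to success. The same pattern can be shown with the standard library alone; a minimal standalone sketch (filepath.Walk and a local sentinel, purely illustrative):

package main

import (
    "errors"
    "fmt"
    "os"
    "path/filepath"
)

var errStopWalk = errors.New("stop walking, remaining checks are implied")

func checkTree(root string) error {
    err := filepath.Walk(root, func(p string, info os.FileInfo, err error) error {
        if err != nil {
            return err
        }
        if info.IsDir() && filepath.Base(p) == "no-need-to-descend" {
            return errStopWalk // sentinel: not a real failure, just "nothing left to verify"
        }
        return nil
    })
    if err == errStopWalk {
        err = nil // the sentinel is downgraded to success by the caller
    }
    return err
}

func main() {
    fmt.Println(checkTree("."))
}
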
func (c *BaseConnection) isRenamePermitted(fsSourcePath, virtualSourcePath, virtualTargetPath string, fi os.FileInfo) bool {
    if c.Fs.GetRelativePath(fsSourcePath) == "/" {
        c.Log(logger.LevelWarn, "renaming root dir is not allowed")
        return false
    }
    if c.User.IsVirtualFolder(virtualSourcePath) || c.User.IsVirtualFolder(virtualTargetPath) {
        c.Log(logger.LevelWarn, "renaming a virtual folder is not allowed")
        return false
    }
    if !c.User.IsFileAllowed(virtualSourcePath) || !c.User.IsFileAllowed(virtualTargetPath) {
        if fi != nil && fi.Mode().IsRegular() {
            c.Log(logger.LevelDebug, "renaming file is not allowed, source: %#v target: %#v",
                virtualSourcePath, virtualTargetPath)
            return false
        }
    }
    if c.User.HasPerm(dataprovider.PermRename, path.Dir(virtualSourcePath)) &&
        c.User.HasPerm(dataprovider.PermRename, path.Dir(virtualTargetPath)) {
        return true
    }
    if !c.User.HasPerm(dataprovider.PermDelete, path.Dir(virtualSourcePath)) {
        return false
    }
    if fi != nil {
        if fi.IsDir() {
            return c.User.HasPerm(dataprovider.PermCreateDirs, path.Dir(virtualTargetPath))
        } else if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
            return c.User.HasPerm(dataprovider.PermCreateSymlinks, path.Dir(virtualTargetPath))
        }
    }
    return c.User.HasPerm(dataprovider.PermUpload, path.Dir(virtualTargetPath))
}

func (c *BaseConnection) hasSpaceForRename(virtualSourcePath, virtualTargetPath string, initialSize int64,
    fsSourcePath string) bool {
    if dataprovider.GetQuotaTracking() == 0 {
        return true
    }
    sourceFolder, errSrc := c.User.GetVirtualFolderForPath(path.Dir(virtualSourcePath))
    dstFolder, errDst := c.User.GetVirtualFolderForPath(path.Dir(virtualTargetPath))
    if errSrc != nil && errDst != nil {
        // rename inside the user home dir
        return true
    }
    if errSrc == nil && errDst == nil {
        // rename between virtual folders
        if sourceFolder.MappedPath == dstFolder.MappedPath {
            // rename inside the same virtual folder
            return true
        }
    }
    if errSrc != nil && dstFolder.IsIncludedInUserQuota() {
        // rename between user root dir and a virtual folder included in user quota
        return true
    }
    quotaResult := c.HasSpace(true, virtualTargetPath)
    return c.hasSpaceForCrossRename(quotaResult, initialSize, fsSourcePath)
}

// hasSpaceForCrossRename checks the quota after a rename between different folders
func (c *BaseConnection) hasSpaceForCrossRename(quotaResult vfs.QuotaCheckResult, initialSize int64, sourcePath string) bool {
    if !quotaResult.HasSpace && initialSize == -1 {
        // we are over quota and this is not a file replace
        return false
    }
    fi, err := c.Fs.Lstat(sourcePath)
    if err != nil {
        c.Log(logger.LevelWarn, "cross rename denied, stat error for path %#v: %v", sourcePath, err)
        return false
    }
    var sizeDiff int64
    var filesDiff int
    if fi.Mode().IsRegular() {
        sizeDiff = fi.Size()
        filesDiff = 1
        if initialSize != -1 {
            sizeDiff -= initialSize
            filesDiff = 0
        }
    } else if fi.IsDir() {
        filesDiff, sizeDiff, err = c.Fs.GetDirSize(sourcePath)
        if err != nil {
            c.Log(logger.LevelWarn, "cross rename denied, error getting size for directory %#v: %v", sourcePath, err)
            return false
        }
    }
    if !quotaResult.HasSpace && initialSize != -1 {
        // we are over quota but we are overwriting an existing file, so we check if the quota size after the rename is ok
        if quotaResult.QuotaSize == 0 {
            return true
        }
        c.Log(logger.LevelDebug, "cross rename overwrite, source %#v, used size %v, size to add %v",
            sourcePath, quotaResult.UsedSize, sizeDiff)
        quotaResult.UsedSize += sizeDiff
        return quotaResult.GetRemainingSize() >= 0
    }
    if quotaResult.QuotaFiles > 0 {
        remainingFiles := quotaResult.GetRemainingFiles()
        c.Log(logger.LevelDebug, "cross rename, source %#v, remaining files %v, files to add %v", sourcePath,
            remainingFiles, filesDiff)
        if remainingFiles < filesDiff {
            return false
        }
    }
    if quotaResult.QuotaSize > 0 {
        remainingSize := quotaResult.GetRemainingSize()
        c.Log(logger.LevelDebug, "cross rename, source %#v, remaining size %v, size to add %v", sourcePath,
            remainingSize, sizeDiff)
        if remainingSize < sizeDiff {
            return false
        }
    }
    return true
}

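To make the arithmetic concrete: for a regular file the delta is the file size, minus the size of the file it replaces when overwriting; for a directory it is the recursive size and file count. For example, with a 100 MB quota and 95 MB used, renaming a 20 MB file over an existing 18 MB file needs only 2 MB and is allowed, while renaming it to a new name would need 20 MB and is denied. A standalone sketch of the delta computation (illustrative only, it mirrors the logic above):

// crossRenameDelta returns the quota deltas a cross-folder rename of a regular file would add.
// Example: crossRenameDelta(20<<20, 18<<20, true) == (2 MB, 0 files).
func crossRenameDelta(srcSize, replacedSize int64, overwriting bool) (sizeDiff int64, filesDiff int) {
    sizeDiff = srcSize
    filesDiff = 1
    if overwriting {
        // the replaced file is already counted in the used quota
        sizeDiff -= replacedSize
        filesDiff = 0
    }
    return sizeDiff, filesDiff
}
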
// HasSpace checks user's quota usage
func (c *BaseConnection) HasSpace(checkFiles bool, requestPath string) vfs.QuotaCheckResult {
    result := vfs.QuotaCheckResult{
        HasSpace:     true,
        AllowedSize:  0,
        AllowedFiles: 0,
        UsedSize:     0,
        UsedFiles:    0,
        QuotaSize:    0,
        QuotaFiles:   0,
    }

    if dataprovider.GetQuotaTracking() == 0 {
        return result
    }
    var err error
    var vfolder vfs.VirtualFolder
    vfolder, err = c.User.GetVirtualFolderForPath(path.Dir(requestPath))
    if err == nil && !vfolder.IsIncludedInUserQuota() {
        if vfolder.HasNoQuotaRestrictions(checkFiles) {
            return result
        }
        result.QuotaSize = vfolder.QuotaSize
        result.QuotaFiles = vfolder.QuotaFiles
        result.UsedFiles, result.UsedSize, err = dataprovider.GetUsedVirtualFolderQuota(vfolder.MappedPath)
    } else {
        if c.User.HasNoQuotaRestrictions(checkFiles) {
            return result
        }
        result.QuotaSize = c.User.QuotaSize
        result.QuotaFiles = c.User.QuotaFiles
        result.UsedFiles, result.UsedSize, err = dataprovider.GetUsedQuota(c.User.Username)
    }
    if err != nil {
        c.Log(logger.LevelWarn, "error getting used quota for %#v, request path %#v: %v", c.User.Username, requestPath, err)
        result.HasSpace = false
        return result
    }
    result.AllowedFiles = result.QuotaFiles - result.UsedFiles
    result.AllowedSize = result.QuotaSize - result.UsedSize
    if (checkFiles && result.QuotaFiles > 0 && result.UsedFiles >= result.QuotaFiles) ||
        (result.QuotaSize > 0 && result.UsedSize >= result.QuotaSize) {
        c.Log(logger.LevelDebug, "quota exceeded for user %#v, request path %#v, num files: %v/%v, size: %v/%v, check files: %v",
            c.User.Username, requestPath, result.UsedFiles, result.QuotaFiles, result.UsedSize, result.QuotaSize, checkFiles)
        result.HasSpace = false
        return result
    }
    return result
}

func (c *BaseConnection) isCrossFoldersRequest(virtualSourcePath, virtualTargetPath string) bool {
    sourceFolder, errSrc := c.User.GetVirtualFolderForPath(virtualSourcePath)
    dstFolder, errDst := c.User.GetVirtualFolderForPath(virtualTargetPath)
    if errSrc != nil && errDst != nil {
        return false
    }
    if errSrc == nil && errDst == nil {
        return sourceFolder.MappedPath != dstFolder.MappedPath
    }
    return true
}

func (c *BaseConnection) updateQuotaMoveBetweenVFolders(sourceFolder, dstFolder vfs.VirtualFolder, initialSize,
    filesSize int64, numFiles int) {
    if sourceFolder.MappedPath == dstFolder.MappedPath {
        // both files are inside the same virtual folder
        if initialSize != -1 {
            dataprovider.UpdateVirtualFolderQuota(dstFolder.BaseVirtualFolder, -numFiles, -initialSize, false) //nolint:errcheck
            if dstFolder.IsIncludedInUserQuota() {
                dataprovider.UpdateUserQuota(c.User, -numFiles, -initialSize, false) //nolint:errcheck
            }
        }
        return
    }
    // files are inside different virtual folders
    dataprovider.UpdateVirtualFolderQuota(sourceFolder.BaseVirtualFolder, -numFiles, -filesSize, false) //nolint:errcheck
    if sourceFolder.IsIncludedInUserQuota() {
        dataprovider.UpdateUserQuota(c.User, -numFiles, -filesSize, false) //nolint:errcheck
    }
    if initialSize == -1 {
        dataprovider.UpdateVirtualFolderQuota(dstFolder.BaseVirtualFolder, numFiles, filesSize, false) //nolint:errcheck
        if dstFolder.IsIncludedInUserQuota() {
            dataprovider.UpdateUserQuota(c.User, numFiles, filesSize, false) //nolint:errcheck
        }
    } else {
        // we cannot have a directory here, initialSize != -1 only for files
        dataprovider.UpdateVirtualFolderQuota(dstFolder.BaseVirtualFolder, 0, filesSize-initialSize, false) //nolint:errcheck
        if dstFolder.IsIncludedInUserQuota() {
            dataprovider.UpdateUserQuota(c.User, 0, filesSize-initialSize, false) //nolint:errcheck
        }
    }
}

func (c *BaseConnection) updateQuotaMoveFromVFolder(sourceFolder vfs.VirtualFolder, initialSize, filesSize int64, numFiles int) {
    // move between a virtual folder and the user home dir
    dataprovider.UpdateVirtualFolderQuota(sourceFolder.BaseVirtualFolder, -numFiles, -filesSize, false) //nolint:errcheck
    if sourceFolder.IsIncludedInUserQuota() {
        dataprovider.UpdateUserQuota(c.User, -numFiles, -filesSize, false) //nolint:errcheck
    }
    if initialSize == -1 {
        dataprovider.UpdateUserQuota(c.User, numFiles, filesSize, false) //nolint:errcheck
    } else {
        // we cannot have a directory here, initialSize != -1 only for files
        dataprovider.UpdateUserQuota(c.User, 0, filesSize-initialSize, false) //nolint:errcheck
    }
}

func (c *BaseConnection) updateQuotaMoveToVFolder(dstFolder vfs.VirtualFolder, initialSize, filesSize int64, numFiles int) {
    // move between the user home dir and a virtual folder
    dataprovider.UpdateUserQuota(c.User, -numFiles, -filesSize, false) //nolint:errcheck
    if initialSize == -1 {
        dataprovider.UpdateVirtualFolderQuota(dstFolder.BaseVirtualFolder, numFiles, filesSize, false) //nolint:errcheck
        if dstFolder.IsIncludedInUserQuota() {
            dataprovider.UpdateUserQuota(c.User, numFiles, filesSize, false) //nolint:errcheck
        }
    } else {
        // we cannot have a directory here, initialSize != -1 only for files
        dataprovider.UpdateVirtualFolderQuota(dstFolder.BaseVirtualFolder, 0, filesSize-initialSize, false) //nolint:errcheck
        if dstFolder.IsIncludedInUserQuota() {
            dataprovider.UpdateUserQuota(c.User, 0, filesSize-initialSize, false) //nolint:errcheck
        }
    }
}

func (c *BaseConnection) updateQuotaAfterRename(virtualSourcePath, virtualTargetPath, targetPath string, initialSize int64) error {
    // we don't allow overwriting an existing directory, so targetPath can be:
    // - a new file (a symlink is treated as a new file here)
    // - a file overwriting an existing one
    // - a new directory
    // initialSize != -1 only when overwriting files
    sourceFolder, errSrc := c.User.GetVirtualFolderForPath(path.Dir(virtualSourcePath))
    dstFolder, errDst := c.User.GetVirtualFolderForPath(path.Dir(virtualTargetPath))
    if errSrc != nil && errDst != nil {
        // both files are contained inside the user home dir
        if initialSize != -1 {
            // we cannot have a directory here
            dataprovider.UpdateUserQuota(c.User, -1, -initialSize, false) //nolint:errcheck
        }
        return nil
    }

    filesSize := int64(0)
    numFiles := 1
    if fi, err := c.Fs.Stat(targetPath); err == nil {
        if fi.Mode().IsDir() {
            numFiles, filesSize, err = c.Fs.GetDirSize(targetPath)
            if err != nil {
                c.Log(logger.LevelWarn, "failed to update quota after rename, error scanning moved folder %#v: %v",
                    targetPath, err)
                return err
            }
        } else {
            filesSize = fi.Size()
        }
    } else {
        c.Log(logger.LevelWarn, "failed to update quota after rename, file %#v stat error: %+v", targetPath, err)
        return err
    }
    if errSrc == nil && errDst == nil {
        c.updateQuotaMoveBetweenVFolders(sourceFolder, dstFolder, initialSize, filesSize, numFiles)
    }
    if errSrc == nil && errDst != nil {
        c.updateQuotaMoveFromVFolder(sourceFolder, initialSize, filesSize, numFiles)
    }
    if errSrc != nil && errDst == nil {
        c.updateQuotaMoveToVFolder(dstFolder, initialSize, filesSize, numFiles)
    }
    return nil
}

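Summing up the dispatch above: which quota counters get updated depends only on whether the source and the destination parent directories resolve to a virtual folder. A compact, illustrative sketch of the four cases (the helper names are the ones defined above, the function itself is not part of this commit):

// quotaMoveCase maps the two lookups to the update path taken above
func quotaMoveCase(srcInVFolder, dstInVFolder bool) string {
    switch {
    case !srcInVFolder && !dstInVFolder:
        return "home -> home: only the replaced file, if any, is subtracted from the user quota"
    case srcInVFolder && dstInVFolder:
        return "vfolder -> vfolder: updateQuotaMoveBetweenVFolders"
    case srcInVFolder && !dstInVFolder:
        return "vfolder -> home: updateQuotaMoveFromVFolder"
    default:
        return "home -> vfolder: updateQuotaMoveToVFolder"
    }
}
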
// GetPermissionDeniedError returns an appropriate permission denied error for the connection protocol
func (c *BaseConnection) GetPermissionDeniedError() error {
    switch c.protocol {
    case ProtocolSFTP:
        return sftp.ErrSSHFxPermissionDenied
    default:
        return ErrPermissionDenied
    }
}

// GetNotExistError returns an appropriate not exist error for the connection protocol
func (c *BaseConnection) GetNotExistError() error {
    switch c.protocol {
    case ProtocolSFTP:
        return sftp.ErrSSHFxNoSuchFile
    default:
        return ErrNotExist
    }
}

// GetOpUnsupportedError returns an appropriate operation not supported error for the connection protocol
func (c *BaseConnection) GetOpUnsupportedError() error {
    switch c.protocol {
    case ProtocolSFTP:
        return sftp.ErrSSHFxOpUnsupported
    default:
        return ErrOpUnsupported
    }
}

// GetGenericError returns an appropriate generic error for the connection protocol
func (c *BaseConnection) GetGenericError() error {
    switch c.protocol {
    case ProtocolSFTP:
        return sftp.ErrSSHFxFailure
    default:
        return ErrGenericFailure
    }
}

// GetFsError converts a filesystem error to a protocol error
func (c *BaseConnection) GetFsError(err error) error {
    if c.Fs.IsNotExist(err) {
        return c.GetNotExistError()
    } else if c.Fs.IsPermission(err) {
        return c.GetPermissionDeniedError()
    } else if err != nil {
        return c.GetGenericError()
    }
    return nil
}
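
These helpers let the filesystem code above stay protocol agnostic: SFTP connections get sftp.* errors, every other protocol gets the exported common.Err* values. A hypothetical non-SFTP caller (illustrative only; the reply strings are made up, not part of this commit) would simply compare against those exported errors:

// sketch: how a hypothetical non-SFTP protocol handler could react to the converted errors
func replyForError(c *BaseConnection, err error) string {
    switch c.GetFsError(err) {
    case nil:
        return "250 OK"
    case ErrNotExist:
        return "550 no such file"
    case ErrPermissionDenied:
        return "550 permission denied"
    default:
        return "451 local error"
    }
}
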
1031
common/connection_test.go
Normal file
File diff suppressed because it is too large

54
common/tlsutils.go
Normal file
@@ -0,0 +1,54 @@
package common

import (
    "crypto/tls"
    "sync"

    "github.com/drakkan/sftpgo/logger"
)

// CertManager defines a TLS certificate manager
type CertManager struct {
    certPath string
    keyPath  string
    sync.RWMutex
    cert *tls.Certificate
}

// LoadCertificate loads the configured x509 key pair
func (m *CertManager) LoadCertificate(logSender string) error {
    newCert, err := tls.LoadX509KeyPair(m.certPath, m.keyPath)
    if err != nil {
        logger.Warn(logSender, "", "unable to load X509 key pair, cert file %#v key file %#v error: %v",
            m.certPath, m.keyPath, err)
        return err
    }
    logger.Debug(logSender, "", "TLS certificate %#v successfully loaded", m.certPath)
    m.Lock()
    defer m.Unlock()
    m.cert = &newCert
    return nil
}

// GetCertificateFunc returns the loaded certificate
func (m *CertManager) GetCertificateFunc() func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
    return func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
        m.RLock()
        defer m.RUnlock()
        return m.cert, nil
    }
}

// NewCertManager creates a new certificate manager
func NewCertManager(certificateFile, certificateKeyFile, logSender string) (*CertManager, error) {
    manager := &CertManager{
        cert:     nil,
        certPath: certificateFile,
        keyPath:  certificateKeyFile,
    }
    err := manager.LoadCertificate(logSender)
    if err != nil {
        return nil, err
    }
    return manager, nil
}
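
A likely way to consume the manager (an illustrative sketch: the HTTPS wiring, the file paths and the SIGHUP reload are assumptions, not part of this diff) is to hand GetCertificateFunc to a tls.Config, so LoadCertificate can swap the key pair at runtime without restarting the listener:

package main

import (
    "crypto/tls"
    "net/http"
    "os"
    "os/signal"
    "syscall"

    "github.com/drakkan/sftpgo/common"
)

func main() {
    certMgr, err := common.NewCertManager("/etc/sftpgo/https.crt", "/etc/sftpgo/https.key", "example")
    if err != nil {
        panic(err)
    }
    // reload the key pair on SIGHUP; connections accepted afterwards get the new certificate
    sigs := make(chan os.Signal, 1)
    signal.Notify(sigs, syscall.SIGHUP)
    go func() {
        for range sigs {
            certMgr.LoadCertificate("example") //nolint:errcheck
        }
    }()
    server := &http.Server{
        Addr:      ":8443",
        TLSConfig: &tls.Config{GetCertificate: certMgr.GetCertificateFunc()},
    }
    // empty cert/key paths: the certificate comes from GetCertificate
    panic(server.ListenAndServeTLS("", ""))
}
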
69
common/tlsutils_test.go
Normal file
@@ -0,0 +1,69 @@
package common
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
const (
|
||||
httpsCert = `-----BEGIN CERTIFICATE-----
|
||||
MIICHTCCAaKgAwIBAgIUHnqw7QnB1Bj9oUsNpdb+ZkFPOxMwCgYIKoZIzj0EAwIw
|
||||
RTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGElu
|
||||
dGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMDAyMDQwOTUzMDRaFw0zMDAyMDEw
|
||||
OTUzMDRaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYD
|
||||
VQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwdjAQBgcqhkjOPQIBBgUrgQQA
|
||||
IgNiAARCjRMqJ85rzMC998X5z761nJ+xL3bkmGVqWvrJ51t5OxV0v25NsOgR82CA
|
||||
NXUgvhVYs7vNFN+jxtb2aj6Xg+/2G/BNxkaFspIVCzgWkxiz7XE4lgUwX44FCXZM
|
||||
3+JeUbKjUzBRMB0GA1UdDgQWBBRhLw+/o3+Z02MI/d4tmaMui9W16jAfBgNVHSME
|
||||
GDAWgBRhLw+/o3+Z02MI/d4tmaMui9W16jAPBgNVHRMBAf8EBTADAQH/MAoGCCqG
|
||||
SM49BAMCA2kAMGYCMQDqLt2lm8mE+tGgtjDmtFgdOcI72HSbRQ74D5rYTzgST1rY
|
||||
/8wTi5xl8TiFUyLMUsICMQC5ViVxdXbhuG7gX6yEqSkMKZICHpO8hqFwOD/uaFVI
|
||||
dV4vKmHUzwK/eIx+8Ay3neE=
|
||||
-----END CERTIFICATE-----`
|
||||
httpsKey = `-----BEGIN EC PARAMETERS-----
|
||||
BgUrgQQAIg==
|
||||
-----END EC PARAMETERS-----
|
||||
-----BEGIN EC PRIVATE KEY-----
|
||||
MIGkAgEBBDCfMNsN6miEE3rVyUPwElfiJSWaR5huPCzUenZOfJT04GAcQdWvEju3
|
||||
UM2lmBLIXpGgBwYFK4EEACKhZANiAARCjRMqJ85rzMC998X5z761nJ+xL3bkmGVq
|
||||
WvrJ51t5OxV0v25NsOgR82CANXUgvhVYs7vNFN+jxtb2aj6Xg+/2G/BNxkaFspIV
|
||||
CzgWkxiz7XE4lgUwX44FCXZM3+JeUbI=
|
||||
-----END EC PRIVATE KEY-----`
|
||||
)
|
||||
|
||||
func TestLoadCertificate(t *testing.T) {
|
||||
certPath := filepath.Join(os.TempDir(), "test.crt")
|
||||
keyPath := filepath.Join(os.TempDir(), "test.key")
|
||||
err := ioutil.WriteFile(certPath, []byte(httpsCert), os.ModePerm)
|
||||
assert.NoError(t, err)
|
||||
err = ioutil.WriteFile(keyPath, []byte(httpsKey), os.ModePerm)
|
||||
assert.NoError(t, err)
|
||||
certManager, err := NewCertManager(certPath, keyPath, logSender)
|
||||
assert.NoError(t, err)
|
||||
certFunc := certManager.GetCertificateFunc()
|
||||
if assert.NotNil(t, certFunc) {
|
||||
hello := &tls.ClientHelloInfo{
|
||||
ServerName: "localhost",
|
||||
CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305},
|
||||
}
|
||||
cert, err := certFunc(hello)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, certManager.cert, cert)
|
||||
}
|
||||
|
||||
err = os.Remove(certPath)
|
||||
assert.NoError(t, err)
|
||||
err = os.Remove(keyPath)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestLoadInvalidCert(t *testing.T) {
|
||||
certManager, err := NewCertManager("test.crt", "test.key", logSender)
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, certManager)
|
||||
}
|
215
common/transfer.go
Normal file
@@ -0,0 +1,215 @@
package common

import (
    "errors"
    "os"
    "path"
    "sync"
    "sync/atomic"
    "time"

    "github.com/drakkan/sftpgo/dataprovider"
    "github.com/drakkan/sftpgo/logger"
    "github.com/drakkan/sftpgo/metrics"
)

var (
    // ErrTransferClosed defines the error returned for a closed transfer
    ErrTransferClosed = errors.New("transfer already closed")
)

// BaseTransfer contains the transfer details common to all supported protocols, for an upload or a download.
type BaseTransfer struct {
    ID             uint64
    File           *os.File
    Connection     *BaseConnection
    cancelFn       func()
    fsPath         string
    start          time.Time
    transferType   int
    MinWriteOffset int64
    InitialSize    int64
    isNewFile      bool
    requestPath    string
    BytesSent      int64
    BytesReceived  int64
    sync.Mutex
    ErrTransfer error
}

// NewBaseTransfer returns a new BaseTransfer and adds it to the given connection
func NewBaseTransfer(file *os.File, conn *BaseConnection, cancelFn func(), fsPath, requestPath string, transferType int,
    minWriteOffset, initialSize int64, isNewFile bool) *BaseTransfer {
    t := &BaseTransfer{
        ID:             conn.GetTransferID(),
        File:           file,
        Connection:     conn,
        cancelFn:       cancelFn,
        fsPath:         fsPath,
        start:          time.Now(),
        transferType:   transferType,
        MinWriteOffset: minWriteOffset,
        InitialSize:    initialSize,
        isNewFile:      isNewFile,
        requestPath:    requestPath,
        BytesSent:      0,
        BytesReceived:  0,
    }
    conn.AddTransfer(t)
    return t
}

// GetID returns the transfer ID
func (t *BaseTransfer) GetID() uint64 {
    return t.ID
}

// GetType returns the transfer type
func (t *BaseTransfer) GetType() int {
    return t.transferType
}

// GetSize returns the transferred size
func (t *BaseTransfer) GetSize() int64 {
    if t.transferType == TransferDownload {
        return atomic.LoadInt64(&t.BytesSent)
    }
    return atomic.LoadInt64(&t.BytesReceived)
}

// GetStartTime returns the start time
func (t *BaseTransfer) GetStartTime() time.Time {
    return t.start
}

// GetVirtualPath returns the transfer virtual path
func (t *BaseTransfer) GetVirtualPath() string {
    return t.requestPath
}

// TransferError is called if there is an unexpected error,
// for example a network or client issue
func (t *BaseTransfer) TransferError(err error) {
    t.Lock()
    defer t.Unlock()
    if t.ErrTransfer != nil {
        return
    }
    t.ErrTransfer = err
    if t.cancelFn != nil {
        t.cancelFn()
    }
    elapsed := time.Since(t.start).Nanoseconds() / 1000000
    t.Connection.Log(logger.LevelWarn, "Unexpected error for transfer, path: %#v, error: \"%v\" bytes sent: %v, "+
        "bytes received: %v transfer running since %v ms", t.fsPath, t.ErrTransfer, atomic.LoadInt64(&t.BytesSent),
        atomic.LoadInt64(&t.BytesReceived), elapsed)
}

// Close is called when the transfer is completed.
// It closes the underlying file, logs the transfer info, updates the
// user quota (for uploads) and executes any defined action.
// If there is an error no action will be executed and, in atomic mode,
// we try to delete the temporary file
func (t *BaseTransfer) Close() error {
    defer t.Connection.RemoveTransfer(t)

    var err error
    numFiles := 0
    if t.isNewFile {
        numFiles = 1
    }
    metrics.TransferCompleted(atomic.LoadInt64(&t.BytesSent), atomic.LoadInt64(&t.BytesReceived), t.transferType, t.ErrTransfer)
    if t.ErrTransfer == ErrQuotaExceeded && t.File != nil {
        // if the quota is exceeded we try to remove the partial file for uploads to the local filesystem
        err = os.Remove(t.File.Name())
        if err == nil {
            numFiles--
            atomic.StoreInt64(&t.BytesReceived, 0)
            t.MinWriteOffset = 0
        }
        t.Connection.Log(logger.LevelWarn, "upload denied due to space limit, delete temporary file: %#v, deletion error: %v",
            t.File.Name(), err)
    } else if t.transferType == TransferUpload && t.File != nil && t.File.Name() != t.fsPath {
        if t.ErrTransfer == nil || Config.UploadMode == UploadModeAtomicWithResume {
            err = os.Rename(t.File.Name(), t.fsPath)
            t.Connection.Log(logger.LevelDebug, "atomic upload completed, rename: %#v -> %#v, error: %v",
                t.File.Name(), t.fsPath, err)
        } else {
            err = os.Remove(t.File.Name())
            t.Connection.Log(logger.LevelWarn, "atomic upload completed with error: \"%v\", delete temporary file: %#v, "+
                "deletion error: %v", t.ErrTransfer, t.File.Name(), err)
            if err == nil {
                numFiles--
                atomic.StoreInt64(&t.BytesReceived, 0)
                t.MinWriteOffset = 0
            }
        }
    }
    elapsed := time.Since(t.start).Nanoseconds() / 1000000
    if t.transferType == TransferDownload {
        logger.TransferLog(downloadLogSender, t.fsPath, elapsed, atomic.LoadInt64(&t.BytesSent), t.Connection.User.Username,
            t.Connection.ID, t.Connection.protocol)
        action := newActionNotification(&t.Connection.User, operationDownload, t.fsPath, "", "", t.Connection.protocol,
            atomic.LoadInt64(&t.BytesSent), t.ErrTransfer)
        go action.execute() //nolint:errcheck
    } else {
        logger.TransferLog(uploadLogSender, t.fsPath, elapsed, atomic.LoadInt64(&t.BytesReceived), t.Connection.User.Username,
            t.Connection.ID, t.Connection.protocol)
        action := newActionNotification(&t.Connection.User, operationUpload, t.fsPath, "", "", t.Connection.protocol,
            atomic.LoadInt64(&t.BytesReceived)+t.MinWriteOffset, t.ErrTransfer)
        go action.execute() //nolint:errcheck
    }
    if t.ErrTransfer != nil {
        t.Connection.Log(logger.LevelWarn, "transfer error: %v, path: %#v", t.ErrTransfer, t.fsPath)
        if err == nil {
            err = t.ErrTransfer
        }
    }
    t.updateQuota(numFiles)
    return err
}

func (t *BaseTransfer) updateQuota(numFiles int) bool {
    // S3 uploads are atomic: if there is an error nothing is uploaded
    if t.File == nil && t.ErrTransfer != nil {
        return false
    }
    bytesReceived := atomic.LoadInt64(&t.BytesReceived)
    if t.transferType == TransferUpload && (numFiles != 0 || bytesReceived > 0) {
        vfolder, err := t.Connection.User.GetVirtualFolderForPath(path.Dir(t.requestPath))
        if err == nil {
            dataprovider.UpdateVirtualFolderQuota(vfolder.BaseVirtualFolder, numFiles, //nolint:errcheck
                bytesReceived-t.InitialSize, false)
            if vfolder.IsIncludedInUserQuota() {
                dataprovider.UpdateUserQuota(t.Connection.User, numFiles, bytesReceived-t.InitialSize, false) //nolint:errcheck
            }
        } else {
            dataprovider.UpdateUserQuota(t.Connection.User, numFiles, bytesReceived-t.InitialSize, false) //nolint:errcheck
        }
        return true
    }
    return false
}

// HandleThrottle manages bandwidth throttling
func (t *BaseTransfer) HandleThrottle() {
    var wantedBandwidth int64
    var transferredBytes int64
    if t.transferType == TransferDownload {
        wantedBandwidth = t.Connection.User.DownloadBandwidth
        transferredBytes = atomic.LoadInt64(&t.BytesSent)
    } else {
        wantedBandwidth = t.Connection.User.UploadBandwidth
        transferredBytes = atomic.LoadInt64(&t.BytesReceived)
    }
    if wantedBandwidth > 0 {
        // real and wanted elapsed as milliseconds, bytes as kilobytes
        realElapsed := time.Since(t.start).Nanoseconds() / 1000000
        // transferredBytes / 1000 gives kilobytes; multiplying by 1000 converts the result to milliseconds
        wantedElapsed := 1000 * (transferredBytes / 1000) / wantedBandwidth
        if wantedElapsed > realElapsed {
            toSleep := time.Duration(wantedElapsed - realElapsed)
            time.Sleep(toSleep * time.Millisecond)
        }
    }
}
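
The throttling above treats the configured bandwidth as KB/s: the wanted elapsed time is 1000 * (transferredBytes / 1000) / wantedBandwidth milliseconds, and the transfer sleeps for the difference whenever it is ahead of that schedule. For example, 512000 bytes at 100 KB/s give a wanted elapsed time of 5120 ms, so a transfer that moved those bytes in 2000 ms sleeps for about 3.1 seconds. A standalone sketch of the same computation (illustrative only):

package main

import (
    "fmt"
    "time"
)

// throttleDelay mirrors the arithmetic in HandleThrottle: bytes transferred so far,
// the configured bandwidth in KB/s and the real elapsed time give the pause to apply.
func throttleDelay(transferredBytes, bandwidthKBs int64, realElapsed time.Duration) time.Duration {
    if bandwidthKBs <= 0 {
        return 0
    }
    wantedElapsedMs := 1000 * (transferredBytes / 1000) / bandwidthKBs
    delayMs := wantedElapsedMs - realElapsed.Milliseconds()
    if delayMs <= 0 {
        return 0
    }
    return time.Duration(delayMs) * time.Millisecond
}

func main() {
    fmt.Println(throttleDelay(512000, 100, 2*time.Second)) // ~3.12s
}
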
157
common/transfer_test.go
Normal file
@@ -0,0 +1,157 @@
package common
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/drakkan/sftpgo/dataprovider"
|
||||
"github.com/drakkan/sftpgo/vfs"
|
||||
)
|
||||
|
||||
func TestTransferUpdateQuota(t *testing.T) {
|
||||
conn := NewBaseConnection("", ProtocolSFTP, dataprovider.User{}, nil)
|
||||
transfer := BaseTransfer{
|
||||
Connection: conn,
|
||||
transferType: TransferUpload,
|
||||
BytesReceived: 123,
|
||||
}
|
||||
errFake := errors.New("fake error")
|
||||
transfer.TransferError(errFake)
|
||||
assert.False(t, transfer.updateQuota(1))
|
||||
err := transfer.Close()
|
||||
if assert.Error(t, err) {
|
||||
assert.EqualError(t, err, errFake.Error())
|
||||
}
|
||||
mappedPath := filepath.Join(os.TempDir(), "vdir")
|
||||
vdirPath := "/vdir"
|
||||
conn.User.VirtualFolders = append(conn.User.VirtualFolders, vfs.VirtualFolder{
|
||||
BaseVirtualFolder: vfs.BaseVirtualFolder{
|
||||
MappedPath: mappedPath,
|
||||
},
|
||||
VirtualPath: vdirPath,
|
||||
QuotaFiles: -1,
|
||||
QuotaSize: -1,
|
||||
})
|
||||
transfer.ErrTransfer = nil
|
||||
transfer.BytesReceived = 1
|
||||
transfer.requestPath = "/vdir/file"
|
||||
assert.True(t, transfer.updateQuota(1))
|
||||
err = transfer.Close()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestTransferThrottling(t *testing.T) {
|
||||
u := dataprovider.User{
|
||||
Username: "test",
|
||||
UploadBandwidth: 50,
|
||||
DownloadBandwidth: 40,
|
||||
}
|
||||
testFileSize := int64(131072)
|
||||
wantedUploadElapsed := 1000 * (testFileSize / 1000) / u.UploadBandwidth
|
||||
wantedDownloadElapsed := 1000 * (testFileSize / 1000) / u.DownloadBandwidth
|
||||
// 100 ms tolerance
|
||||
wantedUploadElapsed -= 100
|
||||
wantedDownloadElapsed -= 100
|
||||
conn := NewBaseConnection("id", ProtocolSCP, u, nil)
|
||||
transfer := NewBaseTransfer(nil, conn, nil, "", "", TransferUpload, 0, 0, true)
|
||||
transfer.BytesReceived = testFileSize
|
||||
transfer.Connection.UpdateLastActivity()
|
||||
startTime := transfer.Connection.GetLastActivity()
|
||||
transfer.HandleThrottle()
|
||||
elapsed := time.Since(startTime).Nanoseconds() / 1000000
|
||||
assert.GreaterOrEqual(t, elapsed, wantedUploadElapsed, "upload bandwidth throttling not respected")
|
||||
err := transfer.Close()
|
||||
assert.NoError(t, err)
|
||||
|
||||
transfer = NewBaseTransfer(nil, conn, nil, "", "", TransferDownload, 0, 0, true)
|
||||
transfer.BytesSent = testFileSize
|
||||
transfer.Connection.UpdateLastActivity()
|
||||
startTime = transfer.Connection.GetLastActivity()
|
||||
|
||||
transfer.HandleThrottle()
|
||||
elapsed = time.Since(startTime).Nanoseconds() / 1000000
|
||||
assert.GreaterOrEqual(t, elapsed, wantedDownloadElapsed, "download bandwidth throttling not respected")
|
||||
err = transfer.Close()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestTransferErrors(t *testing.T) {
|
||||
isCancelled := false
|
||||
cancelFn := func() {
|
||||
isCancelled = true
|
||||
}
|
||||
testFile := filepath.Join(os.TempDir(), "transfer_test_file")
|
||||
fs := vfs.NewOsFs("id", os.TempDir(), nil)
|
||||
u := dataprovider.User{
|
||||
Username: "test",
|
||||
HomeDir: os.TempDir(),
|
||||
}
|
||||
err := ioutil.WriteFile(testFile, []byte("test data"), os.ModePerm)
|
||||
assert.NoError(t, err)
|
||||
file, err := os.Open(testFile)
|
||||
if !assert.NoError(t, err) {
|
||||
assert.FailNow(t, "unable to open test file")
|
||||
}
|
||||
conn := NewBaseConnection("id", ProtocolSFTP, u, fs)
|
||||
transfer := NewBaseTransfer(file, conn, cancelFn, testFile, "/transfer_test_file", TransferUpload, 0, 0, true)
|
||||
errFake := errors.New("err fake")
|
||||
transfer.BytesReceived = 9
|
||||
transfer.TransferError(ErrQuotaExceeded)
|
||||
assert.True(t, isCancelled)
|
||||
transfer.TransferError(errFake)
|
||||
assert.Error(t, transfer.ErrTransfer, ErrQuotaExceeded.Error())
|
||||
// the file is closed from the embedding struct before to call close
|
||||
err = file.Close()
|
||||
assert.NoError(t, err)
|
||||
err = transfer.Close()
|
||||
if assert.Error(t, err) {
|
||||
assert.Error(t, err, ErrQuotaExceeded.Error())
|
||||
}
|
||||
assert.NoFileExists(t, testFile)
|
||||
|
||||
err = ioutil.WriteFile(testFile, []byte("test data"), os.ModePerm)
|
||||
assert.NoError(t, err)
|
||||
file, err = os.Open(testFile)
|
||||
if !assert.NoError(t, err) {
|
||||
assert.FailNow(t, "unable to open test file")
|
||||
}
|
||||
fsPath := filepath.Join(os.TempDir(), "test_file")
|
||||
transfer = NewBaseTransfer(file, conn, nil, fsPath, "/test_file", TransferUpload, 0, 0, true)
|
||||
transfer.BytesReceived = 9
|
||||
transfer.TransferError(errFake)
|
||||
assert.Error(t, transfer.ErrTransfer, errFake.Error())
|
||||
// the file is closed from the embedding struct before to call close
|
||||
err = file.Close()
|
||||
assert.NoError(t, err)
|
||||
err = transfer.Close()
|
||||
if assert.Error(t, err) {
|
||||
assert.Error(t, err, errFake.Error())
|
||||
}
|
||||
assert.NoFileExists(t, testFile)
|
||||
|
||||
err = ioutil.WriteFile(testFile, []byte("test data"), os.ModePerm)
|
||||
assert.NoError(t, err)
|
||||
file, err = os.Open(testFile)
|
||||
if !assert.NoError(t, err) {
|
||||
assert.FailNow(t, "unable to open test file")
|
||||
}
|
||||
transfer = NewBaseTransfer(file, conn, nil, fsPath, "/test_file", TransferUpload, 0, 0, true)
|
||||
transfer.BytesReceived = 9
|
||||
// the file is closed from the embedding struct before to call close
|
||||
err = file.Close()
|
||||
assert.NoError(t, err)
|
||||
err = transfer.Close()
|
||||
assert.NoError(t, err)
|
||||
assert.NoFileExists(t, testFile)
|
||||
assert.FileExists(t, fsPath)
|
||||
err = os.Remove(fsPath)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Len(t, conn.GetTransfers(), 0)
|
||||
}
|
138
config/config.go
@@ -1,8 +1,4 @@
// Package config manages the configuration.
|
||||
// Configuration is loaded from sftpgo.conf file.
|
||||
// If sftpgo.conf is not found or cannot be readed or decoded as json the default configuration is used.
|
||||
// The default configuration an be found inside the source tree:
|
||||
// https://github.com/drakkan/sftpgo/blob/master/sftpgo.conf
|
||||
// Package config manages the configuration
|
||||
package config
|
||||
|
||||
import (
|
||||
|
@ -11,6 +7,7 @@ import (
|
|||
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/drakkan/sftpgo/common"
|
||||
"github.com/drakkan/sftpgo/dataprovider"
|
||||
"github.com/drakkan/sftpgo/httpclient"
|
||||
"github.com/drakkan/sftpgo/httpd"
|
||||
|
@ -36,27 +33,32 @@ var (
|
|||
)
|
||||
|
||||
type globalConfig struct {
|
||||
SFTPD sftpd.Configuration `json:"sftpd" mapstructure:"sftpd"`
|
||||
ProviderConf dataprovider.Config `json:"data_provider" mapstructure:"data_provider"`
|
||||
HTTPDConfig httpd.Conf `json:"httpd" mapstructure:"httpd"`
|
||||
HTTPConfig httpclient.Config `json:"http" mapstructure:"http"`
|
||||
Common common.Configuration `json:"common" mapstructure:"common"`
|
||||
SFTPD sftpd.Configuration `json:"sftpd" mapstructure:"sftpd"`
|
||||
ProviderConf dataprovider.Config `json:"data_provider" mapstructure:"data_provider"`
|
||||
HTTPDConfig httpd.Conf `json:"httpd" mapstructure:"httpd"`
|
||||
HTTPConfig httpclient.Config `json:"http" mapstructure:"http"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
// create a default configuration to use if no config file is provided
|
||||
globalConf = globalConfig{
|
||||
SFTPD: sftpd.Configuration{
|
||||
Banner: defaultBanner,
|
||||
BindPort: 2022,
|
||||
BindAddress: "",
|
||||
IdleTimeout: 15,
|
||||
MaxAuthTries: 0,
|
||||
Umask: "0022",
|
||||
UploadMode: 0,
|
||||
Actions: sftpd.Actions{
|
||||
Common: common.Configuration{
|
||||
IdleTimeout: 15,
|
||||
UploadMode: 0,
|
||||
Actions: common.ProtocolActions{
|
||||
ExecuteOn: []string{},
|
||||
Hook: "",
|
||||
},
|
||||
SetstatMode: 0,
|
||||
ProxyProtocol: 0,
|
||||
ProxyAllowed: []string{},
|
||||
},
|
||||
SFTPD: sftpd.Configuration{
|
||||
Banner: defaultBanner,
|
||||
BindPort: 2022,
|
||||
BindAddress: "",
|
||||
MaxAuthTries: 0,
|
||||
HostKeys: []string{},
|
||||
KexAlgorithms: []string{},
|
||||
Ciphers: []string{},
|
||||
|
@ -65,8 +67,6 @@ func init() {
|
|||
LoginBannerFile: "",
|
||||
EnabledSSHCommands: sftpd.GetDefaultSSHCommands(),
|
||||
KeyboardInteractiveHook: "",
|
||||
ProxyProtocol: 0,
|
||||
ProxyAllowed: []string{},
|
||||
},
|
||||
ProviderConf: dataprovider.Config{
|
||||
Driver: "sqlite",
|
||||
|
@ -82,7 +82,7 @@ func init() {
|
|||
TrackQuota: 1,
|
||||
PoolSize: 0,
|
||||
UsersBaseDir: "",
|
||||
Actions: dataprovider.Actions{
|
||||
Actions: dataprovider.UserActions{
|
||||
ExecuteOn: []string{},
|
||||
Hook: "",
|
||||
},
|
||||
|
@ -116,6 +116,16 @@ func init() {
|
|||
viper.AllowEmptyEnv(true)
|
||||
}
|
||||
|
||||
// GetCommonConfig returns the common protocols configuration
|
||||
func GetCommonConfig() common.Configuration {
|
||||
return globalConf.Common
|
||||
}
|
||||
|
||||
// SetCommonConfig sets the common protocols configuration
|
||||
func SetCommonConfig(config common.Configuration) {
|
||||
globalConf.Common = config
|
||||
}
|
||||
|
||||
// GetSFTPDConfig returns the configuration for the SFTP server
|
||||
func GetSFTPDConfig() sftpd.Configuration {
|
||||
return globalConf.SFTPD
|
||||
|
@ -181,6 +191,7 @@ func LoadConfig(configDir, configName string) error {
|
|||
logger.WarnToConsole("error parsing configuration file: %v. Default configuration will be used.", err)
|
||||
return err
|
||||
}
|
||||
checkCommonParamsCompatibility()
|
||||
if strings.TrimSpace(globalConf.SFTPD.Banner) == "" {
|
||||
globalConf.SFTPD.Banner = defaultBanner
|
||||
}
|
||||
|
@ -190,17 +201,17 @@ func LoadConfig(configDir, configName string) error {
|
|||
logger.Warn(logSender, "", "Configuration error: %v", err)
|
||||
logger.WarnToConsole("Configuration error: %v", err)
|
||||
}
|
||||
if globalConf.SFTPD.UploadMode < 0 || globalConf.SFTPD.UploadMode > 2 {
|
||||
if globalConf.Common.UploadMode < 0 || globalConf.Common.UploadMode > 2 {
|
||||
err = fmt.Errorf("invalid upload_mode 0, 1 and 2 are supported, configured: %v reset upload_mode to 0",
|
||||
globalConf.SFTPD.UploadMode)
|
||||
globalConf.SFTPD.UploadMode = 0
|
||||
globalConf.Common.UploadMode)
|
||||
globalConf.Common.UploadMode = 0
|
||||
logger.Warn(logSender, "", "Configuration error: %v", err)
|
||||
logger.WarnToConsole("Configuration error: %v", err)
|
||||
}
|
||||
if globalConf.SFTPD.ProxyProtocol < 0 || globalConf.SFTPD.ProxyProtocol > 2 {
|
||||
if globalConf.Common.ProxyProtocol < 0 || globalConf.Common.ProxyProtocol > 2 {
|
||||
err = fmt.Errorf("invalid proxy_protocol 0, 1 and 2 are supported, configured: %v reset proxy_protocol to 0",
|
||||
globalConf.SFTPD.ProxyProtocol)
|
||||
globalConf.SFTPD.ProxyProtocol = 0
|
||||
globalConf.Common.ProxyProtocol)
|
||||
globalConf.Common.ProxyProtocol = 0
|
||||
logger.Warn(logSender, "", "Configuration error: %v", err)
|
||||
logger.WarnToConsole("Configuration error: %v", err)
|
||||
}
|
||||
|
@ -216,53 +227,11 @@ func LoadConfig(configDir, configName string) error {
|
|||
logger.Warn(logSender, "", "Configuration error: %v", err)
|
||||
logger.WarnToConsole("Configuration error: %v", err)
|
||||
}
|
||||
checkHooksCompatibility()
|
||||
checkHostKeyCompatibility()
|
||||
logger.Debug(logSender, "", "config file used: '%#v', config loaded: %+v", viper.ConfigFileUsed(), getRedactedGlobalConf())
|
||||
return err
|
||||
}
|
||||
|
||||
func checkHooksCompatibility() {
|
||||
// we copy deprecated fields to new ones to keep backward compatibility so lint is disabled
|
||||
if len(globalConf.ProviderConf.ExternalAuthProgram) > 0 && len(globalConf.ProviderConf.ExternalAuthHook) == 0 { //nolint:staticcheck
|
||||
logger.Warn(logSender, "", "external_auth_program is deprecated, please use external_auth_hook")
|
||||
logger.WarnToConsole("external_auth_program is deprecated, please use external_auth_hook")
|
||||
globalConf.ProviderConf.ExternalAuthHook = globalConf.ProviderConf.ExternalAuthProgram //nolint:staticcheck
|
||||
}
|
||||
if len(globalConf.ProviderConf.PreLoginProgram) > 0 && len(globalConf.ProviderConf.PreLoginHook) == 0 { //nolint:staticcheck
|
||||
logger.Warn(logSender, "", "pre_login_program is deprecated, please use pre_login_hook")
|
||||
logger.WarnToConsole("pre_login_program is deprecated, please use pre_login_hook")
|
||||
globalConf.ProviderConf.PreLoginHook = globalConf.ProviderConf.PreLoginProgram //nolint:staticcheck
|
||||
}
|
||||
if len(globalConf.SFTPD.KeyboardInteractiveProgram) > 0 && len(globalConf.SFTPD.KeyboardInteractiveHook) == 0 { //nolint:staticcheck
|
||||
logger.Warn(logSender, "", "keyboard_interactive_auth_program is deprecated, please use keyboard_interactive_auth_hook")
|
||||
logger.WarnToConsole("keyboard_interactive_auth_program is deprecated, please use keyboard_interactive_auth_hook")
|
||||
globalConf.SFTPD.KeyboardInteractiveHook = globalConf.SFTPD.KeyboardInteractiveProgram //nolint:staticcheck
|
||||
}
|
||||
if len(globalConf.SFTPD.Actions.Hook) == 0 {
|
||||
if len(globalConf.SFTPD.Actions.HTTPNotificationURL) > 0 { //nolint:staticcheck
|
||||
logger.Warn(logSender, "", "http_notification_url is deprecated, please use hook")
|
||||
logger.WarnToConsole("http_notification_url is deprecated, please use hook")
|
||||
globalConf.SFTPD.Actions.Hook = globalConf.SFTPD.Actions.HTTPNotificationURL //nolint:staticcheck
|
||||
} else if len(globalConf.SFTPD.Actions.Command) > 0 { //nolint:staticcheck
|
||||
logger.Warn(logSender, "", "command is deprecated, please use hook")
|
||||
logger.WarnToConsole("command is deprecated, please use hook")
|
||||
globalConf.SFTPD.Actions.Hook = globalConf.SFTPD.Actions.Command //nolint:staticcheck
|
||||
}
|
||||
}
|
||||
if len(globalConf.ProviderConf.Actions.Hook) == 0 {
|
||||
if len(globalConf.ProviderConf.Actions.HTTPNotificationURL) > 0 { //nolint:staticcheck
|
||||
logger.Warn(logSender, "", "http_notification_url is deprecated, please use hook")
|
||||
logger.WarnToConsole("http_notification_url is deprecated, please use hook")
|
||||
globalConf.ProviderConf.Actions.Hook = globalConf.ProviderConf.Actions.HTTPNotificationURL //nolint:staticcheck
|
||||
} else if len(globalConf.ProviderConf.Actions.Command) > 0 { //nolint:staticcheck
|
||||
logger.Warn(logSender, "", "command is deprecated, please use hook")
|
||||
logger.WarnToConsole("command is deprecated, please use hook")
|
||||
globalConf.ProviderConf.Actions.Hook = globalConf.ProviderConf.Actions.Command //nolint:staticcheck
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func checkHostKeyCompatibility() {
|
||||
// we copy deprecated fields to new ones to keep backward compatibility so lint is disabled
|
||||
if len(globalConf.SFTPD.Keys) > 0 && len(globalConf.SFTPD.HostKeys) == 0 { //nolint:staticcheck
|
||||
|
@ -273,3 +242,34 @@ func checkHostKeyCompatibility() {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func checkCommonParamsCompatibility() {
|
||||
// we copy deprecated fields to new ones to keep backward compatibility so lint is disabled
|
||||
if globalConf.SFTPD.IdleTimeout > 0 { //nolint:staticcheck
|
||||
logger.Warn(logSender, "", "sftpd.idle_timeout is deprecated, please use common.idle_timeout")
|
||||
logger.WarnToConsole("sftpd.idle_timeout is deprecated, please use common.idle_timeout")
|
||||
globalConf.Common.IdleTimeout = globalConf.SFTPD.IdleTimeout //nolint:staticcheck
|
||||
}
|
||||
if len(globalConf.SFTPD.Actions.Hook) > 0 && len(globalConf.Common.Actions.Hook) == 0 { //nolint:staticcheck
|
||||
logger.Warn(logSender, "", "sftpd.actions is deprecated, please use common.actions")
|
||||
logger.WarnToConsole("sftpd.actions is deprecated, please use common.actions")
|
||||
globalConf.Common.Actions.ExecuteOn = globalConf.SFTPD.Actions.ExecuteOn //nolint:staticcheck
|
||||
globalConf.Common.Actions.Hook = globalConf.SFTPD.Actions.Hook //nolint:staticcheck
|
||||
}
|
||||
if globalConf.SFTPD.SetstatMode > 0 && globalConf.Common.SetstatMode == 0 { //nolint:staticcheck
|
||||
logger.Warn(logSender, "", "sftpd.setstat_mode is deprecated, please use common.setstat_mode")
|
||||
logger.WarnToConsole("sftpd.setstat_mode is deprecated, please use common.setstat_mode")
|
||||
globalConf.Common.SetstatMode = globalConf.SFTPD.SetstatMode //nolint:staticcheck
|
||||
}
|
||||
if globalConf.SFTPD.UploadMode > 0 && globalConf.Common.UploadMode == 0 { //nolint:staticcheck
|
||||
logger.Warn(logSender, "", "sftpd.upload_mode is deprecated, please use common.upload_mode")
|
||||
logger.WarnToConsole("sftpd.upload_mode is deprecated, please use common.upload_mode")
|
||||
globalConf.Common.UploadMode = globalConf.SFTPD.UploadMode //nolint:staticcheck
|
||||
}
|
||||
if globalConf.SFTPD.ProxyProtocol > 0 && globalConf.Common.ProxyProtocol == 0 { //nolint:staticcheck
|
||||
logger.Warn(logSender, "", "sftpd.proxy_protocol is deprecated, please use common.proxy_protocol")
|
||||
logger.WarnToConsole("sftpd.proxy_protocol is deprecated, please use common.proxy_protocol")
|
||||
globalConf.Common.ProxyProtocol = globalConf.SFTPD.ProxyProtocol //nolint:staticcheck
|
||||
globalConf.Common.ProxyAllowed = globalConf.SFTPD.ProxyAllowed //nolint:staticcheck
|
||||
}
|
||||
}
|
||||
|
|
|
@ -10,6 +10,7 @@ import (
|
|||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/drakkan/sftpgo/common"
|
||||
"github.com/drakkan/sftpgo/config"
|
||||
"github.com/drakkan/sftpgo/dataprovider"
|
||||
"github.com/drakkan/sftpgo/httpclient"
|
||||
|
@ -74,10 +75,10 @@ func TestInvalidUploadMode(t *testing.T) {
|
|||
configFilePath := filepath.Join(configDir, confName)
|
||||
err := config.LoadConfig(configDir, configName)
|
||||
assert.NoError(t, err)
|
||||
sftpdConf := config.GetSFTPDConfig()
|
||||
sftpdConf.UploadMode = 10
|
||||
c := make(map[string]sftpd.Configuration)
|
||||
c["sftpd"] = sftpdConf
|
||||
commonConf := config.GetCommonConfig()
|
||||
commonConf.UploadMode = 10
|
||||
c := make(map[string]common.Configuration)
|
||||
c["common"] = commonConf
|
||||
jsonConf, err := json.Marshal(c)
|
||||
assert.NoError(t, err)
|
||||
err = ioutil.WriteFile(configFilePath, jsonConf, 0666)
|
||||
|
@ -134,10 +135,10 @@ func TestInvalidProxyProtocol(t *testing.T) {
|
|||
configFilePath := filepath.Join(configDir, confName)
|
||||
err := config.LoadConfig(configDir, configName)
|
||||
assert.NoError(t, err)
|
||||
sftpdConf := config.GetSFTPDConfig()
|
||||
sftpdConf.ProxyProtocol = 10
|
||||
c := make(map[string]sftpd.Configuration)
|
||||
c["sftpd"] = sftpdConf
|
||||
commonConf := config.GetCommonConfig()
|
||||
commonConf.ProxyProtocol = 10
|
||||
c := make(map[string]common.Configuration)
|
||||
c["common"] = commonConf
|
||||
jsonConf, err := json.Marshal(c)
|
||||
assert.NoError(t, err)
|
||||
err = ioutil.WriteFile(configFilePath, jsonConf, 0666)
|
||||
|
@ -168,72 +169,37 @@ func TestInvalidUsersBaseDir(t *testing.T) {
assert.NoError(t, err)
}

func TestHookCompatibity(t *testing.T) {
func TestCommonParamsCompatibility(t *testing.T) {
configDir := ".."
confName := tempConfigName + ".json"
configFilePath := filepath.Join(configDir, confName)
err := config.LoadConfig(configDir, configName)
assert.NoError(t, err)
providerConf := config.GetProviderConf()
providerConf.ExternalAuthProgram = "ext_auth_program" //nolint:staticcheck
providerConf.PreLoginProgram = "pre_login_program" //nolint:staticcheck
providerConf.Actions.Command = "/tmp/test_cmd" //nolint:staticcheck
c := make(map[string]dataprovider.Config)
c["data_provider"] = providerConf
sftpdConf := config.GetSFTPDConfig()
sftpdConf.IdleTimeout = 21 //nolint:staticcheck
sftpdConf.Actions.Hook = "http://hook"
sftpdConf.Actions.ExecuteOn = []string{"upload"}
sftpdConf.SetstatMode = 1 //nolint:staticcheck
sftpdConf.UploadMode = common.UploadModeAtomicWithResume //nolint:staticcheck
sftpdConf.ProxyProtocol = 1 //nolint:staticcheck
sftpdConf.ProxyAllowed = []string{"192.168.1.1"} //nolint:staticcheck
c := make(map[string]sftpd.Configuration)
c["sftpd"] = sftpdConf
jsonConf, err := json.Marshal(c)
assert.NoError(t, err)
err = ioutil.WriteFile(configFilePath, jsonConf, 0666)
assert.NoError(t, err)
err = config.LoadConfig(configDir, tempConfigName)
assert.NoError(t, err)
providerConf = config.GetProviderConf()
assert.Equal(t, "ext_auth_program", providerConf.ExternalAuthHook)
assert.Equal(t, "pre_login_program", providerConf.PreLoginHook)
assert.Equal(t, "/tmp/test_cmd", providerConf.Actions.Hook)
err = os.Remove(configFilePath)
assert.NoError(t, err)
providerConf.Actions.Hook = ""
providerConf.Actions.HTTPNotificationURL = "http://example.com/notify" //nolint:staticcheck
c = make(map[string]dataprovider.Config)
c["data_provider"] = providerConf
jsonConf, err = json.Marshal(c)
assert.NoError(t, err)
err = ioutil.WriteFile(configFilePath, jsonConf, 0666)
assert.NoError(t, err)
err = config.LoadConfig(configDir, tempConfigName)
assert.NoError(t, err)
providerConf = config.GetProviderConf()
assert.Equal(t, "http://example.com/notify", providerConf.Actions.Hook)
err = os.Remove(configFilePath)
assert.NoError(t, err)
sftpdConf := config.GetSFTPDConfig()
sftpdConf.KeyboardInteractiveProgram = "key_int_program" //nolint:staticcheck
sftpdConf.Actions.Command = "/tmp/sftp_cmd" //nolint:staticcheck
cnf := make(map[string]sftpd.Configuration)
cnf["sftpd"] = sftpdConf
jsonConf, err = json.Marshal(cnf)
assert.NoError(t, err)
err = ioutil.WriteFile(configFilePath, jsonConf, 0666)
assert.NoError(t, err)
err = config.LoadConfig(configDir, tempConfigName)
assert.NoError(t, err)
sftpdConf = config.GetSFTPDConfig()
assert.Equal(t, "key_int_program", sftpdConf.KeyboardInteractiveHook)
assert.Equal(t, "/tmp/sftp_cmd", sftpdConf.Actions.Hook)
err = os.Remove(configFilePath)
assert.NoError(t, err)
sftpdConf.Actions.Hook = ""
sftpdConf.Actions.HTTPNotificationURL = "http://example.com/sftp" //nolint:staticcheck
cnf = make(map[string]sftpd.Configuration)
cnf["sftpd"] = sftpdConf
jsonConf, err = json.Marshal(cnf)
assert.NoError(t, err)
err = ioutil.WriteFile(configFilePath, jsonConf, 0666)
assert.NoError(t, err)
err = config.LoadConfig(configDir, tempConfigName)
assert.NoError(t, err)
sftpdConf = config.GetSFTPDConfig()
assert.Equal(t, "http://example.com/sftp", sftpdConf.Actions.Hook)
commonConf := config.GetCommonConfig()
assert.Equal(t, 21, commonConf.IdleTimeout)
assert.Equal(t, "http://hook", commonConf.Actions.Hook)
assert.Len(t, commonConf.Actions.ExecuteOn, 1)
assert.True(t, utils.IsStringInSlice("upload", commonConf.Actions.ExecuteOn))
assert.Equal(t, 1, commonConf.SetstatMode)
assert.Equal(t, 1, commonConf.ProxyProtocol)
assert.Len(t, commonConf.ProxyAllowed, 1)
assert.True(t, utils.IsStringInSlice("192.168.1.1", commonConf.ProxyAllowed))
err = os.Remove(configFilePath)
assert.NoError(t, err)
}
@ -271,9 +237,9 @@ func TestHostKeyCompatibility(t *testing.T) {

func TestSetGetConfig(t *testing.T) {
sftpdConf := config.GetSFTPDConfig()
sftpdConf.IdleTimeout = 3
sftpdConf.MaxAuthTries = 10
config.SetSFTPDConfig(sftpdConf)
assert.Equal(t, sftpdConf.IdleTimeout, config.GetSFTPDConfig().IdleTimeout)
assert.Equal(t, sftpdConf.MaxAuthTries, config.GetSFTPDConfig().MaxAuthTries)
dataProviderConf := config.GetProviderConf()
dataProviderConf.Host = "test host"
config.SetProviderConf(dataProviderConf)
@ -282,4 +248,8 @@ func TestSetGetConfig(t *testing.T) {
httpdConf.BindAddress = "0.0.0.0"
config.SetHTTPDConfig(httpdConf)
assert.Equal(t, httpdConf.BindAddress, config.GetHTTPDConfig().BindAddress)
commonConf := config.GetCommonConfig()
commonConf.IdleTimeout = 10
config.SetCommonConfig(commonConf)
assert.Equal(t, commonConf.IdleTimeout, config.GetCommonConfig().IdleTimeout)
}
@ -116,15 +116,10 @@ type schemaVersion struct {
Version int
}

// Actions to execute on user create, update, delete.
// An external command can be executed and/or an HTTP notification can be fired
type Actions struct {
// UserActions defines the action to execute on user create, update, delete.
type UserActions struct {
// Valid values are add, update, delete. Empty slice to disable
ExecuteOn []string `json:"execute_on" mapstructure:"execute_on"`
// Deprecated: please use Hook
Command string `json:"command" mapstructure:"command"`
// Deprecated: please use Hook
HTTPNotificationURL string `json:"http_notification_url" mapstructure:"http_notification_url"`
// Absolute path to an external program or an HTTP URL
Hook string `json:"hook" mapstructure:"hook"`
}
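The renamed `UserActions` struct keeps the deprecated `Command` and `HTTPNotificationURL` fields next to the new `Hook`, so old configuration files keep loading; the compatibility test earlier in this diff checks exactly that mapping. A rough sketch of how such values could be merged into `Hook` (illustrative helper, not the actual loader; giving `Command` precedence over `HTTPNotificationURL` is an assumption made for the example):

```go
package main

import "fmt"

// userActions mirrors only the fields needed for the sketch.
type userActions struct {
	ExecuteOn           []string
	Command             string // deprecated
	HTTPNotificationURL string // deprecated
	Hook                string
}

// migrateActions copies a deprecated value into Hook when Hook is unset.
// The precedence chosen here is an assumption, not taken from the source.
func migrateActions(a *userActions) {
	if a.Hook != "" {
		return
	}
	if a.Command != "" {
		a.Hook = a.Command
		return
	}
	if a.HTTPNotificationURL != "" {
		a.Hook = a.HTTPNotificationURL
	}
}

func main() {
	a := &userActions{ExecuteOn: []string{"add"}, Command: "/tmp/test_cmd"}
	migrateActions(a)
	fmt.Println(a.Hook) // /tmp/test_cmd
}
```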
@ -175,9 +170,7 @@ type Config struct {
UsersBaseDir string `json:"users_base_dir" mapstructure:"users_base_dir"`
// Actions to execute on user add, update, delete.
// Update action will not be fired for internal updates such as the last login or the user quota fields.
Actions Actions `json:"actions" mapstructure:"actions"`
// Deprecated: please use ExternalAuthHook
ExternalAuthProgram string `json:"external_auth_program" mapstructure:"external_auth_program"`
Actions UserActions `json:"actions" mapstructure:"actions"`
// Absolute path to an external program or an HTTP URL to invoke for users authentication.
// Leave empty to use builtin authentication.
// The external program can read the following environment variables to get info about the user trying
@ -227,8 +220,6 @@ type Config struct {
// Google Cloud Storage credentials. It can be a path relative to the config dir or an
// absolute path
CredentialsPath string `json:"credentials_path" mapstructure:"credentials_path"`
// Deprecated: please use PreLoginHook
PreLoginProgram string `json:"pre_login_program" mapstructure:"pre_login_program"`
// Absolute path to an external program or an HTTP URL to invoke just before the user login.
// This program/URL allows to modify or create the user trying to login.
// It is useful if you have users with dynamic fields to update just before the login.
@ -360,10 +351,6 @@ type Provider interface {
migrateDatabase() error
}

func init() {
availabilityTicker = time.NewTicker(30 * time.Second)
}

// Initialize the data provider.
// An error is returned if the configured driver is invalid or if the data provider cannot be initialized
func Initialize(cnf Config, basePath string) error {
@ -664,8 +651,11 @@ func GetProviderStatus() error {
// This method is used in test cases.
// Closing an uninitialized provider is not supported
func Close() error {
availabilityTicker.Stop()
availabilityTickerDone <- true
if availabilityTicker != nil {
availabilityTicker.Stop()
availabilityTickerDone <- true
availabilityTicker = nil
}
return provider.close()
}
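The nil check above makes the shutdown idempotent: a second call to `Close` finds `availabilityTicker` already nil and skips both the ticker stop and the channel send. The same pattern in isolation (a self-contained sketch with illustrative names, not the provider code itself):

```go
package main

import (
	"fmt"
	"time"
)

// watchdog is an illustrative stand-in for a periodic availability timer.
type watchdog struct {
	ticker *time.Ticker
	done   chan bool
}

func (w *watchdog) start() {
	w.ticker = time.NewTicker(30 * time.Second)
	w.done = make(chan bool)
	go func() {
		for {
			select {
			case <-w.done:
				return
			case <-w.ticker.C:
				// a periodic health check would run here
			}
		}
	}()
}

// stop mirrors the guarded shutdown above: without the nil check, calling it
// before start would panic on Stop, and a second call would block forever on
// the done channel because the goroutine has already exited.
func (w *watchdog) stop() {
	if w.ticker == nil {
		return
	}
	w.ticker.Stop()
	w.done <- true
	w.ticker = nil
}

func main() {
	w := &watchdog{}
	w.stop() // safe: never started
	w.start()
	w.stop()
	w.stop() // safe: already stopped
	fmt.Println("shutdown is idempotent")
}
```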
@ -1224,6 +1214,7 @@ func getSSLMode() string {
}

func startAvailabilityTimer() {
availabilityTicker = time.NewTicker(30 * time.Second)
availabilityTickerDone = make(chan bool)
checkDataprovider()
go func() {
@ -21,6 +21,9 @@ var (
)

type memoryProviderHandle struct {
// configuration file to use for loading users
configFile string
sync.Mutex
isClosed bool
// slice with ordered usernames
usernames []string
@ -32,9 +35,6 @@ type memoryProviderHandle struct {
vfolders map[string]vfs.BaseVirtualFolder
// slice with ordered folders mapped path
vfoldersPaths []string
// configuration file to use for loading users
configFile string
lock *sync.Mutex
}

// MemoryProvider auth provider for a memory store
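The handle drops the `lock *sync.Mutex` field in favour of an embedded `sync.Mutex`, so the later hunks can call `p.dbHandle.Lock()` directly instead of `p.dbHandle.lock.Lock()`, and the zero value needs no separate mutex allocation. A standalone comparison of the two shapes (illustrative types only):

```go
package main

import (
	"fmt"
	"sync"
)

// before: the mutex is a separate pointer field that must be initialized.
type handleWithPointer struct {
	lock  *sync.Mutex
	users map[string]int
}

// after: the mutex is embedded; its zero value is ready to use and the
// Lock/Unlock methods are promoted onto the struct itself.
type handleEmbedded struct {
	sync.Mutex
	users map[string]int
}

func main() {
	a := handleWithPointer{lock: new(sync.Mutex), users: map[string]int{}}
	a.lock.Lock()
	a.users["x"] = 1
	a.lock.Unlock()

	b := handleEmbedded{users: map[string]int{}}
	b.Lock()
	b.users["x"] = 1
	b.Unlock()

	fmt.Println(a.users["x"], b.users["x"])
}
```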
@ -60,15 +60,14 @@ func initializeMemoryProvider(basePath string) error {
|
|||
vfolders: make(map[string]vfs.BaseVirtualFolder),
|
||||
vfoldersPaths: []string{},
|
||||
configFile: configFile,
|
||||
lock: new(sync.Mutex),
|
||||
},
|
||||
}
|
||||
return provider.reloadConfig()
|
||||
}
|
||||
|
||||
func (p MemoryProvider) checkAvailability() error {
|
||||
p.dbHandle.lock.Lock()
|
||||
defer p.dbHandle.lock.Unlock()
|
||||
p.dbHandle.Lock()
|
||||
defer p.dbHandle.Unlock()
|
||||
if p.dbHandle.isClosed {
|
||||
return errMemoryProviderClosed
|
||||
}
|
||||
|
@ -76,8 +75,8 @@ func (p MemoryProvider) checkAvailability() error {
|
|||
}
|
||||
|
||||
func (p MemoryProvider) close() error {
|
||||
p.dbHandle.lock.Lock()
|
||||
defer p.dbHandle.lock.Unlock()
|
||||
p.dbHandle.Lock()
|
||||
defer p.dbHandle.Unlock()
|
||||
if p.dbHandle.isClosed {
|
||||
return errMemoryProviderClosed
|
||||
}
|
||||
|
@ -112,8 +111,8 @@ func (p MemoryProvider) validateUserAndPubKey(username string, pubKey []byte) (U
|
|||
}
|
||||
|
||||
func (p MemoryProvider) getUserByID(ID int64) (User, error) {
|
||||
p.dbHandle.lock.Lock()
|
||||
defer p.dbHandle.lock.Unlock()
|
||||
p.dbHandle.Lock()
|
||||
defer p.dbHandle.Unlock()
|
||||
if p.dbHandle.isClosed {
|
||||
return User{}, errMemoryProviderClosed
|
||||
}
|
||||
|
@ -124,8 +123,8 @@ func (p MemoryProvider) getUserByID(ID int64) (User, error) {
|
|||
}
|
||||
|
||||
func (p MemoryProvider) updateLastLogin(username string) error {
|
||||
p.dbHandle.lock.Lock()
|
||||
defer p.dbHandle.lock.Unlock()
|
||||
p.dbHandle.Lock()
|
||||
defer p.dbHandle.Unlock()
|
||||
if p.dbHandle.isClosed {
|
||||
return errMemoryProviderClosed
|
||||
}
|
||||
|
@ -139,8 +138,8 @@ func (p MemoryProvider) updateLastLogin(username string) error {
|
|||
}
|
||||
|
||||
func (p MemoryProvider) updateQuota(username string, filesAdd int, sizeAdd int64, reset bool) error {
|
||||
p.dbHandle.lock.Lock()
|
||||
defer p.dbHandle.lock.Unlock()
|
||||
p.dbHandle.Lock()
|
||||
defer p.dbHandle.Unlock()
|
||||
if p.dbHandle.isClosed {
|
||||
return errMemoryProviderClosed
|
||||
}
|
||||
|
@ -164,8 +163,8 @@ func (p MemoryProvider) updateQuota(username string, filesAdd int, sizeAdd int64
|
|||
}
|
||||
|
||||
func (p MemoryProvider) getUsedQuota(username string) (int, int64, error) {
|
||||
p.dbHandle.lock.Lock()
|
||||
defer p.dbHandle.lock.Unlock()
|
||||
p.dbHandle.Lock()
|
||||
defer p.dbHandle.Unlock()
|
||||
if p.dbHandle.isClosed {
|
||||
return 0, 0, errMemoryProviderClosed
|
||||
}
|
||||
|
@ -178,8 +177,8 @@ func (p MemoryProvider) getUsedQuota(username string) (int, int64, error) {
|
|||
}
|
||||
|
||||
func (p MemoryProvider) addUser(user User) error {
|
||||
p.dbHandle.lock.Lock()
|
||||
defer p.dbHandle.lock.Unlock()
|
||||
p.dbHandle.Lock()
|
||||
defer p.dbHandle.Unlock()
|
||||
if p.dbHandle.isClosed {
|
||||
return errMemoryProviderClosed
|
||||
}
|
||||
|
@ -205,8 +204,8 @@ func (p MemoryProvider) addUser(user User) error {
|
|||
}
|
||||
|
||||
func (p MemoryProvider) updateUser(user User) error {
|
||||
p.dbHandle.lock.Lock()
|
||||
defer p.dbHandle.lock.Unlock()
|
||||
p.dbHandle.Lock()
|
||||
defer p.dbHandle.Unlock()
|
||||
if p.dbHandle.isClosed {
|
||||
return errMemoryProviderClosed
|
||||
}
|
||||
|
@ -231,8 +230,8 @@ func (p MemoryProvider) updateUser(user User) error {
|
|||
}
|
||||
|
||||
func (p MemoryProvider) deleteUser(user User) error {
|
||||
p.dbHandle.lock.Lock()
|
||||
defer p.dbHandle.lock.Unlock()
|
||||
p.dbHandle.Lock()
|
||||
defer p.dbHandle.Unlock()
|
||||
if p.dbHandle.isClosed {
|
||||
return errMemoryProviderClosed
|
||||
}
|
||||
|
@ -255,8 +254,8 @@ func (p MemoryProvider) deleteUser(user User) error {
|
|||
}
|
||||
|
||||
func (p MemoryProvider) dumpUsers() ([]User, error) {
|
||||
p.dbHandle.lock.Lock()
|
||||
defer p.dbHandle.lock.Unlock()
|
||||
p.dbHandle.Lock()
|
||||
defer p.dbHandle.Unlock()
|
||||
users := make([]User, 0, len(p.dbHandle.usernames))
|
||||
var err error
|
||||
if p.dbHandle.isClosed {
|
||||
|
@ -274,8 +273,8 @@ func (p MemoryProvider) dumpUsers() ([]User, error) {
|
|||
}
|
||||
|
||||
func (p MemoryProvider) dumpFolders() ([]vfs.BaseVirtualFolder, error) {
|
||||
p.dbHandle.lock.Lock()
|
||||
defer p.dbHandle.lock.Unlock()
|
||||
p.dbHandle.Lock()
|
||||
defer p.dbHandle.Unlock()
|
||||
folders := make([]vfs.BaseVirtualFolder, 0, len(p.dbHandle.vfoldersPaths))
|
||||
if p.dbHandle.isClosed {
|
||||
return folders, errMemoryProviderClosed
|
||||
|
@ -289,8 +288,8 @@ func (p MemoryProvider) dumpFolders() ([]vfs.BaseVirtualFolder, error) {
|
|||
func (p MemoryProvider) getUsers(limit int, offset int, order string, username string) ([]User, error) {
|
||||
users := make([]User, 0, limit)
|
||||
var err error
|
||||
p.dbHandle.lock.Lock()
|
||||
defer p.dbHandle.lock.Unlock()
|
||||
p.dbHandle.Lock()
|
||||
defer p.dbHandle.Unlock()
|
||||
if p.dbHandle.isClosed {
|
||||
return users, errMemoryProviderClosed
|
||||
}
|
||||
|
@ -337,8 +336,8 @@ func (p MemoryProvider) getUsers(limit int, offset int, order string, username s
|
|||
}
|
||||
|
||||
func (p MemoryProvider) userExists(username string) (User, error) {
|
||||
p.dbHandle.lock.Lock()
|
||||
defer p.dbHandle.lock.Unlock()
|
||||
p.dbHandle.Lock()
|
||||
defer p.dbHandle.Unlock()
|
||||
if p.dbHandle.isClosed {
|
||||
return User{}, errMemoryProviderClosed
|
||||
}
|
||||
|
@ -353,8 +352,8 @@ func (p MemoryProvider) userExistsInternal(username string) (User, error) {
|
|||
}
|
||||
|
||||
func (p MemoryProvider) updateFolderQuota(mappedPath string, filesAdd int, sizeAdd int64, reset bool) error {
|
||||
p.dbHandle.lock.Lock()
|
||||
defer p.dbHandle.lock.Unlock()
|
||||
p.dbHandle.Lock()
|
||||
defer p.dbHandle.Unlock()
|
||||
if p.dbHandle.isClosed {
|
||||
return errMemoryProviderClosed
|
||||
}
|
||||
|
@ -376,8 +375,8 @@ func (p MemoryProvider) updateFolderQuota(mappedPath string, filesAdd int, sizeA
|
|||
}
|
||||
|
||||
func (p MemoryProvider) getUsedFolderQuota(mappedPath string) (int, int64, error) {
|
||||
p.dbHandle.lock.Lock()
|
||||
defer p.dbHandle.lock.Unlock()
|
||||
p.dbHandle.Lock()
|
||||
defer p.dbHandle.Unlock()
|
||||
if p.dbHandle.isClosed {
|
||||
return 0, 0, errMemoryProviderClosed
|
||||
}
|
||||
|
@ -458,8 +457,8 @@ func (p MemoryProvider) folderExistsInternal(mappedPath string) (vfs.BaseVirtual
|
|||
func (p MemoryProvider) getFolders(limit, offset int, order, folderPath string) ([]vfs.BaseVirtualFolder, error) {
|
||||
folders := make([]vfs.BaseVirtualFolder, 0, limit)
|
||||
var err error
|
||||
p.dbHandle.lock.Lock()
|
||||
defer p.dbHandle.lock.Unlock()
|
||||
p.dbHandle.Lock()
|
||||
defer p.dbHandle.Unlock()
|
||||
if p.dbHandle.isClosed {
|
||||
return folders, errMemoryProviderClosed
|
||||
}
|
||||
|
@ -507,8 +506,8 @@ func (p MemoryProvider) getFolders(limit, offset int, order, folderPath string)
|
|||
}
|
||||
|
||||
func (p MemoryProvider) getFolderByPath(mappedPath string) (vfs.BaseVirtualFolder, error) {
|
||||
p.dbHandle.lock.Lock()
|
||||
defer p.dbHandle.lock.Unlock()
|
||||
p.dbHandle.Lock()
|
||||
defer p.dbHandle.Unlock()
|
||||
if p.dbHandle.isClosed {
|
||||
return vfs.BaseVirtualFolder{}, errMemoryProviderClosed
|
||||
}
|
||||
|
@ -516,8 +515,8 @@ func (p MemoryProvider) getFolderByPath(mappedPath string) (vfs.BaseVirtualFolde
|
|||
}
|
||||
|
||||
func (p MemoryProvider) addFolder(folder vfs.BaseVirtualFolder) error {
|
||||
p.dbHandle.lock.Lock()
|
||||
defer p.dbHandle.lock.Unlock()
|
||||
p.dbHandle.Lock()
|
||||
defer p.dbHandle.Unlock()
|
||||
if p.dbHandle.isClosed {
|
||||
return errMemoryProviderClosed
|
||||
}
|
||||
|
@ -537,8 +536,8 @@ func (p MemoryProvider) addFolder(folder vfs.BaseVirtualFolder) error {
|
|||
}
|
||||
|
||||
func (p MemoryProvider) deleteFolder(folder vfs.BaseVirtualFolder) error {
|
||||
p.dbHandle.lock.Lock()
|
||||
defer p.dbHandle.lock.Unlock()
|
||||
p.dbHandle.Lock()
|
||||
defer p.dbHandle.Unlock()
|
||||
if p.dbHandle.isClosed {
|
||||
return errMemoryProviderClosed
|
||||
}
|
||||
|
@ -590,8 +589,8 @@ func (p MemoryProvider) getNextFolderID() int64 {
|
|||
}
|
||||
|
||||
func (p MemoryProvider) clear() {
|
||||
p.dbHandle.lock.Lock()
|
||||
defer p.dbHandle.lock.Unlock()
|
||||
p.dbHandle.Lock()
|
||||
defer p.dbHandle.Unlock()
|
||||
p.dbHandle.usernames = []string{}
|
||||
p.dbHandle.usersIdx = make(map[int64]string)
|
||||
p.dbHandle.users = make(map[string]User)
|
||||
|
|
|
@ -5,7 +5,7 @@ RUN apk add --no-cache git gcc g++ ca-certificates \
WORKDIR /go/src/github.com/drakkan/sftpgo
ARG TAG
ARG FEATURES
# Use --build-arg TAG=LATEST for latest tag. Use e.g. --build-arg TAG=0.9.6 for a specific tag/commit. Otherwise HEAD (master) is built.
# Use --build-arg TAG=LATEST for latest tag. Use e.g. --build-arg TAG=v1.0.0 for a specific tag/commit. Otherwise HEAD (master) is built.
RUN git checkout $(if [ "${TAG}" = LATEST ]; then echo `git rev-list --tags --max-count=1`; elif [ -n "${TAG}" ]; then echo "${TAG}"; else echo HEAD; fi)
RUN go build $(if [ -n "${FEATURES}" ]; then echo "-tags ${FEATURES}"; fi) -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -o /go/bin/sftpgo
@ -16,7 +16,7 @@ sudo groupadd -g 1003 sftpgrp && \
# Edit sftpgo.json as you need

# Get and build SFTPGo image.
# Add --build-arg TAG=LATEST to build the latest tag or e.g. TAG=0.9.6 for a specific tag/commit.
# Add --build-arg TAG=LATEST to build the latest tag or e.g. TAG=v1.0.0 for a specific tag/commit.
# Add --build-arg FEATURES=<build features comma separated> to specify the features to build.
git clone https://github.com/drakkan/sftpgo.git && \
cd sftpgo && \
@ -5,7 +5,7 @@ RUN go get -d github.com/drakkan/sftpgo
WORKDIR /go/src/github.com/drakkan/sftpgo
ARG TAG
ARG FEATURES
# Use --build-arg TAG=LATEST for latest tag. Use e.g. --build-arg TAG=0.9.6 for a specific tag/commit. Otherwise HEAD (master) is built.
# Use --build-arg TAG=LATEST for latest tag. Use e.g. --build-arg TAG=v1.0.0 for a specific tag/commit. Otherwise HEAD (master) is built.
RUN git checkout $(if [ "${TAG}" = LATEST ]; then echo `git rev-list --tags --max-count=1`; elif [ -n "${TAG}" ]; then echo "${TAG}"; else echo HEAD; fi)
RUN go build $(if [ -n "${FEATURES}" ]; then echo "-tags ${FEATURES}"; fi) -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -o sftpgo
@ -10,10 +10,10 @@ docker build -t="drakkan/sftpgo" .

This will build master of github.com/drakkan/sftpgo.

To build the latest tag you can add `--build-arg TAG=LATEST` and to build a specific tag/commit you can use for example `TAG=0.9.6`, like this:
To build the latest tag you can add `--build-arg TAG=LATEST` and to build a specific tag/commit you can use for example `TAG=v1.0.0`, like this:

```bash
docker build -t="drakkan/sftpgo" --build-arg TAG=0.9.6 .
docker build -t="drakkan/sftpgo" --build-arg TAG=v1.0.0 .
```

To specify the features to build you can add `--build-arg FEATURES=<build features comma separated>`. For example you can disable SQLite and S3 support like this:
@ -43,19 +43,28 @@ The `gen` command allows to generate completion scripts for your shell and man p

The configuration file contains the following sections:

- **"sftpd"**, the configuration for the SFTP server
  - `bind_port`, integer. The port used for serving SFTP requests. Default: 2022
  - `bind_address`, string. Leave blank to listen on all available network interfaces. Default: ""
- **"common"**, configuration parameters shared among all the supported protocols (a rough Go sketch of these parameters follows below)
  - `idle_timeout`, integer. Time in minutes after which an idle client will be disconnected. 0 means disabled. Default: 15
  - `max_auth_tries`, integer. Maximum number of authentication attempts permitted per connection. If set to a negative number, the number of attempts is unlimited. If set to zero, the number of attempts is limited to 6.
  - `umask`, string. Umask for the new files and directories. This setting has no effect on Windows. Default: "0022"
  - `banner`, string. Identification string used by the server. Leave empty to use the default banner. Default `SFTPGo_<version>`, for example `SSH-2.0-SFTPGo_0.9.5`
  - `upload_mode`, integer. 0 means standard: the files are uploaded directly to the requested path. 1 means atomic: files are uploaded to a temporary path and renamed to the requested path when the client ends the upload. Atomic mode avoids problems such as a web server that serves partial files when the files are being uploaded. In atomic mode, if there is an upload error, the temporary file is deleted and so the requested upload path will not contain a partial file. 2 means atomic with resume support: same as atomic but if there is an upload error, the temporary file is renamed to the requested path and not deleted. This way, a client can reconnect and resume the upload.
  - `actions`, struct. It contains the command to execute and/or the HTTP URL to notify and the trigger conditions. See the "Custom Actions" paragraph for more details
    - `execute_on`, list of strings. Valid values are `download`, `upload`, `pre-delete`, `delete`, `rename`, `ssh_cmd`. Leave empty to disable actions.
    - `command`, string. Deprecated, please use `hook`.
    - `http_notification_url`, a valid URL. Deprecated, please use `hook`.
    - `hook`, string. Absolute path to the command to execute or HTTP URL to notify.
  - `setstat_mode`, integer. 0 means "normal mode": requests for changing permissions, owner/group and access/modification times are executed. 1 means "ignore mode": requests for changing permissions, owner/group and access/modification times are silently ignored.
  - `proxy_protocol`, integer. Support for [HAProxy PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt). If you are running SFTPGo behind a proxy server such as HAProxy, AWS ELB or NGINX, you can enable the proxy protocol. It provides a convenient way to safely transport connection information such as a client's address across multiple layers of NAT or TCP proxies to get the real client IP address instead of the proxy IP. Both protocol versions 1 and 2 are supported. If the proxy protocol is enabled in SFTPGo then you have to enable the protocol in your proxy configuration too. For example, for HAProxy, add `send-proxy` or `send-proxy-v2` to each server configuration line. The following modes are supported:
    - 0, disabled
    - 1, enabled. Proxy header will be used and requests without proxy header will be accepted
    - 2, required. Proxy header will be used and requests without proxy header will be rejected
  - `proxy_allowed`, list of IP addresses and IP ranges allowed to send the proxy header:
    - If `proxy_protocol` is set to 1 and we receive a proxy header from an IP that is not in the list then the connection will be accepted and the header will be ignored
    - If `proxy_protocol` is set to 2 and we receive a proxy header from an IP that is not in the list then the connection will be rejected
- **"sftpd"**, the configuration for the SFTP server
  - `bind_port`, integer. The port used for serving SFTP requests. Default: 2022
  - `bind_address`, string. Leave blank to listen on all available network interfaces. Default: ""
  - `idle_timeout`, integer. Deprecated, please use the same key in `common` section.
  - `max_auth_tries`, integer. Maximum number of authentication attempts permitted per connection. If set to a negative number, the number of attempts is unlimited. If set to zero, the number of attempts is limited to 6.
  - `banner`, string. Identification string used by the server. Leave empty to use the default banner. Default `SFTPGo_<version>`, for example `SSH-2.0-SFTPGo_0.9.5`
  - `upload_mode`, integer. Deprecated, please use the same key in `common` section.
  - `actions`, struct. Deprecated, please use the same key in `common` section.
  - `keys`, struct array. Deprecated, please use `host_keys`.
    - `private_key`, path to the private key file. It can be a path relative to the config dir or an absolute one.
  - `host_keys`, list of strings. It contains the daemon's private host keys. Each host key can be defined as a path relative to the configuration directory or an absolute one. If empty, the daemon will search or try to generate `id_rsa` and `id_ecdsa` keys inside the configuration directory. If you configure absolute paths to files named `id_rsa` and/or `id_ecdsa` then SFTPGo will try to generate these keys using the default settings.
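The `common` keys above map onto a Go configuration struct in the new package. A rough sketch of its shape, limited to the parameters exercised by the test code earlier in this diff (field and type names are inferred and illustrative, not the exact definitions):

```go
package main

import "fmt"

// Actions is an illustrative name for the hook settings under "common".
type Actions struct {
	ExecuteOn []string `json:"execute_on" mapstructure:"execute_on"`
	Hook      string   `json:"hook" mapstructure:"hook"`
}

// Configuration sketches the "common" section; only the keys exercised by the
// tests in this diff are shown, and the real definition may differ.
type Configuration struct {
	IdleTimeout   int      `json:"idle_timeout" mapstructure:"idle_timeout"`
	UploadMode    int      `json:"upload_mode" mapstructure:"upload_mode"`
	Actions       Actions  `json:"actions" mapstructure:"actions"`
	SetstatMode   int      `json:"setstat_mode" mapstructure:"setstat_mode"`
	ProxyProtocol int      `json:"proxy_protocol" mapstructure:"proxy_protocol"`
	ProxyAllowed  []string `json:"proxy_allowed" mapstructure:"proxy_allowed"`
}

func main() {
	cfg := Configuration{
		IdleTimeout:   15,
		UploadMode:    2, // atomic with resume support
		Actions:       Actions{ExecuteOn: []string{"upload"}, Hook: "http://127.0.0.1:8080/notify"},
		ProxyProtocol: 1,
		ProxyAllowed:  []string{"192.168.1.1"},
	}
	fmt.Printf("%+v\n", cfg)
}
```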
@ -64,17 +73,11 @@ The configuration file contains the following sections:
  - `macs`, list of strings. Available MAC (message authentication code) algorithms in preference order. Leave empty to use default values. The supported values can be found here: [`crypto/ssh`](https://github.com/golang/crypto/blob/master/ssh/common.go#L84 "Supported MACs")
  - `trusted_user_ca_keys`, list of public keys paths of certificate authorities that are trusted to sign user certificates for authentication. The paths can be absolute or relative to the configuration directory.
  - `login_banner_file`, path to the login banner file. The contents of the specified file, if any, are sent to the remote user before authentication is allowed. It can be a path relative to the config dir or an absolute one. Leave empty to disable login banner.
  - `setstat_mode`, integer. 0 means "normal mode": requests for changing permissions, owner/group and access/modification times are executed. 1 means "ignore mode": requests for changing permissions, owner/group and access/modification times are silently ignored.
  - `setstat_mode`, integer. Deprecated, please use the same key in `common` section.
  - `enabled_ssh_commands`, list of enabled SSH commands. `*` enables all supported commands. More information can be found [here](./ssh-commands.md).
  - `keyboard_interactive_auth_program`, string. Deprecated, please use `keyboard_interactive_auth_hook`.
  - `keyboard_interactive_auth_hook`, string. Absolute path to an external program or an HTTP URL to invoke for keyboard interactive authentication. See the "Keyboard Interactive Authentication" paragraph for more details.
  - `proxy_protocol`, integer. Support for [HAProxy PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt). If you are running SFTPGo behind a proxy server such as HAProxy, AWS ELB or NGINX, you can enable the proxy protocol. It provides a convenient way to safely transport connection information such as a client's address across multiple layers of NAT or TCP proxies to get the real client IP address instead of the proxy IP. Both protocol versions 1 and 2 are supported. If the proxy protocol is enabled in SFTPGo then you have to enable the protocol in your proxy configuration too. For example, for HAProxy, add `send-proxy` or `send-proxy-v2` to each server configuration line. The following modes are supported:
    - 0, disabled
    - 1, enabled. Proxy header will be used and requests without proxy header will be accepted
    - 2, required. Proxy header will be used and requests without proxy header will be rejected
  - `proxy_allowed`, list of IP addresses and IP ranges allowed to send the proxy header:
    - If `proxy_protocol` is set to 1 and we receive a proxy header from an IP that is not in the list then the connection will be accepted and the header will be ignored
    - If `proxy_protocol` is set to 2 and we receive a proxy header from an IP that is not in the list then the connection will be rejected
  - `proxy_protocol`, integer. Deprecated, please use the same key in `common` section.
  - `proxy_allowed`, list of strings. Deprecated, please use the same key in `common` section.
- **"data_provider"**, the configuration for the data provider
  - `driver`, string. Supported drivers are `sqlite`, `mysql`, `postgresql`, `bolt`, `memory`
  - `name`, string. Database name. For driver `sqlite` this can be the database name relative to the config dir or the absolute path to the SQLite database. For driver `memory` this is the (optional) path relative to the config dir or the absolute path to the users dump, obtained using the `dumpdata` REST API, to load. This dump will be loaded at startup and can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows. The `memory` provider will not modify the provided file so quota usage and last login will not be persisted
@ -94,8 +97,6 @@ The configuration file contains the following sections:
  - `users_base_dir`, string. Users default base directory. If no home dir is defined while adding a new user, and this value is a valid absolute path, then the user home dir will be automatically defined as the path obtained joining the base dir and the username
  - `actions`, struct. It contains the command to execute and/or the HTTP URL to notify and the trigger conditions. See the "Custom Actions" paragraph for more details
    - `execute_on`, list of strings. Valid values are `add`, `update`, `delete`. `update` action will not be fired for internal updates such as the last login or the user quota fields.
    - `command`, string. Deprecated, please use `hook`.
    - `http_notification_url`, a valid URL. Deprecated, please use `hook`.
    - `hook`, string. Absolute path to the command to execute or HTTP URL to notify (a minimal receiver sketch follows this list).
  - `external_auth_program`, string. Deprecated, please use `external_auth_hook`.
  - `external_auth_hook`, string. Absolute path to an external program or an HTTP URL to invoke for users authentication. See the "External Authentication" paragraph for more details. Leave empty to disable.
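When `hook` is an HTTP URL, the configured endpoint is notified about user add/update/delete events. A minimal receiver that just logs whatever JSON body arrives (a sketch: it assumes the notification is delivered as an HTTP request with a JSON payload and makes no claim about the exact fields or method):

```go
package main

import (
	"encoding/json"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/notify", func(w http.ResponseWriter, r *http.Request) {
		defer r.Body.Close()
		body, err := ioutil.ReadAll(r.Body)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		// decode into a generic map: this sketch does not assume a schema
		var payload map[string]interface{}
		if err := json.Unmarshal(body, &payload); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		log.Printf("user action notification: %v", payload)
		w.WriteHeader(http.StatusOK)
	})
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
```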
@ -151,6 +152,6 @@ You can also override all the available configuration options using environment
Let's see some examples:

- To set sftpd `bind_port`, you need to define the env var `SFTPGO_SFTPD__BIND_PORT`
- To set the `execute_on` actions, you need to define the env var `SFTPGO_SFTPD__ACTIONS__EXECUTE_ON`. For example `SFTPGO_SFTPD__ACTIONS__EXECUTE_ON=upload,download`
- To set the `execute_on` actions, you need to define the env var `SFTPGO_COMMON__ACTIONS__EXECUTE_ON`. For example `SFTPGO_COMMON__ACTIONS__EXECUTE_ON=upload,download` (see the sketch below)

Please note that, to override configuration options with environment variables, a configuration file containing the options to override is required. You can, for example, deploy the default configuration file and then override the options you need to customize using environment variables.
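The override in the example above can also be reproduced from Go test code by exporting the variable before loading the configuration. This is a sketch only: the `sftpgo` configuration name and the `..` config dir are assumptions taken from the other tests in this diff, and the exact list-splitting behaviour is up to the loader.

```go
package config_test

import (
	"os"
	"testing"

	"github.com/drakkan/sftpgo/config"
)

// TestEnvOverrideSketch is illustrative only: it assumes a configuration file
// exists in ".." and that the loader's automatic env binding picks the value
// up when the file is (re)loaded.
func TestEnvOverrideSketch(t *testing.T) {
	os.Setenv("SFTPGO_COMMON__ACTIONS__EXECUTE_ON", "upload,download")
	defer os.Unsetenv("SFTPGO_COMMON__ACTIONS__EXECUTE_ON")

	if err := config.LoadConfig("..", "sftpgo"); err != nil {
		t.Fatal(err)
	}
	commonConf := config.GetCommonConfig()
	t.Logf("execute_on from env: %v", commonConf.Actions.ExecuteOn)
}
```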
@ -50,5 +50,5 @@ The logs can be divided into the following categories:
  - `level` string
  - `username`, string. Can be empty if the connection is closed before an authentication attempt
  - `client_ip` string.
  - `login_type` string. Can be `publickey`, `password`, `keyboard-interactive` or `no_auth_tryed`
  - `login_type` string. Can be `publickey`, `password`, `keyboard-interactive`, `publickey+password`, `publickey+keyboard-interactive` or `no_auth_tryed`
  - `error` string. Optional error description
@ -3,8 +3,7 @@ module github.com/drakkan/ldapauth
|
|||
go 1.14
|
||||
|
||||
require (
|
||||
github.com/go-asn1-ber/asn1-ber v1.4.1 // indirect
|
||||
github.com/go-ldap/ldap/v3 v3.1.8
|
||||
golang.org/x/crypto v0.0.0-20200406173513-056763e48d71
|
||||
golang.org/x/sys v0.0.0-20200409092240-59c9f1ba88fa // indirect
|
||||
github.com/go-ldap/ldap/v3 v3.2.3
|
||||
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae // indirect
|
||||
)
|
||||
|
|
|
@ -1,13 +1,15 @@
|
|||
github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
|
||||
github.com/go-asn1-ber/asn1-ber v1.4.1 h1:qP/QDxOtmMoJVgXHCXNzDpA0+wkgYB2x5QoLMVOciyw=
|
||||
github.com/go-asn1-ber/asn1-ber v1.4.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
|
||||
github.com/go-ldap/ldap/v3 v3.1.8 h1:5vU/2jOh9HqprwXp8aF915s9p6Z8wmbSEVF7/gdTFhM=
|
||||
github.com/go-ldap/ldap/v3 v3.1.8/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q=
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c h1:/IBSNwUN8+eKzUzbJPqhK839ygXJ82sde8x3ogr6R28=
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.1 h1:pDbRAunXzIUXfx4CB2QJFv5IuPiuoW+sWvr/Us009o8=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
|
||||
github.com/go-ldap/ldap/v3 v3.2.3 h1:FBt+5w3q/vPVPb4eYMQSn+pOiz4zewPamYhlGMmc7yM=
|
||||
github.com/go-ldap/ldap/v3 v3.2.3/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjRvjKpyMg=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20200406173513-056763e48d71 h1:DOmugCavvUtnUD114C1Wh+UgTgQZ4pMLzXxi1pSt+/Y=
|
||||
golang.org/x/crypto v0.0.0-20200406173513-056763e48d71/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899 h1:DZhuSZLsGlFL4CmhA8BcRA0mnthyA/nZ00AqCUo7vHg=
|
||||
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200409092240-59c9f1ba88fa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
|
|
@ -77,17 +77,26 @@ func addConfigFlags(cmd *cobra.Command) {
|
|||
viper.SetDefault(configDirKey, defaultConfigDir)
|
||||
viper.BindEnv(configDirKey, "LDAPAUTH_CONFIG_DIR")
|
||||
cmd.Flags().StringVarP(&configDir, configDirFlag, "c", viper.GetString(configDirKey),
|
||||
"Location for the config dir. This directory should contain the \"ldapauth\" configuration file or the configured "+
|
||||
"config-file. This flag can be set using LDAPAUTH_CONFIG_DIR env var too.")
|
||||
`Location for the config dir. This directory
|
||||
should contain the "ldapauth" configuration
|
||||
file or the configured config-file. This flag
|
||||
can be set using LDAPAUTH_CONFIG_DIR env var too.
|
||||
`)
|
||||
viper.BindPFlag(configDirKey, cmd.Flags().Lookup(configDirFlag))
|
||||
|
||||
viper.SetDefault(configFileKey, defaultConfigName)
|
||||
viper.BindEnv(configFileKey, "LDAPAUTH_CONFIG_FILE")
|
||||
cmd.Flags().StringVarP(&configFile, configFileFlag, "f", viper.GetString(configFileKey),
|
||||
"Name for the configuration file. It must be the name of a file stored in config-dir not the absolute path to the "+
|
||||
"configuration file. The specified file name must have no extension we automatically load JSON, YAML, TOML, HCL and "+
|
||||
"Java properties. Therefore if you set \"ldapauth\" then \"ldapauth.toml\", \"ldapauth.yaml\" and so on are searched. "+
|
||||
"This flag can be set using LDAPAUTH_CONFIG_FILE env var too.")
|
||||
`Name for the configuration file. It must be
|
||||
the name of a file stored in config-dir not
|
||||
the absolute path to the configuration file.
|
||||
The specified file name must have no extension
|
||||
we automatically load JSON, YAML, TOML, HCL and
|
||||
Java properties. Therefore if you set \"ldapauth\"
|
||||
then \"ldapauth.toml\", \"ldapauth.yaml\" and
|
||||
so on are searched. This flag can be set using
|
||||
LDAPAUTH_CONFIG_FILE env var too.
|
||||
`)
|
||||
viper.BindPFlag(configFileKey, cmd.Flags().Lookup(configFileFlag))
|
||||
}
|
||||
|
||||
|
@ -97,41 +106,53 @@ func addServeFlags(cmd *cobra.Command) {
|
|||
viper.SetDefault(logFilePathKey, defaultLogFile)
|
||||
viper.BindEnv(logFilePathKey, "LDAPAUTH_LOG_FILE_PATH")
|
||||
cmd.Flags().StringVarP(&logFilePath, logFilePathFlag, "l", viper.GetString(logFilePathKey),
|
||||
"Location for the log file. Leave empty to write logs to the standard output. This flag can be set using LDAPAUTH_LOG_FILE_PATH "+
|
||||
"env var too.")
|
||||
`Location for the log file. Leave empty to write
|
||||
logs to the standard output. This flag can be
|
||||
set using LDAPAUTH_LOG_FILE_PATH env var too.
|
||||
`)
|
||||
viper.BindPFlag(logFilePathKey, cmd.Flags().Lookup(logFilePathFlag))
|
||||
|
||||
viper.SetDefault(logMaxSizeKey, defaultLogMaxSize)
|
||||
viper.BindEnv(logMaxSizeKey, "LDAPAUTH_LOG_MAX_SIZE")
|
||||
cmd.Flags().IntVarP(&logMaxSize, logMaxSizeFlag, "s", viper.GetInt(logMaxSizeKey),
|
||||
"Maximum size in megabytes of the log file before it gets rotated. This flag can be set using LDAPAUTH_LOG_MAX_SIZE "+
|
||||
"env var too. It is unused if log-file-path is empty.")
|
||||
`Maximum size in megabytes of the log file
|
||||
before it gets rotated. This flag can be set
|
||||
using LDAPAUTH_LOG_MAX_SIZE env var too. It
|
||||
is unused if log-file-path is empty.`)
|
||||
viper.BindPFlag(logMaxSizeKey, cmd.Flags().Lookup(logMaxSizeFlag))
|
||||
|
||||
viper.SetDefault(logMaxBackupKey, defaultLogMaxBackup)
|
||||
viper.BindEnv(logMaxBackupKey, "LDAPAUTH_LOG_MAX_BACKUPS")
|
||||
cmd.Flags().IntVarP(&logMaxBackups, "log-max-backups", "b", viper.GetInt(logMaxBackupKey),
|
||||
"Maximum number of old log files to retain. This flag can be set using LDAPAUTH_LOG_MAX_BACKUPS env var too. "+
|
||||
"It is unused if log-file-path is empty.")
|
||||
`Maximum number of old log files to retain.
|
||||
This flag can be set using LDAPAUTH_LOG_MAX_BACKUPS
|
||||
env var too. It is unused if log-file-path is
|
||||
empty.`)
|
||||
viper.BindPFlag(logMaxBackupKey, cmd.Flags().Lookup(logMaxBackupFlag))
|
||||
|
||||
viper.SetDefault(logMaxAgeKey, defaultLogMaxAge)
|
||||
viper.BindEnv(logMaxAgeKey, "LDAPAUTH_LOG_MAX_AGE")
|
||||
cmd.Flags().IntVarP(&logMaxAge, "log-max-age", "a", viper.GetInt(logMaxAgeKey),
|
||||
"Maximum number of days to retain old log files. This flag can be set using LDAPAUTH_LOG_MAX_AGE env var too. "+
|
||||
"It is unused if log-file-path is empty.")
|
||||
`Maximum number of days to retain old log files.
|
||||
This flag can be set using LDAPAUTH_LOG_MAX_AGE
|
||||
env var too. It is unused if log-file-path is
|
||||
empty.`)
|
||||
viper.BindPFlag(logMaxAgeKey, cmd.Flags().Lookup(logMaxAgeFlag))
|
||||
|
||||
viper.SetDefault(logCompressKey, defaultLogCompress)
|
||||
viper.BindEnv(logCompressKey, "LDAPAUTH_LOG_COMPRESS")
|
||||
cmd.Flags().BoolVarP(&logCompress, logCompressFlag, "z", viper.GetBool(logCompressKey), "Determine if the rotated "+
|
||||
"log files should be compressed using gzip. This flag can be set using LDAPAUTH_LOG_COMPRESS env var too. "+
|
||||
"It is unused if log-file-path is empty.")
|
||||
cmd.Flags().BoolVarP(&logCompress, logCompressFlag, "z", viper.GetBool(logCompressKey),
|
||||
`Determine if the rotated log files
|
||||
should be compressed using gzip. This flag can
|
||||
be set using LDAPAUTH_LOG_COMPRESS env var too.
|
||||
It is unused if log-file-path is empty.`)
|
||||
viper.BindPFlag(logCompressKey, cmd.Flags().Lookup(logCompressFlag))
|
||||
|
||||
viper.SetDefault(logVerboseKey, defaultLogVerbose)
|
||||
viper.BindEnv(logVerboseKey, "LDAPAUTH_LOG_VERBOSE")
|
||||
cmd.Flags().BoolVarP(&logVerbose, logVerboseFlag, "v", viper.GetBool(logVerboseKey), "Enable verbose logs. "+
|
||||
"This flag can be set using LDAPAUTH_LOG_VERBOSE env var too.")
|
||||
cmd.Flags().BoolVarP(&logVerbose, logVerboseFlag, "v", viper.GetBool(logVerboseKey),
|
||||
`Enable verbose logs. This flag can be set
|
||||
using LDAPAUTH_LOG_VERBOSE env var too.
|
||||
`)
|
||||
viper.BindPFlag(logVerboseKey, cmd.Flags().Lookup(logVerboseFlag))
|
||||
}
|
||||
|
|
|
@ -4,23 +4,24 @@ go 1.14
|
|||
|
||||
require (
|
||||
github.com/fsnotify/fsnotify v1.4.9 // indirect
|
||||
github.com/go-asn1-ber/asn1-ber v1.4.1 // indirect
|
||||
github.com/go-chi/chi v4.1.1+incompatible
|
||||
github.com/go-chi/chi v4.1.2+incompatible
|
||||
github.com/go-chi/render v1.0.1
|
||||
github.com/go-ldap/ldap/v3 v3.1.8
|
||||
github.com/mitchellh/mapstructure v1.2.2 // indirect
|
||||
github.com/go-ldap/ldap/v3 v3.2.3
|
||||
github.com/json-iterator/go v1.1.9 // indirect
|
||||
github.com/mitchellh/mapstructure v1.3.2 // indirect
|
||||
github.com/nathanaelle/password/v2 v2.0.1
|
||||
github.com/pelletier/go-toml v1.7.0 // indirect
|
||||
github.com/rs/zerolog v1.18.0
|
||||
github.com/spf13/afero v1.2.2 // indirect
|
||||
github.com/pelletier/go-toml v1.8.0 // indirect
|
||||
github.com/rs/zerolog v1.19.0
|
||||
github.com/spf13/afero v1.3.2 // indirect
|
||||
github.com/spf13/cast v1.3.1 // indirect
|
||||
github.com/spf13/cobra v1.0.0
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spf13/viper v1.6.3
|
||||
golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a
|
||||
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f // indirect
|
||||
golang.org/x/text v0.3.2 // indirect
|
||||
gopkg.in/ini.v1 v1.55.0 // indirect
|
||||
github.com/spf13/viper v1.7.0
|
||||
github.com/zenazn/goji v0.9.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae // indirect
|
||||
golang.org/x/text v0.3.3 // indirect
|
||||
gopkg.in/ini.v1 v1.57.0 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0
|
||||
)
|
||||
|
|
|
@ -1,43 +1,60 @@
|
|||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c h1:/IBSNwUN8+eKzUzbJPqhK839ygXJ82sde8x3ogr6R28=
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-asn1-ber/asn1-ber v1.3.1 h1:gvPdv/Hr++TRFCl0UbPFHC54P9N9jgsRPnmnr419Uck=
|
||||
github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
|
||||
github.com/go-asn1-ber/asn1-ber v1.4.1 h1:qP/QDxOtmMoJVgXHCXNzDpA0+wkgYB2x5QoLMVOciyw=
|
||||
github.com/go-asn1-ber/asn1-ber v1.4.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
|
||||
github.com/go-chi/chi v4.1.1+incompatible h1:MmTgB0R8Bt/jccxp+t6S/1VGIKdJw5J74CK/c9tTfA4=
|
||||
github.com/go-chi/chi v4.1.1+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.1 h1:pDbRAunXzIUXfx4CB2QJFv5IuPiuoW+sWvr/Us009o8=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
|
||||
github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec=
|
||||
github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
|
||||
github.com/go-chi/render v1.0.1 h1:4/5tis2cKaNdnv9zFLfXzcquC9HbeZgCnxGnKrltBS8=
|
||||
github.com/go-chi/render v1.0.1/go.mod h1:pq4Rr7HbnsdaeHagklXub+p6Wd16Af5l9koip1OvJns=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-ldap/ldap/v3 v3.1.8 h1:5vU/2jOh9HqprwXp8aF915s9p6Z8wmbSEVF7/gdTFhM=
|
||||
github.com/go-ldap/ldap/v3 v3.1.8/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q=
|
||||
github.com/go-ldap/ldap/v3 v3.2.3 h1:FBt+5w3q/vPVPb4eYMQSn+pOiz4zewPamYhlGMmc7yM=
|
||||
github.com/go-ldap/ldap/v3 v3.2.3/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjRvjKpyMg=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
|
@ -46,60 +63,98 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
|
|||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
|
||||
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
|
||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
||||
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.2.2 h1:dxe5oCinTXiTIcfgmZecdCzPmAJKd46KsCWc35r0TV4=
|
||||
github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.3.2 h1:mRS76wmkOn3KkKAyXDu42V+6ebnXWIztFSYGN7GeoRg=
|
||||
github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nathanaelle/password/v2 v2.0.1 h1:ItoCTdsuIWzilYmllQPa3DR3YoCXcpfxScWLqr8Ii2s=
|
||||
github.com/nathanaelle/password/v2 v2.0.1/go.mod h1:eaoT+ICQEPNtikBRIAatN8ThWwMhVG+r1jTw60BvPJk=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pelletier/go-toml v1.7.0 h1:7utD74fnzVc/cpcyy8sjrlFr5vYpypUixARcHIMIGuI=
|
||||
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
|
||||
github.com/pelletier/go-toml v1.8.0 h1:Keo9qb7iRJs2voHvunFtuuYFsbWeOBh8/P9v/kVMFtw=
|
||||
github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
|
@ -110,44 +165,41 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
|
|||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||
github.com/rs/zerolog v1.18.0 h1:CbAm3kP2Tptby1i9sYy2MGRg0uxIN9cyDb59Ys7W8z8=
|
||||
github.com/rs/zerolog v1.18.0/go.mod h1:9nvC1axdVrAHcu/s9taAVfBuIdTZLVQmKQyvrUjF5+I=
|
||||
github.com/rs/zerolog v1.19.0 h1:hYz4ZVdUgjXTBUmrkrw55j1nHx68LfOKIQk5IYtyScg=
|
||||
github.com/rs/zerolog v1.19.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
|
||||
github.com/spf13/afero v1.3.2 h1:GDarE4TJQI52kYSbSAmLiId1Elfj+xgSDqrUZxFhxlU=
|
||||
github.com/spf13/afero v1.3.2/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
|
||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
|
||||
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
|
||||
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/spf13/viper v1.6.3 h1:pDDu1OyEDTKzpJwdq4TiuLyMsUgRa/BT5cn5O62NoHs=
|
||||
github.com/spf13/viper v1.6.3/go.mod h1:jUMtyi0/lB5yZH/FjyGAoH7IMNrIhlBf6pXZmbMDvzw=
|
||||
github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
|
||||
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
|
@ -156,73 +208,144 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q
|
|||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a h1:y6sBfNd1b9Wy08a6K1Z1DZc4aXABUN5TKjkYhz7UKmo=
|
||||
golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899 h1:DZhuSZLsGlFL4CmhA8BcRA0mnthyA/nZ00AqCUo7vHg=
|
||||
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f h1:gWF768j/LaZugp8dyS4UwsslYCYz9XgFxvlgsn0n9H8=
|
||||
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.55.0 h1:E8yzL5unfpW3M6fz/eB7Cb5MQAYSZ7GKo4Qth+N2sgQ=
|
||||
gopkg.in/ini.v1 v1.55.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww=
|
||||
gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
|
|
|
@@ -32,10 +32,10 @@ type httpAuthProvider interface {
}

type basicAuthProvider struct {
Path string
Path string
sync.RWMutex
Info os.FileInfo
Users map[string]string
lock *sync.RWMutex
}

func newBasicAuthProvider(authUserFile string) (httpAuthProvider, error) {

@@ -43,7 +43,6 @@ func newBasicAuthProvider(authUserFile string) (httpAuthProvider, error) {
Path: authUserFile,
Info: nil,
Users: make(map[string]string),
lock: new(sync.RWMutex),
}
return &basicAuthProvider, basicAuthProvider.loadUsers()
}

@@ -53,8 +52,8 @@ func (p *basicAuthProvider) isEnabled() bool {
}

func (p *basicAuthProvider) isReloadNeeded(info os.FileInfo) bool {
p.lock.RLock()
defer p.lock.RUnlock()
p.RLock()
defer p.RUnlock()
return p.Info == nil || p.Info.ModTime() != info.ModTime() || p.Info.Size() != info.Size()
}

@@ -83,8 +82,8 @@ func (p *basicAuthProvider) loadUsers() error {
logger.Debug(logSender, "", "unable to parse basic auth users file: %v", err)
return err
}
p.lock.Lock()
defer p.lock.Unlock()
p.Lock()
defer p.Unlock()
p.Users = make(map[string]string)
for _, record := range records {
if len(record) == 2 {

@@ -102,8 +101,8 @@ func (p *basicAuthProvider) getHashedPassword(username string) (string, bool) {
if err != nil {
return "", false
}
p.lock.RLock()
defer p.lock.RUnlock()
p.RLock()
defer p.RUnlock()
pwd, ok := p.Users[username]
return pwd, ok
}

@@ -8,10 +8,10 @@ import (
)

type certManager struct {
cert *tls.Certificate
certPath string
keyPath string
lock *sync.RWMutex
sync.RWMutex
cert *tls.Certificate
}

func (m *certManager) loadCertificate() error {

@@ -21,16 +21,16 @@ func (m *certManager) loadCertificate() error {
return err
}
logger.Debug(logSender, "", "https certificate successfully loaded")
m.lock.Lock()
defer m.lock.Unlock()
m.Lock()
defer m.Unlock()
m.cert = &newCert
return nil
}

func (m *certManager) GetCertificateFunc() func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
return func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
m.lock.RLock()
defer m.lock.RUnlock()
m.RLock()
defer m.RUnlock()
return m.cert, nil
}
}

@@ -40,7 +40,6 @@ func newCertManager(certificateFile, certificateKeyFile string) (*certManager, e
cert: nil,
certPath: certificateFile,
keyPath: certificateKeyFile,
lock: new(sync.RWMutex),
}
err := manager.loadCertificate()
if err != nil {

@@ -5,7 +5,6 @@ import (
"os"
"path/filepath"
"runtime"
"sync"

"github.com/rs/zerolog"
lumberjack "gopkg.in/natefinch/lumberjack.v2"

@@ -38,9 +37,9 @@ func InitLogger(logFilePath string, logMaxSize, logMaxBackups, logMaxAge int, lo
})
EnableConsoleLogger(level)
} else {
logger = zerolog.New(logSyncWrapper{
logger = zerolog.New(&logSyncWrapper{
output: os.Stdout,
lock: new(sync.Mutex)})
})
consoleLogger = zerolog.Nop()
}
logger.Level(level)

@@ -6,12 +6,12 @@ import (
)

type logSyncWrapper struct {
lock *sync.Mutex
sync.Mutex
output *os.File
}

func (l logSyncWrapper) Write(b []byte) (n int, err error) {
l.lock.Lock()
defer l.lock.Unlock()
func (l *logSyncWrapper) Write(b []byte) (n int, err error) {
l.Lock()
defer l.Unlock()
return l.output.Write(b)
}

@@ -306,7 +306,6 @@ Output:
{
"active_transfers": [
{
"last_activity": 1577197485561,
"operation_type": "upload",
"path": "/test_upload.tar.gz",
"size": 1540096,

@@ -319,7 +318,6 @@ Output:
"last_activity": 1577197485561,
"protocol": "SFTP",
"remote_address": "127.0.0.1:43714",
"ssh_command": "",
"username": "test_username"
}
]

7
go.mod

@@ -22,7 +22,7 @@ require (
github.com/otiai10/copy v1.2.0
github.com/pelletier/go-toml v1.8.0 // indirect
github.com/pires/go-proxyproto v0.1.3
github.com/pkg/sftp v1.11.1-0.20200310224833-18dc4db7a456
github.com/pkg/sftp v1.11.1-0.20200716191756-97b9df616e69
github.com/prometheus/client_golang v1.7.1
github.com/rs/xid v1.2.1
github.com/rs/zerolog v1.19.0

@@ -47,7 +47,4 @@ require (
gopkg.in/natefinch/lumberjack.v2 v2.0.0
)

replace (
github.com/pkg/sftp => github.com/drakkan/sftp v0.0.0-20200705201813-118ca5720446
golang.org/x/crypto => github.com/drakkan/crypto v0.0.0-20200705203859-05ad140ecdbd
)
replace golang.org/x/crypto => github.com/drakkan/crypto v0.0.0-20200705203859-05ad140ecdbd

5
go.sum

@@ -84,8 +84,6 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/drakkan/crypto v0.0.0-20200705203859-05ad140ecdbd h1:7DQ8ayx6QylOxQrQ6oHdO+gk1cqtaINUekLrwD9gGNc=
github.com/drakkan/crypto v0.0.0-20200705203859-05ad140ecdbd/go.mod h1:v3bhWOXGYda7H5d2s5t9XA6th3fxW3s0MQxU1R96G/w=
github.com/drakkan/sftp v0.0.0-20200705201813-118ca5720446 h1:GxI4rQ487aXpKd4RkqZJ4aeJ0/1uksscNlaZ/a3FlN0=
github.com/drakkan/sftp v0.0.0-20200705201813-118ca5720446/go.mod h1:PIrgHN0+qgDmYTNiwryjoEqmXo9tv8aMwQ//Yg1xwIs=
github.com/eikenb/pipeat v0.0.0-20200430215831-470df5986b6d h1:8RvCRWer7TB2n+DKhW4uW15hRiqPmabSnSyYhju/Nuw=
github.com/eikenb/pipeat v0.0.0-20200430215831-470df5986b6d/go.mod h1:+JPhBw5JdJrSF80r6xsSg1TYHjyAGxYs4X24VyUdMZU=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=

@@ -262,6 +260,9 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/sftp v1.11.1-0.20200716191756-97b9df616e69 h1:qbrvNcVkxFdNuawO0LsSRRVMubCHOHdDTbb5EO/hiPE=
github.com/pkg/sftp v1.11.1-0.20200716191756-97b9df616e69/go.mod h1:PIrgHN0+qgDmYTNiwryjoEqmXo9tv8aMwQ//Yg1xwIs=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=

@@ -11,9 +11,9 @@ import (
"strconv"
"strings"

"github.com/drakkan/sftpgo/common"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/sftpd"
"github.com/drakkan/sftpgo/vfs"
)

@@ -154,7 +154,7 @@ func restoreFolders(folders []vfs.BaseVirtualFolder, inputFile string, scanQuota
return err
}
if scanQuota >= 1 {
if sftpd.AddVFolderQuotaScan(folder.MappedPath) {
if common.QuotaScans.AddVFolderQuotaScan(folder.MappedPath) {
logger.Debug(logSender, "", "starting quota scan for restored folder: %#v", folder.MappedPath)
go doFolderQuotaScan(folder) //nolint:errcheck
}

@@ -184,7 +184,7 @@ func restoreUsers(users []dataprovider.User, inputFile string, mode, scanQuota i
return err
}
if scanQuota == 1 || (scanQuota == 2 && user.HasQuotaRestrictions()) {
if sftpd.AddQuotaScan(user.Username) {
if common.QuotaScans.AddUserQuotaScan(user.Username) {
logger.Debug(logSender, "", "starting quota scan for restored user: %#v", user.Username)
go doQuotaScan(user) //nolint:errcheck
}

@@ -6,9 +6,9 @@ import (

"github.com/go-chi/render"

"github.com/drakkan/sftpgo/common"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/sftpd"
"github.com/drakkan/sftpgo/vfs"
)

@@ -18,11 +18,11 @@ const (
)

func getQuotaScans(w http.ResponseWriter, r *http.Request) {
render.JSON(w, r, sftpd.GetQuotaScans())
render.JSON(w, r, common.QuotaScans.GetUsersQuotaScans())
}

func getVFolderQuotaScans(w http.ResponseWriter, r *http.Request) {
render.JSON(w, r, sftpd.GetVFoldersQuotaScans())
render.JSON(w, r, common.QuotaScans.GetVFoldersQuotaScans())
}

func updateUserQuotaUsage(w http.ResponseWriter, r *http.Request) {

@@ -53,11 +53,11 @@ func updateUserQuotaUsage(w http.ResponseWriter, r *http.Request) {
"", http.StatusBadRequest)
return
}
if !sftpd.AddQuotaScan(user.Username) {
if !common.QuotaScans.AddUserQuotaScan(user.Username) {
sendAPIResponse(w, r, err, "A quota scan is in progress for this user", http.StatusConflict)
return
}
defer sftpd.RemoveQuotaScan(user.Username) //nolint:errcheck
defer common.QuotaScans.RemoveUserQuotaScan(user.Username)
err = dataprovider.UpdateUserQuota(user, u.UsedQuotaFiles, u.UsedQuotaSize, mode == quotaUpdateModeReset)
if err != nil {
sendAPIResponse(w, r, err, "", getRespStatus(err))

@@ -89,11 +89,11 @@ func updateVFolderQuotaUsage(w http.ResponseWriter, r *http.Request) {
sendAPIResponse(w, r, err, "", getRespStatus(err))
return
}
if !sftpd.AddVFolderQuotaScan(folder.MappedPath) {
if !common.QuotaScans.AddVFolderQuotaScan(folder.MappedPath) {
sendAPIResponse(w, r, err, "A quota scan is in progress for this folder", http.StatusConflict)
return
}
defer sftpd.RemoveVFolderQuotaScan(folder.MappedPath) //nolint:errcheck
defer common.QuotaScans.RemoveVFolderQuotaScan(folder.MappedPath)
err = dataprovider.UpdateVirtualFolderQuota(folder, f.UsedQuotaFiles, f.UsedQuotaSize, mode == quotaUpdateModeReset)
if err != nil {
sendAPIResponse(w, r, err, "", getRespStatus(err))

@@ -119,7 +119,7 @@ func startQuotaScan(w http.ResponseWriter, r *http.Request) {
sendAPIResponse(w, r, err, "", getRespStatus(err))
return
}
if sftpd.AddQuotaScan(user.Username) {
if common.QuotaScans.AddUserQuotaScan(user.Username) {
go doQuotaScan(user) //nolint:errcheck
sendAPIResponse(w, r, err, "Scan started", http.StatusCreated)
} else {

@@ -144,7 +144,7 @@ func startVFolderQuotaScan(w http.ResponseWriter, r *http.Request) {
sendAPIResponse(w, r, err, "", getRespStatus(err))
return
}
if sftpd.AddVFolderQuotaScan(folder.MappedPath) {
if common.QuotaScans.AddVFolderQuotaScan(folder.MappedPath) {
go doFolderQuotaScan(folder) //nolint:errcheck
sendAPIResponse(w, r, err, "Scan started", http.StatusCreated)
} else {

@@ -153,7 +153,7 @@ func startVFolderQuotaScan(w http.ResponseWriter, r *http.Request) {
}

func doQuotaScan(user dataprovider.User) error {
defer sftpd.RemoveQuotaScan(user.Username) //nolint:errcheck
defer common.QuotaScans.RemoveUserQuotaScan(user.Username)
fs, err := user.GetFilesystem("")
if err != nil {
logger.Warn(logSender, "", "unable scan quota for user %#v error creating filesystem: %v", user.Username, err)

@@ -170,7 +170,7 @@ func doQuotaScan(user dataprovider.User) error {
}

func doFolderQuotaScan(folder vfs.BaseVirtualFolder) error {
defer sftpd.RemoveVFolderQuotaScan(folder.MappedPath) //nolint:errcheck
defer common.QuotaScans.RemoveVFolderQuotaScan(folder.MappedPath)
fs := vfs.NewOsFs("", "", nil).(vfs.OsFs)
numFiles, size, err := fs.GetDirSize(folder.MappedPath)
if err != nil {

@@ -18,9 +18,9 @@ import (

"github.com/go-chi/render"

"github.com/drakkan/sftpgo/common"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/httpclient"
"github.com/drakkan/sftpgo/sftpd"
"github.com/drakkan/sftpgo/utils"
"github.com/drakkan/sftpgo/version"
"github.com/drakkan/sftpgo/vfs"

@@ -204,8 +204,8 @@ func GetUsers(limit, offset int64, username string, expectedStatusCode int) ([]d
}

// GetQuotaScans gets active quota scans for users and checks the received HTTP Status code against expectedStatusCode.
func GetQuotaScans(expectedStatusCode int) ([]sftpd.ActiveQuotaScan, []byte, error) {
var quotaScans []sftpd.ActiveQuotaScan
func GetQuotaScans(expectedStatusCode int) ([]common.ActiveQuotaScan, []byte, error) {
var quotaScans []common.ActiveQuotaScan
var body []byte
resp, err := sendHTTPRequest(http.MethodGet, buildURLRelativeToBase(quotaScanPath), nil, "")
if err != nil {

@@ -252,8 +252,8 @@ func UpdateQuotaUsage(user dataprovider.User, mode string, expectedStatusCode in
}

// GetConnections returns status and stats for active SFTP/SCP connections
func GetConnections(expectedStatusCode int) ([]sftpd.ConnectionStatus, []byte, error) {
var connections []sftpd.ConnectionStatus
func GetConnections(expectedStatusCode int) ([]common.ConnectionStatus, []byte, error) {
var connections []common.ConnectionStatus
var body []byte
resp, err := sendHTTPRequest(http.MethodGet, buildURLRelativeToBase(activeConnectionsPath), nil, "")
if err != nil {

@@ -360,8 +360,8 @@ func GetFolders(limit int64, offset int64, mappedPath string, expectedStatusCode
}

// GetFoldersQuotaScans gets active quota scans for folders and checks the received HTTP Status code against expectedStatusCode.
func GetFoldersQuotaScans(expectedStatusCode int) ([]sftpd.ActiveVirtualFolderQuotaScan, []byte, error) {
var quotaScans []sftpd.ActiveVirtualFolderQuotaScan
func GetFoldersQuotaScans(expectedStatusCode int) ([]common.ActiveVirtualFolderQuotaScan, []byte, error) {
var quotaScans []common.ActiveVirtualFolderQuotaScan
var body []byte
resp, err := sendHTTPRequest(http.MethodGet, buildURLRelativeToBase(quotaScanVFolderPath), nil, "")
if err != nil {

@@ -33,10 +33,10 @@ type httpAuthProvider interface {
}

type basicAuthProvider struct {
Path string
Path string
sync.RWMutex
Info os.FileInfo
Users map[string]string
lock *sync.RWMutex
}

func newBasicAuthProvider(authUserFile string) (httpAuthProvider, error) {

@@ -44,7 +44,6 @@ func newBasicAuthProvider(authUserFile string) (httpAuthProvider, error) {
Path: authUserFile,
Info: nil,
Users: make(map[string]string),
lock: new(sync.RWMutex),
}
return &basicAuthProvider, basicAuthProvider.loadUsers()
}

@@ -54,8 +53,8 @@ func (p *basicAuthProvider) isEnabled() bool {
}

func (p *basicAuthProvider) isReloadNeeded(info os.FileInfo) bool {
p.lock.RLock()
defer p.lock.RUnlock()
p.RLock()
defer p.RUnlock()
return p.Info == nil || p.Info.ModTime() != info.ModTime() || p.Info.Size() != info.Size()
}

@@ -84,8 +83,8 @@ func (p *basicAuthProvider) loadUsers() error {
logger.Debug(logSender, "", "unable to parse basic auth users file: %v", err)
return err
}
p.lock.Lock()
defer p.lock.Unlock()
p.Lock()
defer p.Unlock()
p.Users = make(map[string]string)
for _, record := range records {
if len(record) == 2 {

@@ -103,8 +102,8 @@ func (p *basicAuthProvider) getHashedPassword(username string) (string, bool) {
if err != nil {
return "", false
}
p.lock.RLock()
defer p.lock.RUnlock()
p.RLock()
defer p.RUnlock()
pwd, ok := p.Users[username]
return pwd, ok
}

@@ -15,6 +15,7 @@ import (

"github.com/go-chi/chi"

"github.com/drakkan/sftpgo/common"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/utils"
)

@@ -50,7 +51,7 @@ var (
router *chi.Mux
backupsPath string
httpAuth httpAuthProvider
certMgr *certManager
certMgr *common.CertManager
)

// Conf httpd daemon configuration

@@ -123,7 +124,7 @@ func (c Conf) Initialize(configDir string, enableProfiler bool) error {
MaxHeaderBytes: 1 << 16, // 64KB
}
if len(certificateFile) > 0 && len(certificateKeyFile) > 0 {
certMgr, err = newCertManager(certificateFile, certificateKeyFile)
certMgr, err = common.NewCertManager(certificateFile, certificateKeyFile, logSender)
if err != nil {
return err
}

@@ -139,7 +140,7 @@ func (c Conf) Initialize(configDir string, enableProfiler bool) error {
// ReloadTLSCertificate reloads the TLS certificate and key from the configured paths
func ReloadTLSCertificate() error {
if certMgr != nil {
return certMgr.loadCertificate()
return certMgr.LoadCertificate(logSender)
}
return nil
}

@@ -28,11 +28,11 @@ import (
"github.com/rs/zerolog"
"github.com/stretchr/testify/assert"

"github.com/drakkan/sftpgo/common"
"github.com/drakkan/sftpgo/config"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/httpd"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/sftpd"
"github.com/drakkan/sftpgo/utils"
"github.com/drakkan/sftpgo/vfs"
)

@@ -41,7 +41,6 @@ const (
defaultUsername = "test_user"
defaultPassword = "test_password"
testPubKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC03jj0D+djk7pxIf/0OhrxrchJTRZklofJ1NoIu4752Sq02mdXmarMVsqJ1cAjV5LBVy3D1F5U6XW4rppkXeVtd04Pxb09ehtH0pRRPaoHHlALiJt8CoMpbKYMA8b3KXPPriGxgGomvtU2T2RMURSwOZbMtpsugfjYSWenyYX+VORYhylWnSXL961LTyC21ehd6d6QnW9G7E5hYMITMY9TuQZz3bROYzXiTsgN0+g6Hn7exFQp50p45StUMfV/SftCMdCxlxuyGny2CrN/vfjO7xxOo2uv7q1qm10Q46KPWJQv+pgZ/OfL+EDjy07n5QVSKHlbx+2nT4Q0EgOSQaCTYwn3YjtABfIxWwgAFdyj6YlPulCL22qU4MYhDcA6PSBwDdf8hvxBfvsiHdM+JcSHvv8/VeJhk6CmnZxGY0fxBupov27z3yEO8nAg8k+6PaUiW1MSUfuGMF/ktB8LOstXsEPXSszuyXiOv4DaryOXUiSn7bmRqKcEFlJusO6aZP0= nicola@p1"
logSender = "APITesting"
userPath = "/api/v1/user"
folderPath = "/api/v1/folder"
activeConnectionsPath = "/api/v1/connection"

@@ -109,6 +108,8 @@ func TestMain(m *testing.M) {
os.RemoveAll(credentialsPath) //nolint:errcheck
logger.InfoToConsole("Starting HTTPD tests, provider: %v", providerConf.Driver)

common.Initialize(config.GetCommonConfig())

err = dataprovider.Initialize(providerConf, configDir)
if err != nil {
logger.WarnToConsole("error initializing data provider: %v", err)

@@ -126,13 +127,14 @@ func TestMain(m *testing.M) {
httpdConf.BackupsPath = backupsPath
err = os.MkdirAll(backupsPath, os.ModePerm)
if err != nil {
logger.WarnToConsole("error creating backups path: %v", err)
logger.ErrorToConsole("error creating backups path: %v", err)
os.Exit(1)
}

go func() {
if err := httpdConf.Initialize(configDir, true); err != nil {
logger.ErrorToConsole("could not start HTTP server: %v", err)
os.Exit(1)
}
}()

@@ -140,14 +142,14 @@ func TestMain(m *testing.M) {
// now start an https server
certPath := filepath.Join(os.TempDir(), "test.crt")
keyPath := filepath.Join(os.TempDir(), "test.key")
err = ioutil.WriteFile(certPath, []byte(httpsCert), 0666)
err = ioutil.WriteFile(certPath, []byte(httpsCert), os.ModePerm)
if err != nil {
logger.WarnToConsole("error writing HTTPS certificate: %v", err)
logger.ErrorToConsole("error writing HTTPS certificate: %v", err)
os.Exit(1)
}
err = ioutil.WriteFile(keyPath, []byte(httpsKey), 0666)
err = ioutil.WriteFile(keyPath, []byte(httpsKey), os.ModePerm)
if err != nil {
logger.WarnToConsole("error writing HTTPS private key: %v", err)
logger.ErrorToConsole("error writing HTTPS private key: %v", err)
os.Exit(1)
}
httpdConf.BindPort = 8443

@@ -156,7 +158,8 @@ func TestMain(m *testing.M) {

go func() {
if err := httpdConf.Initialize(configDir, true); err != nil {
logger.Error(logSender, "", "could not start HTTPS server: %v", err)
logger.ErrorToConsole("could not start HTTPS server: %v", err)
os.Exit(1)
}
}()
waitTCPListening(fmt.Sprintf("%s:%d", httpdConf.BindAddress, httpdConf.BindPort))

@@ -1673,11 +1676,11 @@ func TestUpdateUserQuotaUsageMock(t *testing.T) {
req, _ = http.NewRequest(http.MethodPut, updateUsedQuotaPath, bytes.NewBuffer([]byte("string")))
rr = executeRequest(req)
checkResponseCode(t, http.StatusBadRequest, rr.Code)
assert.True(t, sftpd.AddQuotaScan(user.Username))
assert.True(t, common.QuotaScans.AddUserQuotaScan(user.Username))
req, _ = http.NewRequest(http.MethodPut, updateUsedQuotaPath, bytes.NewBuffer(userAsJSON))
rr = executeRequest(req)
checkResponseCode(t, http.StatusConflict, rr.Code)
assert.NoError(t, sftpd.RemoveQuotaScan(user.Username))
assert.True(t, common.QuotaScans.RemoveUserQuotaScan(user.Username))
req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)

@@ -1854,12 +1857,11 @@ func TestStartQuotaScanMock(t *testing.T) {
}
// simulate a duplicate quota scan
userAsJSON = getUserAsJSON(t, user)
sftpd.AddQuotaScan(user.Username)
common.QuotaScans.AddUserQuotaScan(user.Username)
req, _ = http.NewRequest(http.MethodPost, quotaScanPath, bytes.NewBuffer(userAsJSON))
rr = executeRequest(req)
checkResponseCode(t, http.StatusConflict, rr.Code)
err = sftpd.RemoveQuotaScan(user.Username)
assert.NoError(t, err)
assert.True(t, common.QuotaScans.RemoveUserQuotaScan(user.Username))

userAsJSON = getUserAsJSON(t, user)
req, _ = http.NewRequest(http.MethodPost, quotaScanPath, bytes.NewBuffer(userAsJSON))

@@ -1867,7 +1869,7 @@ func TestStartQuotaScanMock(t *testing.T) {
checkResponseCode(t, http.StatusCreated, rr.Code)

for {
var scans []sftpd.ActiveQuotaScan
var scans []common.ActiveQuotaScan
req, _ = http.NewRequest(http.MethodGet, quotaScanPath, nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)

@@ -1890,7 +1892,7 @@ func TestStartQuotaScanMock(t *testing.T) {
checkResponseCode(t, http.StatusCreated, rr.Code)

for {
var scans []sftpd.ActiveQuotaScan
var scans []common.ActiveQuotaScan
req, _ = http.NewRequest(http.MethodGet, quotaScanPath, nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)

@@ -1954,11 +1956,11 @@ func TestUpdateFolderQuotaUsageMock(t *testing.T) {
rr = executeRequest(req)
checkResponseCode(t, http.StatusBadRequest, rr.Code)

assert.True(t, sftpd.AddVFolderQuotaScan(mappedPath))
assert.True(t, common.QuotaScans.AddVFolderQuotaScan(mappedPath))
req, _ = http.NewRequest(http.MethodPut, updateFolderUsedQuotaPath, bytes.NewBuffer(folderAsJSON))
rr = executeRequest(req)
checkResponseCode(t, http.StatusConflict, rr.Code)
assert.NoError(t, sftpd.RemoveVFolderQuotaScan(mappedPath))
assert.True(t, common.QuotaScans.RemoveVFolderQuotaScan(mappedPath))

url, err = url.Parse(folderPath)
assert.NoError(t, err)

@@ -1986,12 +1988,11 @@ func TestStartFolderQuotaScanMock(t *testing.T) {
assert.NoError(t, err)
}
// simulate a duplicate quota scan
sftpd.AddVFolderQuotaScan(mappedPath)
common.QuotaScans.AddVFolderQuotaScan(mappedPath)
req, _ = http.NewRequest(http.MethodPost, quotaScanVFolderPath, bytes.NewBuffer(folderAsJSON))
rr = executeRequest(req)
checkResponseCode(t, http.StatusConflict, rr.Code)
err = sftpd.RemoveVFolderQuotaScan(mappedPath)
assert.NoError(t, err)
assert.True(t, common.QuotaScans.RemoveVFolderQuotaScan(mappedPath))
// and now a real quota scan
_, err = os.Stat(mappedPath)
if err != nil && os.IsNotExist(err) {

@@ -2001,7 +2002,7 @@ func TestStartFolderQuotaScanMock(t *testing.T) {
req, _ = http.NewRequest(http.MethodPost, quotaScanVFolderPath, bytes.NewBuffer(folderAsJSON))
rr = executeRequest(req)
checkResponseCode(t, http.StatusCreated, rr.Code)
var scans []sftpd.ActiveVirtualFolderQuotaScan
var scans []common.ActiveVirtualFolderQuotaScan
for {
req, _ = http.NewRequest(http.MethodGet, quotaScanVFolderPath, nil)
rr = executeRequest(req)

@@ -2772,7 +2773,7 @@ func waitTCPListening(address string) {
continue
}
logger.InfoToConsole("tcp server %v now listening\n", address)
defer conn.Close()
conn.Close()
break
}
}

@@ -17,8 +17,8 @@ import (
"github.com/go-chi/chi"
"github.com/stretchr/testify/assert"

"github.com/drakkan/sftpgo/common"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/sftpd"
"github.com/drakkan/sftpgo/utils"
"github.com/drakkan/sftpgo/vfs"
)

@@ -526,7 +526,7 @@ func TestQuotaScanInvalidFs(t *testing.T) {
Provider: 1,
},
}
sftpd.AddQuotaScan(user.Username)
common.QuotaScans.AddUserQuotaScan(user.Username)
err := doQuotaScan(user)
assert.Error(t, err)
}

@@ -8,10 +8,10 @@ import (
"github.com/go-chi/chi/middleware"
"github.com/go-chi/render"

"github.com/drakkan/sftpgo/common"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/metrics"
"github.com/drakkan/sftpgo/sftpd"
"github.com/drakkan/sftpgo/version"
)

@@ -68,7 +68,7 @@ func initializeRouter(staticFilesPath string, enableProfiler, enableWebAdmin boo
})

router.Get(activeConnectionsPath, func(w http.ResponseWriter, r *http.Request) {
render.JSON(w, r, sftpd.GetConnectionsStats())
render.JSON(w, r, common.Connections.GetStats())
})

router.Delete(activeConnectionsPath+"/{connectionID}", handleCloseConnection)

@@ -116,7 +116,7 @@ func handleCloseConnection(w http.ResponseWriter, r *http.Request) {
sendAPIResponse(w, r, nil, "connectionID is mandatory", http.StatusBadRequest)
return
}
if sftpd.CloseActiveConnection(connectionID) {
if common.Connections.Close(connectionID) {
sendAPIResponse(w, r, nil, "Connection closed", http.StatusOK)
} else {
sendAPIResponse(w, r, nil, "Not Found", http.StatusNotFound)

@@ -2,7 +2,7 @@ openapi: 3.0.1
info:
title: SFTPGo
description: 'SFTPGo REST API'
version: 1.9.1
version: 1.9.2

servers:
- url: /api/v1

@@ -1778,10 +1778,6 @@ components:
type: integer
format: int64
description: bytes transferred
last_activity:
type: integer
format: int64
description: last transfer activity as unix timestamp in milliseconds
ConnectionStatus:
type: object
properties:

@@ -1793,6 +1789,7 @@ components:
description: unique connection identifier
client_version:
type: string
nullable: true
description: client version
remote_address:
type: string

@@ -1803,6 +1800,7 @@ components:
description: connection time as unix timestamp in milliseconds
ssh_command:
type: string
nullable: true
description: SSH command. This is not empty for protocol SSH
last_activity:
type: integer

@@ -1816,6 +1814,7 @@ components:
- SSH
active_transfers:
type: array
nullable: true
items:
$ref : '#/components/schemas/Transfer'
QuotaScan:

@@ -1,50 +0,0 @@
package httpd

import (
"crypto/tls"
"sync"

"github.com/drakkan/sftpgo/logger"
)

type certManager struct {
cert *tls.Certificate
certPath string
keyPath string
lock *sync.RWMutex
}

func (m *certManager) loadCertificate() error {
newCert, err := tls.LoadX509KeyPair(m.certPath, m.keyPath)
if err != nil {
logger.Warn(logSender, "", "unable to load https certificate: %v", err)
return err
}
logger.Debug(logSender, "", "https certificate successfully loaded")
m.lock.Lock()
defer m.lock.Unlock()
m.cert = &newCert
return nil
}

func (m *certManager) GetCertificateFunc() func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
return func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
m.lock.RLock()
defer m.lock.RUnlock()
return m.cert, nil
}
}

func newCertManager(certificateFile, certificateKeyFile string) (*certManager, error) {
manager := &certManager{
cert: nil,
certPath: certificateFile,
keyPath: certificateKeyFile,
lock: new(sync.RWMutex),
}
err := manager.loadCertificate()
if err != nil {
return nil, err
}
return manager, nil
}

@ -15,8 +15,8 @@ import (

"github.com/go-chi/chi"

"github.com/drakkan/sftpgo/common"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/sftpd"
"github.com/drakkan/sftpgo/utils"
"github.com/drakkan/sftpgo/version"
"github.com/drakkan/sftpgo/vfs"

@ -77,7 +77,7 @@ type foldersPage struct {

type connectionsPage struct {
basePage
Connections []sftpd.ConnectionStatus
Connections []common.ConnectionStatus
}

type userPage struct {

@ -603,7 +603,7 @@ func handleWebUpdateUserPost(w http.ResponseWriter, r *http.Request) {
}

func handleWebGetConnections(w http.ResponseWriter, r *http.Request) {
connectionStats := sftpd.GetConnectionsStats()
connectionStats := common.Connections.GetStats()
data := connectionsPage{
basePage: getBasePageData(pageConnectionsTitle, webConnectionsPath),
Connections: connectionStats,
@ -14,7 +14,6 @@ import (
"os"
"path/filepath"
"runtime"
"sync"

"github.com/rs/zerolog"
lumberjack "gopkg.in/natefinch/lumberjack.v2"

@ -60,9 +59,9 @@ func InitLogger(logFilePath string, logMaxSize int, logMaxBackups int, logMaxAge
logger = zerolog.New(rollingLogger)
EnableConsoleLogger(level)
} else {
logger = zerolog.New(logSyncWrapper{
logger = zerolog.New(&logSyncWrapper{
output: os.Stdout,
lock: new(sync.Mutex)})
})
consoleLogger = zerolog.Nop()
}
logger = logger.Level(level)
@ -6,12 +6,12 @@ import (
)

type logSyncWrapper struct {
sync.Mutex
output *os.File
lock *sync.Mutex
}

func (l logSyncWrapper) Write(b []byte) (n int, err error) {
l.lock.Lock()
defer l.lock.Unlock()
func (l *logSyncWrapper) Write(b []byte) (n int, err error) {
l.Lock()
defer l.Unlock()
return l.output.Write(b)
}
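The wrapper now embeds sync.Mutex and uses a pointer receiver instead of carrying a separate *sync.Mutex field. A minimal, stdlib-only sketch of the same pattern, with hypothetical names:

// Embedded-mutex writer pattern: the mutex is part of the struct value.
package main

import (
    "fmt"
    "os"
    "sync"
)

type syncWriter struct {
    sync.Mutex          // embedding provides Lock/Unlock directly on the wrapper
    output     *os.File
}

// A pointer receiver is required so every caller locks the same mutex.
func (w *syncWriter) Write(b []byte) (int, error) {
    w.Lock()
    defer w.Unlock()
    return w.output.Write(b)
}

func main() {
    w := &syncWriter{output: os.Stdout}
    fmt.Fprintln(w, "serialized write")
}

Embedding the mutex removes one pointer field and one level of indirection; the pointer receiver is what makes every caller share the same lock.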
@ -6,6 +6,7 @@ import (

"github.com/rs/zerolog"

"github.com/drakkan/sftpgo/common"
"github.com/drakkan/sftpgo/config"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/logger"

@ -64,6 +65,9 @@ func (s *Service) Start() error {
logger.Error(logSender, "", "error loading configuration: %v", err)
}
}

common.Initialize(config.GetCommonConfig())

providerConf := config.GetProviderConf()

err := dataprovider.Initialize(providerConf, s.ConfigDir)
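Service.Start now initializes the shared common configuration before the data provider and the protocol services. The sketch below only illustrates that ordering; the function names and signatures are placeholders, not the real sftpgo APIs.

// Illustrative ordering only; the initialize/start functions are placeholders.
package main

import "log"

func initCommon(cfg string) { log.Printf("common initialized with %q", cfg) }
func initProvider() error   { return nil }
func startSFTPD() error     { return nil }

func main() {
    // 1. shared settings first (idle timeout, upload mode, proxy protocol, actions)
    initCommon("common config")
    // 2. then the data provider
    if err := initProvider(); err != nil {
        log.Fatal(err)
    }
    // 3. then the protocol services built on top of the shared code
    if err := startSFTPD(); err != nil {
        log.Fatal(err)
    }
}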
862 sftpd/handler.go
File diff suppressed because it is too large
238 sftpd/scp.go
@ -1,7 +1,6 @@
package sftpd

import (
"errors"
"fmt"
"io"
"math"

@ -10,9 +9,8 @@ import (
"path/filepath"
"strconv"
"strings"
"sync"
"time"

"github.com/drakkan/sftpgo/common"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/utils"

@ -20,11 +18,10 @@ import (
)

var (
okMsg = []byte{0x00}
warnMsg = []byte{0x01} // must be followed by an optional message and a newline
errMsg = []byte{0x02} // must be followed by an optional message and a newline
newLine = []byte{0x0A}
errPermission = errors.New("permission denied")
okMsg = []byte{0x00}
warnMsg = []byte{0x01} // must be followed by an optional message and a newline
errMsg = []byte{0x02} // must be followed by an optional message and a newline
newLine = []byte{0x0A}
)

type scpCommand struct {

@ -32,12 +29,13 @@ type scpCommand struct {
}

func (c *scpCommand) handle() error {
common.Connections.Add(c.connection)
defer common.Connections.Remove(c.connection)

var err error
addConnection(c.connection)
defer removeConnection(c.connection)
destPath := c.getDestPath()
commandType := c.getCommandType()
c.connection.Log(logger.LevelDebug, logSenderSCP, "handle scp command, args: %v user: %v command type: %v, dest path: %#v",
c.connection.Log(logger.LevelDebug, "handle scp command, args: %v user: %v command type: %v, dest path: %#v",
c.args, c.connection.User.Username, commandType, destPath)
if commandType == "-t" {
// -t means "to", so upload

@ -57,7 +55,7 @@ func (c *scpCommand) handle() error {
}
} else {
err = fmt.Errorf("scp command not supported, args: %v", c.args)
c.connection.Log(logger.LevelDebug, logSenderSCP, "unsupported scp command, args: %v", c.args)
c.connection.Log(logger.LevelDebug, "unsupported scp command, args: %v", c.args)
}
c.sendExitStatus(err)
return err
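The handle hunk above swaps the package-local addConnection/removeConnection helpers for the shared common.Connections registry, registering the connection on entry and deregistering it with defer. A minimal sketch of that lifecycle with a hypothetical registry type (not the real common.Connections implementation):

// Hypothetical registry to show the Add / defer Remove lifecycle.
package main

import (
    "fmt"
    "sync"
)

type registry struct {
    mu    sync.Mutex
    conns map[string]struct{}
}

func (r *registry) Add(id string)    { r.mu.Lock(); r.conns[id] = struct{}{}; r.mu.Unlock() }
func (r *registry) Remove(id string) { r.mu.Lock(); delete(r.conns, id); r.mu.Unlock() }

func handleCommand(r *registry, id string) error {
    r.Add(id)
    defer r.Remove(id) // the connection is always deregistered, even on error paths
    fmt.Println("handling command for", id)
    return nil
}

func main() {
    r := &registry{conns: map[string]struct{}{}}
    _ = handleCommand(r, "SCP_1")
}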
@ -78,7 +76,7 @@ func (c *scpCommand) handleRecursiveUpload() error {
|
|||
}
|
||||
if strings.HasPrefix(command, "E") {
|
||||
numDirs--
|
||||
c.connection.Log(logger.LevelDebug, logSenderSCP, "received end dir command, num dirs: %v", numDirs)
|
||||
c.connection.Log(logger.LevelDebug, "received end dir command, num dirs: %v", numDirs)
|
||||
if numDirs == 0 {
|
||||
// upload is now complete send confirmation message
|
||||
err = c.sendConfirmationMessage()
|
||||
|
@ -101,7 +99,7 @@ func (c *scpCommand) handleRecursiveUpload() error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.connection.Log(logger.LevelDebug, logSenderSCP, "received start dir command, num dirs: %v destPath: %#v", numDirs, destPath)
|
||||
c.connection.Log(logger.LevelDebug, "received start dir command, num dirs: %v destPath: %#v", numDirs, destPath)
|
||||
} else if strings.HasPrefix(command, "C") {
|
||||
err = c.handleUpload(c.getFileUploadDestPath(destPath, name), sizeToRead)
|
||||
if err != nil {
|
||||
|
@ -117,29 +115,29 @@ func (c *scpCommand) handleRecursiveUpload() error {
|
|||
}
|
||||
|
||||
func (c *scpCommand) handleCreateDir(dirPath string) error {
|
||||
updateConnectionActivity(c.connection.ID)
|
||||
p, err := c.connection.fs.ResolvePath(dirPath)
|
||||
c.connection.UpdateLastActivity()
|
||||
p, err := c.connection.Fs.ResolvePath(dirPath)
|
||||
if err != nil {
|
||||
c.connection.Log(logger.LevelWarn, logSenderSCP, "error creating dir: %#v, invalid file path, err: %v", dirPath, err)
|
||||
c.connection.Log(logger.LevelWarn, "error creating dir: %#v, invalid file path, err: %v", dirPath, err)
|
||||
c.sendErrorMessage(err)
|
||||
return err
|
||||
}
|
||||
if !c.connection.User.HasPerm(dataprovider.PermCreateDirs, path.Dir(dirPath)) {
|
||||
c.connection.Log(logger.LevelWarn, logSenderSCP, "error creating dir: %#v, permission denied", dirPath)
|
||||
c.sendErrorMessage(errPermission)
|
||||
return errPermission
|
||||
c.connection.Log(logger.LevelWarn, "error creating dir: %#v, permission denied", dirPath)
|
||||
c.sendErrorMessage(common.ErrPermissionDenied)
|
||||
return common.ErrPermissionDenied
|
||||
}
|
||||
|
||||
err = c.createDir(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.connection.Log(logger.LevelDebug, mkdirLogSender, "created dir %#v", dirPath)
|
||||
c.connection.Log(logger.LevelDebug, "created dir %#v", dirPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
// we need to close the transfer if we have an error
|
||||
func (c *scpCommand) getUploadFileData(sizeToRead int64, transfer *Transfer) error {
|
||||
func (c *scpCommand) getUploadFileData(sizeToRead int64, transfer *transfer) error {
|
||||
err := c.sendConfirmationMessage()
|
||||
if err != nil {
|
||||
transfer.TransferError(err)
|
||||
|
@ -188,17 +186,17 @@ func (c *scpCommand) getUploadFileData(sizeToRead int64, transfer *Transfer) err
|
|||
}
|
||||
|
||||
func (c *scpCommand) handleUploadFile(resolvedPath, filePath string, sizeToRead int64, isNewFile bool, fileSize int64, requestPath string) error {
|
||||
quotaResult := c.connection.hasSpace(isNewFile, requestPath)
|
||||
quotaResult := c.connection.HasSpace(isNewFile, requestPath)
|
||||
if !quotaResult.HasSpace {
|
||||
err := fmt.Errorf("denying file write due to quota limits")
|
||||
c.connection.Log(logger.LevelWarn, logSenderSCP, "error uploading file: %#v, err: %v", filePath, err)
|
||||
c.connection.Log(logger.LevelWarn, "error uploading file: %#v, err: %v", filePath, err)
|
||||
c.sendErrorMessage(err)
|
||||
return err
|
||||
}
|
||||
|
||||
file, w, cancelFn, err := c.connection.fs.Create(filePath, 0)
|
||||
file, w, cancelFn, err := c.connection.Fs.Create(filePath, 0)
|
||||
if err != nil {
|
||||
c.connection.Log(logger.LevelError, logSenderSCP, "error creating file %#v: %v", resolvedPath, err)
|
||||
c.connection.Log(logger.LevelError, "error creating file %#v: %v", resolvedPath, err)
|
||||
c.sendErrorMessage(err)
|
||||
return err
|
||||
}
|
||||
|
@ -206,7 +204,7 @@ func (c *scpCommand) handleUploadFile(resolvedPath, filePath string, sizeToRead
|
|||
initialSize := int64(0)
|
||||
maxWriteSize := quotaResult.GetRemainingSize()
|
||||
if !isNewFile {
|
||||
if vfs.IsLocalOsFs(c.connection.fs) {
|
||||
if vfs.IsLocalOsFs(c.connection.Fs) {
|
||||
vfolder, err := c.connection.User.GetVirtualFolderForPath(path.Dir(requestPath))
|
||||
if err == nil {
|
||||
dataprovider.UpdateVirtualFolderQuota(vfolder.BaseVirtualFolder, 0, -fileSize, false) //nolint:errcheck
|
||||
|
@ -224,91 +222,70 @@ func (c *scpCommand) handleUploadFile(resolvedPath, filePath string, sizeToRead
}
}

vfs.SetPathPermissions(c.connection.fs, filePath, c.connection.User.GetUID(), c.connection.User.GetGID())
vfs.SetPathPermissions(c.connection.Fs, filePath, c.connection.User.GetUID(), c.connection.User.GetGID())

transfer := Transfer{
file: file,
readerAt: nil,
writerAt: w,
cancelFn: cancelFn,
path: resolvedPath,
start: time.Now(),
bytesSent: 0,
bytesReceived: 0,
user: c.connection.User,
connectionID: c.connection.ID,
transferType: transferUpload,
lastActivity: time.Now(),
isNewFile: isNewFile,
protocol: c.connection.protocol,
transferError: nil,
isFinished: false,
minWriteOffset: 0,
initialSize: initialSize,
requestPath: requestPath,
maxWriteSize: maxWriteSize,
lock: new(sync.Mutex),
}
addTransfer(&transfer)
baseTransfer := common.NewBaseTransfer(file, c.connection.BaseConnection, cancelFn, resolvedPath, requestPath,
common.TransferUpload, 0, initialSize, isNewFile)
t := newTranfer(baseTransfer, w, nil, maxWriteSize)

return c.getUploadFileData(sizeToRead, &transfer)
return c.getUploadFileData(sizeToRead, t)
}
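This hunk replaces the large protocol-specific Transfer literal with a shared base transfer created by common.NewBaseTransfer plus a thin protocol wrapper. The sketch below shows the base-plus-wrapper composition in isolation; the type and constructor names are illustrative assumptions, not the real common package API.

// Hypothetical base-transfer + protocol-wrapper composition.
package main

import (
    "fmt"
    "time"
)

type baseTransfer struct {
    path      string
    startTime time.Time
    isUpload  bool
}

func newBaseTransfer(path string, isUpload bool) *baseTransfer {
    return &baseTransfer{path: path, startTime: time.Now(), isUpload: isUpload}
}

// scpTransfer adds only what the protocol needs on top of the shared state.
type scpTransfer struct {
    *baseTransfer
    maxWriteSize int64
}

func main() {
    t := &scpTransfer{baseTransfer: newBaseTransfer("/tmp/file.txt", true), maxWriteSize: 1 << 20}
    fmt.Println(t.path, t.isUpload, t.maxWriteSize)
}

The wrapper keeps only what the protocol needs, while timings, counters and error state can live in the shared base type.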
|
||||
func (c *scpCommand) handleUpload(uploadFilePath string, sizeToRead int64) error {
|
||||
c.connection.UpdateLastActivity()
|
||||
|
||||
var err error
|
||||
|
||||
updateConnectionActivity(c.connection.ID)
|
||||
|
||||
if !c.connection.User.IsFileAllowed(uploadFilePath) {
|
||||
c.connection.Log(logger.LevelWarn, logSenderSCP, "writing file %#v is not allowed", uploadFilePath)
|
||||
c.sendErrorMessage(errPermission)
|
||||
c.connection.Log(logger.LevelWarn, "writing file %#v is not allowed", uploadFilePath)
|
||||
c.sendErrorMessage(common.ErrPermissionDenied)
|
||||
}
|
||||
|
||||
p, err := c.connection.fs.ResolvePath(uploadFilePath)
|
||||
p, err := c.connection.Fs.ResolvePath(uploadFilePath)
|
||||
if err != nil {
|
||||
c.connection.Log(logger.LevelWarn, logSenderSCP, "error uploading file: %#v, err: %v", uploadFilePath, err)
|
||||
c.sendErrorMessage(err)
|
||||
c.connection.Log(logger.LevelWarn, "error uploading file: %#v, err: %v", uploadFilePath, err)
|
||||
c.sendErrorMessage(c.connection.GetFsError(err))
|
||||
return err
|
||||
}
|
||||
filePath := p
|
||||
if isAtomicUploadEnabled() && c.connection.fs.IsAtomicUploadSupported() {
|
||||
filePath = c.connection.fs.GetAtomicUploadPath(p)
|
||||
if common.Config.IsAtomicUploadEnabled() && c.connection.Fs.IsAtomicUploadSupported() {
|
||||
filePath = c.connection.Fs.GetAtomicUploadPath(p)
|
||||
}
|
||||
stat, statErr := c.connection.fs.Lstat(p)
|
||||
if (statErr == nil && stat.Mode()&os.ModeSymlink == os.ModeSymlink) || c.connection.fs.IsNotExist(statErr) {
|
||||
stat, statErr := c.connection.Fs.Lstat(p)
|
||||
if (statErr == nil && stat.Mode()&os.ModeSymlink == os.ModeSymlink) || c.connection.Fs.IsNotExist(statErr) {
|
||||
if !c.connection.User.HasPerm(dataprovider.PermUpload, path.Dir(uploadFilePath)) {
|
||||
c.connection.Log(logger.LevelWarn, logSenderSCP, "cannot upload file: %#v, permission denied", uploadFilePath)
|
||||
c.sendErrorMessage(errPermission)
|
||||
return errPermission
|
||||
c.connection.Log(logger.LevelWarn, "cannot upload file: %#v, permission denied", uploadFilePath)
|
||||
c.sendErrorMessage(common.ErrPermissionDenied)
|
||||
return common.ErrPermissionDenied
|
||||
}
|
||||
return c.handleUploadFile(p, filePath, sizeToRead, true, 0, uploadFilePath)
|
||||
}
|
||||
|
||||
if statErr != nil {
|
||||
c.connection.Log(logger.LevelError, logSenderSCP, "error performing file stat %#v: %v", p, statErr)
|
||||
c.connection.Log(logger.LevelError, "error performing file stat %#v: %v", p, statErr)
|
||||
c.sendErrorMessage(statErr)
|
||||
return statErr
|
||||
}
|
||||
|
||||
if stat.IsDir() {
|
||||
c.connection.Log(logger.LevelWarn, logSenderSCP, "attempted to open a directory for writing to: %#v", p)
|
||||
c.connection.Log(logger.LevelWarn, "attempted to open a directory for writing to: %#v", p)
|
||||
err = fmt.Errorf("Attempted to open a directory for writing: %#v", p)
|
||||
c.sendErrorMessage(err)
|
||||
return err
|
||||
}
|
||||
|
||||
if !c.connection.User.HasPerm(dataprovider.PermOverwrite, uploadFilePath) {
|
||||
c.connection.Log(logger.LevelWarn, logSenderSCP, "cannot overwrite file: %#v, permission denied", uploadFilePath)
|
||||
c.sendErrorMessage(errPermission)
|
||||
return errPermission
|
||||
c.connection.Log(logger.LevelWarn, "cannot overwrite file: %#v, permission denied", uploadFilePath)
|
||||
c.sendErrorMessage(common.ErrPermissionDenied)
|
||||
return common.ErrPermissionDenied
|
||||
}
|
||||
|
||||
if isAtomicUploadEnabled() && c.connection.fs.IsAtomicUploadSupported() {
|
||||
err = c.connection.fs.Rename(p, filePath)
|
||||
if common.Config.IsAtomicUploadEnabled() && c.connection.Fs.IsAtomicUploadSupported() {
|
||||
err = c.connection.Fs.Rename(p, filePath)
|
||||
if err != nil {
|
||||
c.connection.Log(logger.LevelError, logSenderSCP, "error renaming existing file for atomic upload, source: %#v, dest: %#v, err: %v",
|
||||
c.connection.Log(logger.LevelError, "error renaming existing file for atomic upload, source: %#v, dest: %#v, err: %v",
|
||||
p, filePath, err)
|
||||
c.sendErrorMessage(err)
|
||||
c.sendErrorMessage(c.connection.GetFsError(err))
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -353,20 +330,20 @@ func (c *scpCommand) sendDownloadProtocolMessages(dirPath string, stat os.FileIn
|
|||
func (c *scpCommand) handleRecursiveDownload(dirPath string, stat os.FileInfo) error {
|
||||
var err error
|
||||
if c.isRecursive() {
|
||||
c.connection.Log(logger.LevelDebug, logSenderSCP, "recursive download, dir path: %#v", dirPath)
|
||||
c.connection.Log(logger.LevelDebug, "recursive download, dir path: %#v", dirPath)
|
||||
err = c.sendDownloadProtocolMessages(dirPath, stat)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
files, err := c.connection.fs.ReadDir(dirPath)
|
||||
files = c.connection.User.AddVirtualDirs(files, c.connection.fs.GetRelativePath(dirPath))
|
||||
files, err := c.connection.Fs.ReadDir(dirPath)
|
||||
files = c.connection.User.AddVirtualDirs(files, c.connection.Fs.GetRelativePath(dirPath))
|
||||
if err != nil {
|
||||
c.sendErrorMessage(err)
|
||||
return err
|
||||
}
|
||||
var dirs []string
|
||||
for _, file := range files {
|
||||
filePath := c.connection.fs.GetRelativePath(c.connection.fs.Join(dirPath, file.Name()))
|
||||
filePath := c.connection.Fs.GetRelativePath(c.connection.Fs.Join(dirPath, file.Name()))
|
||||
if file.Mode().IsRegular() || file.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||
err = c.handleDownload(filePath)
|
||||
if err != nil {
|
||||
|
@ -405,7 +382,7 @@ func (c *scpCommand) handleRecursiveDownload(dirPath string, stat os.FileInfo) e
|
|||
return err
|
||||
}
|
||||
|
||||
func (c *scpCommand) sendDownloadFileData(filePath string, stat os.FileInfo, transfer *Transfer) error {
|
||||
func (c *scpCommand) sendDownloadFileData(filePath string, stat os.FileInfo, transfer *transfer) error {
|
||||
var err error
|
||||
if c.sendFileTime() {
|
||||
modTime := stat.ModTime().UnixNano() / 1000000000
|
||||
|
@ -459,83 +436,64 @@ func (c *scpCommand) sendDownloadFileData(filePath string, stat os.FileInfo, tra
|
|||
}
|
||||
|
||||
func (c *scpCommand) handleDownload(filePath string) error {
|
||||
c.connection.UpdateLastActivity()
|
||||
var err error
|
||||
|
||||
updateConnectionActivity(c.connection.ID)
|
||||
|
||||
p, err := c.connection.fs.ResolvePath(filePath)
|
||||
p, err := c.connection.Fs.ResolvePath(filePath)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Invalid file path")
|
||||
c.connection.Log(logger.LevelWarn, logSenderSCP, "error downloading file: %#v, invalid file path", filePath)
|
||||
c.sendErrorMessage(err)
|
||||
c.connection.Log(logger.LevelWarn, "error downloading file: %#v, invalid file path", filePath)
|
||||
c.sendErrorMessage(c.connection.GetFsError(err))
|
||||
return err
|
||||
}
|
||||
|
||||
var stat os.FileInfo
|
||||
if stat, err = c.connection.fs.Stat(p); err != nil {
|
||||
c.connection.Log(logger.LevelWarn, logSenderSCP, "error downloading file: %#v, err: %v", p, err)
|
||||
c.sendErrorMessage(err)
|
||||
if stat, err = c.connection.Fs.Stat(p); err != nil {
|
||||
c.connection.Log(logger.LevelWarn, "error downloading file: %#v, err: %v", p, err)
|
||||
c.sendErrorMessage(c.connection.GetFsError(err))
|
||||
return err
|
||||
}
|
||||
|
||||
if stat.IsDir() {
|
||||
if !c.connection.User.HasPerm(dataprovider.PermDownload, filePath) {
|
||||
c.connection.Log(logger.LevelWarn, logSenderSCP, "error downloading dir: %#v, permission denied", filePath)
|
||||
c.sendErrorMessage(errPermission)
|
||||
return errPermission
|
||||
c.connection.Log(logger.LevelWarn, "error downloading dir: %#v, permission denied", filePath)
|
||||
c.sendErrorMessage(common.ErrPermissionDenied)
|
||||
return common.ErrPermissionDenied
|
||||
}
|
||||
err = c.handleRecursiveDownload(p, stat)
|
||||
return err
|
||||
}
|
||||
|
||||
if !c.connection.User.HasPerm(dataprovider.PermDownload, path.Dir(filePath)) {
|
||||
c.connection.Log(logger.LevelWarn, logSenderSCP, "error downloading dir: %#v, permission denied", filePath)
|
||||
c.sendErrorMessage(errPermission)
|
||||
return errPermission
|
||||
c.connection.Log(logger.LevelWarn, "error downloading dir: %#v, permission denied", filePath)
|
||||
c.sendErrorMessage(common.ErrPermissionDenied)
|
||||
return common.ErrPermissionDenied
|
||||
}
|
||||
|
||||
if !c.connection.User.IsFileAllowed(filePath) {
|
||||
c.connection.Log(logger.LevelWarn, logSenderSCP, "reading file %#v is not allowed", filePath)
|
||||
c.sendErrorMessage(errPermission)
|
||||
c.connection.Log(logger.LevelWarn, "reading file %#v is not allowed", filePath)
|
||||
c.sendErrorMessage(common.ErrPermissionDenied)
|
||||
}
|
||||
|
||||
file, r, cancelFn, err := c.connection.fs.Open(p)
|
||||
file, r, cancelFn, err := c.connection.Fs.Open(p)
|
||||
if err != nil {
|
||||
c.connection.Log(logger.LevelError, logSenderSCP, "could not open file %#v for reading: %v", p, err)
|
||||
c.sendErrorMessage(err)
|
||||
c.connection.Log(logger.LevelError, "could not open file %#v for reading: %v", p, err)
|
||||
c.sendErrorMessage(c.connection.GetFsError(err))
|
||||
return err
|
||||
}
|
||||
|
||||
transfer := Transfer{
|
||||
file: file,
|
||||
readerAt: r,
|
||||
writerAt: nil,
|
||||
cancelFn: cancelFn,
|
||||
path: p,
|
||||
start: time.Now(),
|
||||
bytesSent: 0,
|
||||
bytesReceived: 0,
|
||||
user: c.connection.User,
|
||||
connectionID: c.connection.ID,
|
||||
transferType: transferDownload,
|
||||
lastActivity: time.Now(),
|
||||
isNewFile: false,
|
||||
protocol: c.connection.protocol,
|
||||
transferError: nil,
|
||||
isFinished: false,
|
||||
minWriteOffset: 0,
|
||||
lock: new(sync.Mutex),
|
||||
}
|
||||
addTransfer(&transfer)
|
||||
baseTransfer := common.NewBaseTransfer(file, c.connection.BaseConnection, cancelFn, p, filePath,
|
||||
common.TransferDownload, 0, 0, false)
|
||||
t := newTranfer(baseTransfer, nil, r, 0)
|
||||
|
||||
err = c.sendDownloadFileData(p, stat, &transfer)
|
||||
err = c.sendDownloadFileData(p, stat, t)
|
||||
// we need to call Close anyway and return close error if any and
|
||||
// if we have no previous error
|
||||
if err == nil {
|
||||
err = transfer.Close()
|
||||
err = t.Close()
|
||||
} else {
|
||||
transfer.TransferError(err)
|
||||
transfer.Close()
|
||||
t.TransferError(err)
|
||||
t.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
@ -574,7 +532,7 @@ func (c *scpCommand) readConfirmationMessage() error {
|
|||
msg.WriteString(string(readed))
|
||||
}
|
||||
}
|
||||
c.connection.Log(logger.LevelInfo, logSenderSCP, "scp error message received: %v is error: %v", msg.String(), isError)
|
||||
c.connection.Log(logger.LevelInfo, "scp error message received: %v is error: %v", msg.String(), isError)
|
||||
err = fmt.Errorf("%v", msg.String())
|
||||
c.connection.channel.Close()
|
||||
}
|
||||
|
@ -610,7 +568,7 @@ func (c *scpCommand) readProtocolMessage() (string, error) {
|
|||
//nolint:errcheck // we don't check write errors here, we have to close the channel anyway
|
||||
func (c *scpCommand) sendErrorMessage(err error) {
|
||||
c.connection.channel.Write(errMsg)
|
||||
c.connection.channel.Write([]byte(c.getMappedError(err).Error()))
|
||||
c.connection.channel.Write([]byte(c.connection.GetFsError(err).Error()))
|
||||
c.connection.channel.Write(newLine)
|
||||
c.connection.channel.Close()
|
||||
}
|
||||
|
@ -628,7 +586,7 @@ func (c *scpCommand) sendConfirmationMessage() error {
|
|||
func (c *scpCommand) sendProtocolMessage(message string) error {
|
||||
_, err := c.connection.channel.Write([]byte(message))
|
||||
if err != nil {
|
||||
c.connection.Log(logger.LevelWarn, logSenderSCP, "error sending protocol message: %v, err: %v", message, err)
|
||||
c.connection.Log(logger.LevelWarn, "error sending protocol message: %v, err: %v", message, err)
|
||||
c.connection.channel.Close()
|
||||
}
|
||||
return err
|
||||
|
@ -659,17 +617,17 @@ func (c *scpCommand) getNextUploadProtocolMessage() (string, error) {
|
|||
func (c *scpCommand) createDir(dirPath string) error {
|
||||
var err error
|
||||
var isDir bool
|
||||
isDir, err = vfs.IsDirectory(c.connection.fs, dirPath)
|
||||
isDir, err = vfs.IsDirectory(c.connection.Fs, dirPath)
|
||||
if err == nil && isDir {
|
||||
c.connection.Log(logger.LevelDebug, logSenderSCP, "directory %#v already exists", dirPath)
|
||||
c.connection.Log(logger.LevelDebug, "directory %#v already exists", dirPath)
|
||||
return nil
|
||||
}
|
||||
if err = c.connection.fs.Mkdir(dirPath); err != nil {
|
||||
c.connection.Log(logger.LevelError, logSenderSCP, "error creating dir %#v: %v", dirPath, err)
|
||||
c.sendErrorMessage(err)
|
||||
if err = c.connection.Fs.Mkdir(dirPath); err != nil {
|
||||
c.connection.Log(logger.LevelError, "error creating dir %#v: %v", dirPath, err)
|
||||
c.sendErrorMessage(c.connection.GetFsError(err))
|
||||
return err
|
||||
}
|
||||
vfs.SetPathPermissions(c.connection.fs, dirPath, c.connection.User.GetUID(), c.connection.User.GetGID())
|
||||
vfs.SetPathPermissions(c.connection.Fs, dirPath, c.connection.User.GetUID(), c.connection.User.GetGID())
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -685,7 +643,7 @@ func (c *scpCommand) parseUploadMessage(command string) (int64, string, error) {
|
|||
if !strings.HasPrefix(command, "C") && !strings.HasPrefix(command, "D") {
|
||||
err = fmt.Errorf("unknown or invalid upload message: %v args: %v user: %v",
|
||||
command, c.args, c.connection.User.Username)
|
||||
c.connection.Log(logger.LevelWarn, logSenderSCP, "error: %v", err)
|
||||
c.connection.Log(logger.LevelWarn, "error: %v", err)
|
||||
c.sendErrorMessage(err)
|
||||
return size, name, err
|
||||
}
|
||||
|
@ -693,20 +651,20 @@ func (c *scpCommand) parseUploadMessage(command string) (int64, string, error) {
|
|||
if len(parts) == 3 {
|
||||
size, err = strconv.ParseInt(parts[1], 10, 64)
|
||||
if err != nil {
|
||||
c.connection.Log(logger.LevelWarn, logSenderSCP, "error getting size from upload message: %v", err)
|
||||
c.connection.Log(logger.LevelWarn, "error getting size from upload message: %v", err)
|
||||
c.sendErrorMessage(err)
|
||||
return size, name, err
|
||||
}
|
||||
name = parts[2]
|
||||
if len(name) == 0 {
|
||||
err = fmt.Errorf("error getting name from upload message, cannot be empty")
|
||||
c.connection.Log(logger.LevelWarn, logSenderSCP, "error: %v", err)
|
||||
c.connection.Log(logger.LevelWarn, "error: %v", err)
|
||||
c.sendErrorMessage(err)
|
||||
return size, name, err
|
||||
}
|
||||
} else {
|
||||
err = fmt.Errorf("Error splitting upload message: %#v", command)
|
||||
c.connection.Log(logger.LevelWarn, logSenderSCP, "error: %v", err)
|
||||
c.connection.Log(logger.LevelWarn, "error: %v", err)
|
||||
c.sendErrorMessage(err)
|
||||
return size, name, err
|
||||
}
|
||||
|
@ -724,8 +682,8 @@ func (c *scpCommand) getFileUploadDestPath(scpDestPath, fileName string) string
|
|||
// but if scpDestPath is an existing directory then we put the uploaded file
|
||||
// inside that directory this is as scp command works, for example:
|
||||
// scp fileName.txt user@127.0.0.1:/existing_dir
|
||||
if p, err := c.connection.fs.ResolvePath(scpDestPath); err == nil {
|
||||
if stat, err := c.connection.fs.Stat(p); err == nil {
|
||||
if p, err := c.connection.Fs.ResolvePath(scpDestPath); err == nil {
|
||||
if stat, err := c.connection.Fs.Stat(p); err == nil {
|
||||
if stat.IsDir() {
|
||||
return path.Join(scpDestPath, fileName)
|
||||
}
|
||||
|
|
142 sftpd/server.go
|
@ -11,14 +11,13 @@ import (
|
|||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pires/go-proxyproto"
|
||||
"github.com/pkg/sftp"
|
||||
"golang.org/x/crypto/ssh"
|
||||
|
||||
"github.com/drakkan/sftpgo/common"
|
||||
"github.com/drakkan/sftpgo/dataprovider"
|
||||
"github.com/drakkan/sftpgo/logger"
|
||||
"github.com/drakkan/sftpgo/metrics"
|
||||
|
@ -43,27 +42,16 @@ type Configuration struct {
|
|||
BindPort int `json:"bind_port" mapstructure:"bind_port"`
|
||||
// The address to listen on. A blank value means listen on all available network interfaces.
|
||||
BindAddress string `json:"bind_address" mapstructure:"bind_address"`
|
||||
// Maximum idle timeout as minutes. If a client is idle for a time that exceeds this setting it will be disconnected.
|
||||
// 0 means disabled
|
||||
// Deprecated: please use the same key in common configuration
|
||||
IdleTimeout int `json:"idle_timeout" mapstructure:"idle_timeout"`
|
||||
// Maximum number of authentication attempts permitted per connection.
|
||||
// If set to a negative number, the number of attempts is unlimited.
|
||||
// If set to zero, the number of attempts are limited to 6.
|
||||
MaxAuthTries int `json:"max_auth_tries" mapstructure:"max_auth_tries"`
|
||||
// Umask for new files
|
||||
Umask string `json:"umask" mapstructure:"umask"`
|
||||
// UploadMode 0 means standard, the files are uploaded directly to the requested path.
|
||||
// 1 means atomic: the files are uploaded to a temporary path and renamed to the requested path
|
||||
// when the client ends the upload. Atomic mode avoid problems such as a web server that
|
||||
// serves partial files when the files are being uploaded.
|
||||
// In atomic mode if there is an upload error the temporary file is deleted and so the requested
|
||||
// upload path will not contain a partial file.
|
||||
// 2 means atomic with resume support: as atomic but if there is an upload error the temporary
|
||||
// file is renamed to the requested path and not deleted, this way a client can reconnect and resume
|
||||
// the upload.
|
||||
// Deprecated: please use the same key in common configuration
|
||||
UploadMode int `json:"upload_mode" mapstructure:"upload_mode"`
|
||||
// Actions to execute on SFTP create, download, delete and rename
|
||||
Actions Actions `json:"actions" mapstructure:"actions"`
|
||||
// Actions to execute on file operations and SSH commands
|
||||
Actions common.ProtocolActions `json:"actions" mapstructure:"actions"`
|
||||
// Deprecated: please use HostKeys
|
||||
Keys []Key `json:"keys" mapstructure:"keys"`
|
||||
// HostKeys define the daemon's private host keys.
|
||||
|
@ -86,8 +74,7 @@ type Configuration struct {
|
|||
// LoginBannerFile the contents of the specified file, if any, are sent to
|
||||
// the remote user before authentication is allowed.
|
||||
LoginBannerFile string `json:"login_banner_file" mapstructure:"login_banner_file"`
|
||||
// SetstatMode 0 means "normal mode": requests for changing permissions and owner/group are executed.
|
||||
// 1 means "ignore mode": requests for changing permissions and owner/group are silently ignored.
|
||||
// Deprecated: please use the same key in common configuration
|
||||
SetstatMode int `json:"setstat_mode" mapstructure:"setstat_mode"`
|
||||
// List of enabled SSH commands.
|
||||
// We support the following SSH commands:
|
||||
|
@ -110,27 +97,12 @@ type Configuration struct {
|
|||
// The following SSH commands are enabled by default: "md5sum", "sha1sum", "cd", "pwd".
|
||||
// "*" enables all supported SSH commands.
|
||||
EnabledSSHCommands []string `json:"enabled_ssh_commands" mapstructure:"enabled_ssh_commands"`
|
||||
// Deprecated: please use KeyboardInteractiveHook
|
||||
KeyboardInteractiveProgram string `json:"keyboard_interactive_auth_program" mapstructure:"keyboard_interactive_auth_program"`
|
||||
// Absolute path to an external program or an HTTP URL to invoke for keyboard interactive authentication.
|
||||
// Leave empty to disable this authentication mode.
|
||||
KeyboardInteractiveHook string `json:"keyboard_interactive_auth_hook" mapstructure:"keyboard_interactive_auth_hook"`
|
||||
// Support for HAProxy PROXY protocol.
|
||||
// If you are running SFTPGo behind a proxy server such as HAProxy, AWS ELB or NGNIX, you can enable
|
||||
// the proxy protocol. It provides a convenient way to safely transport connection information
|
||||
// such as a client's address across multiple layers of NAT or TCP proxies to get the real
|
||||
// client IP address instead of the proxy IP. Both protocol versions 1 and 2 are supported.
|
||||
// - 0 means disabled
|
||||
// - 1 means proxy protocol enabled. Proxy header will be used and requests without proxy header will be accepted.
|
||||
// - 2 means proxy protocol required. Proxy header will be used and requests without proxy header will be rejected.
|
||||
// If the proxy protocol is enabled in SFTPGo then you have to enable the protocol in your proxy configuration too,
|
||||
// for example for HAProxy add "send-proxy" or "send-proxy-v2" to each server configuration line.
|
||||
// Deprecated: please use the same key in common configuration
|
||||
ProxyProtocol int `json:"proxy_protocol" mapstructure:"proxy_protocol"`
|
||||
// List of IP addresses and IP ranges allowed to send the proxy header.
|
||||
// If proxy protocol is set to 1 and we receive a proxy header from an IP that is not in the list then the
|
||||
// connection will be accepted and the header will be ignored.
|
||||
// If proxy protocol is set to 2 and we receive a proxy header from an IP that is not in the list then the
|
||||
// connection will be rejected.
|
||||
// Deprecated: please use the same key in common configuration
|
||||
ProxyAllowed []string `json:"proxy_allowed" mapstructure:"proxy_allowed"`
|
||||
certChecker *ssh.CertChecker
|
||||
parsedUserCAKeys []ssh.PublicKey
|
||||
|
@ -153,13 +125,6 @@ func (e *authenticationError) Error() string {
|
|||
|
||||
// Initialize the SFTP server and add a persistent listener to handle inbound SFTP connections.
|
||||
func (c Configuration) Initialize(configDir string) error {
|
||||
umask, err := strconv.ParseUint(c.Umask, 8, 8)
|
||||
if err == nil {
|
||||
utils.SetUmask(int(umask), c.Umask)
|
||||
} else {
|
||||
logger.Warn(logSender, "", "error reading umask, please fix your config file: %v", err)
|
||||
logger.WarnToConsole("error reading umask, please fix your config file: %v", err)
|
||||
}
|
||||
serverConfig := &ssh.ServerConfig{
|
||||
NoClientAuth: false,
|
||||
MaxAuthTries: c.MaxAuthTries,
|
||||
|
@ -193,11 +158,11 @@ func (c Configuration) Initialize(configDir string) error {
|
|||
ServerVersion: fmt.Sprintf("SSH-2.0-%v", c.Banner),
|
||||
}
|
||||
|
||||
if err = c.checkAndLoadHostKeys(configDir, serverConfig); err != nil {
|
||||
if err := c.checkAndLoadHostKeys(configDir, serverConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = c.initializeCertChecker(configDir); err != nil {
|
||||
if err := c.initializeCertChecker(configDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -213,16 +178,12 @@ func (c Configuration) Initialize(configDir string) error {
|
|||
logger.Warn(logSender, "", "error starting listener on address %s:%d: %v", c.BindAddress, c.BindPort, err)
|
||||
return err
|
||||
}
|
||||
proxyListener, err := c.getProxyListener(listener)
|
||||
proxyListener, err := common.Config.GetProxyListener(listener)
|
||||
if err != nil {
|
||||
logger.Warn(logSender, "", "error enabling proxy listener: %v", err)
|
||||
return err
|
||||
}
|
||||
actions = c.Actions
|
||||
uploadMode = c.UploadMode
|
||||
setstatMode = c.SetstatMode
|
||||
logger.Info(logSender, "", "server listener registered address: %v", listener.Addr().String())
|
||||
c.checkIdleTimer()
|
||||
|
||||
for {
|
||||
var conn net.Conn
|
||||
|
@ -237,43 +198,6 @@ func (c Configuration) Initialize(configDir string) error {
|
|||
}
|
||||
}
|
||||
|
||||
func (c *Configuration) getProxyListener(listener net.Listener) (*proxyproto.Listener, error) {
|
||||
var proxyListener *proxyproto.Listener
|
||||
var err error
|
||||
if c.ProxyProtocol > 0 {
|
||||
var policyFunc func(upstream net.Addr) (proxyproto.Policy, error)
|
||||
if c.ProxyProtocol == 1 && len(c.ProxyAllowed) > 0 {
|
||||
policyFunc, err = proxyproto.LaxWhiteListPolicy(c.ProxyAllowed)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if c.ProxyProtocol == 2 {
|
||||
if len(c.ProxyAllowed) == 0 {
|
||||
policyFunc = func(upstream net.Addr) (proxyproto.Policy, error) {
|
||||
return proxyproto.REQUIRE, nil
|
||||
}
|
||||
} else {
|
||||
policyFunc, err = proxyproto.StrictWhiteListPolicy(c.ProxyAllowed)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
proxyListener = &proxyproto.Listener{
|
||||
Listener: listener,
|
||||
Policy: policyFunc,
|
||||
}
|
||||
}
|
||||
return proxyListener, nil
|
||||
}
|
||||
|
||||
func (c Configuration) checkIdleTimer() {
|
||||
if c.IdleTimeout > 0 {
|
||||
startIdleTimer(time.Duration(c.IdleTimeout) * time.Minute)
|
||||
}
|
||||
}
|
||||
|
||||
func (c Configuration) configureSecurityOptions(serverConfig *ssh.ServerConfig) {
|
||||
if len(c.KexAlgorithms) > 0 {
|
||||
serverConfig.KeyExchanges = c.KexAlgorithms
|
||||
|
@ -368,20 +292,16 @@ func (c Configuration) AcceptInboundConnection(conn net.Conn, config *ssh.Server
}

connection := Connection{
ID: connectionID,
User: user,
ClientVersion: string(sconn.ClientVersion()),
RemoteAddr: remoteAddr,
StartTime: time.Now(),
lastActivity: time.Now(),
netConn: conn,
channel: nil,
fs: fs,
BaseConnection: common.NewBaseConnection(connectionID, "sftpd", user, fs),
ClientVersion: string(sconn.ClientVersion()),
RemoteAddr: remoteAddr,
netConn: conn,
channel: nil,
}

connection.fs.CheckRootPath(user.Username, user.GetUID(), user.GetGID())
connection.Fs.CheckRootPath(user.Username, user.GetUID(), user.GetGID())

connection.Log(logger.LevelInfo, logSender, "User id: %d, logged in with: %#v, username: %#v, home_dir: %#v remote addr: %#v",
connection.Log(logger.LevelInfo, "User id: %d, logged in with: %#v, username: %#v, home_dir: %#v remote addr: %#v",
user.ID, loginType, user.Username, user.HomeDir, remoteAddr.String())
dataprovider.UpdateLastLogin(user) //nolint:errcheck
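Here the SSH connection stops duplicating per-protocol fields and instead embeds a base connection built by common.NewBaseConnection. A small hypothetical sketch of embedding a shared base connection in a protocol-specific type (names are illustrative, not the real sftpd or common types):

// Hypothetical embedding of a shared base connection in a protocol connection.
package main

import "fmt"

type baseConnection struct {
    ID       string
    Protocol string
    Username string
}

func newBaseConnection(id, protocol, username string) *baseConnection {
    return &baseConnection{ID: id, Protocol: protocol, Username: username}
}

type sshConnection struct {
    *baseConnection
    clientVersion string
}

func main() {
    c := &sshConnection{
        baseConnection: newBaseConnection("SFTP_1", "sftpd", "alice"),
        clientVersion:  "SSH-2.0-OpenSSH_8.0",
    }
    fmt.Println(c.ID, c.Protocol, c.clientVersion)
}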
@ -391,14 +311,14 @@ func (c Configuration) AcceptInboundConnection(conn net.Conn, config *ssh.Server
|
|||
// If its not a session channel we just move on because its not something we
|
||||
// know how to handle at this point.
|
||||
if newChannel.ChannelType() != "session" {
|
||||
connection.Log(logger.LevelDebug, logSender, "received an unknown channel type: %v", newChannel.ChannelType())
|
||||
connection.Log(logger.LevelDebug, "received an unknown channel type: %v", newChannel.ChannelType())
|
||||
newChannel.Reject(ssh.UnknownChannelType, "unknown channel type") //nolint:errcheck
|
||||
continue
|
||||
}
|
||||
|
||||
channel, requests, err := newChannel.Accept()
|
||||
if err != nil {
|
||||
connection.Log(logger.LevelWarn, logSender, "could not accept a channel: %v", err)
|
||||
connection.Log(logger.LevelWarn, "could not accept a channel: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -412,11 +332,12 @@ func (c Configuration) AcceptInboundConnection(conn net.Conn, config *ssh.Server
|
|||
case "subsystem":
|
||||
if string(req.Payload[4:]) == "sftp" {
|
||||
ok = true
|
||||
connection.protocol = protocolSFTP
|
||||
connection.SetProtocol(common.ProtocolSFTP)
|
||||
connection.channel = channel
|
||||
go c.handleSftpConnection(channel, connection)
|
||||
go c.handleSftpConnection(channel, &connection)
|
||||
}
|
||||
case "exec":
|
||||
connection.SetProtocol(common.ProtocolSSH)
|
||||
ok = processSSHCommand(req.Payload, &connection, channel, c.EnabledSSHCommands)
|
||||
}
|
||||
req.Reply(ok, nil) //nolint:errcheck
|
||||
|
@ -425,9 +346,10 @@ func (c Configuration) AcceptInboundConnection(conn net.Conn, config *ssh.Server
|
|||
}
|
||||
}
|
||||
|
||||
func (c Configuration) handleSftpConnection(channel ssh.Channel, connection Connection) {
|
||||
addConnection(connection)
|
||||
defer removeConnection(connection)
|
||||
func (c Configuration) handleSftpConnection(channel ssh.Channel, connection *Connection) {
|
||||
common.Connections.Add(connection)
|
||||
defer common.Connections.Remove(connection)
|
||||
|
||||
// Create a new handler for the currently logged in user's server.
|
||||
handler := c.createHandler(connection)
|
||||
|
||||
|
@ -435,17 +357,17 @@ func (c Configuration) handleSftpConnection(channel ssh.Channel, connection Conn
|
|||
server := sftp.NewRequestServer(channel, handler, sftp.WithRSAllocator())
|
||||
|
||||
if err := server.Serve(); err == io.EOF {
|
||||
connection.Log(logger.LevelDebug, logSender, "connection closed, sending exit status")
|
||||
connection.Log(logger.LevelDebug, "connection closed, sending exit status")
|
||||
exitStatus := sshSubsystemExitStatus{Status: uint32(0)}
|
||||
_, err = channel.SendRequest("exit-status", false, ssh.Marshal(&exitStatus))
|
||||
connection.Log(logger.LevelDebug, logSender, "sent exit status %+v error: %v", exitStatus, err)
|
||||
connection.Log(logger.LevelDebug, "sent exit status %+v error: %v", exitStatus, err)
|
||||
server.Close()
|
||||
} else if err != nil {
|
||||
connection.Log(logger.LevelWarn, logSender, "connection closed with error: %v", err)
|
||||
connection.Log(logger.LevelWarn, "connection closed with error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (c Configuration) createHandler(connection Connection) sftp.Handlers {
|
||||
func (c Configuration) createHandler(connection *Connection) sftp.Handlers {
|
||||
return sftp.Handlers{
|
||||
FileGet: connection,
|
||||
FilePut: connection,
|
||||
|
@ -465,7 +387,7 @@ func loginUser(user dataprovider.User, loginMethod, publicKey string, conn ssh.C
|
|||
return nil, fmt.Errorf("cannot login user with invalid home dir: %#v", user.HomeDir)
|
||||
}
|
||||
if user.MaxSessions > 0 {
|
||||
activeSessions := getActiveSessions(user.Username)
|
||||
activeSessions := common.Connections.GetActiveSessions(user.Username)
|
||||
if activeSessions >= user.MaxSessions {
|
||||
logger.Debug(logSender, "", "authentication refused for user: %#v, too many open sessions: %v/%v", user.Username,
|
||||
activeSessions, user.MaxSessions)
|
||||
|
|
555 sftpd/sftpd.go
|
@ -4,139 +4,22 @@
|
|||
package sftpd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/drakkan/sftpgo/dataprovider"
|
||||
"github.com/drakkan/sftpgo/httpclient"
|
||||
"github.com/drakkan/sftpgo/logger"
|
||||
"github.com/drakkan/sftpgo/metrics"
|
||||
"github.com/drakkan/sftpgo/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
logSender = "sftpd"
|
||||
logSenderSCP = "scp"
|
||||
logSenderSSH = "ssh"
|
||||
uploadLogSender = "Upload"
|
||||
downloadLogSender = "Download"
|
||||
renameLogSender = "Rename"
|
||||
rmdirLogSender = "Rmdir"
|
||||
mkdirLogSender = "Mkdir"
|
||||
symlinkLogSender = "Symlink"
|
||||
removeLogSender = "Remove"
|
||||
chownLogSender = "Chown"
|
||||
chmodLogSender = "Chmod"
|
||||
chtimesLogSender = "Chtimes"
|
||||
sshCommandLogSender = "SSHCommand"
|
||||
operationDownload = "download"
|
||||
operationUpload = "upload"
|
||||
operationDelete = "delete"
|
||||
operationPreDelete = "pre-delete"
|
||||
operationRename = "rename"
|
||||
operationSSHCmd = "ssh_cmd"
|
||||
protocolSFTP = "SFTP"
|
||||
protocolSCP = "SCP"
|
||||
protocolSSH = "SSH"
|
||||
handshakeTimeout = 2 * time.Minute
|
||||
)
|
||||
|
||||
const (
|
||||
uploadModeStandard = iota
|
||||
uploadModeAtomic
|
||||
uploadModeAtomicWithResume
|
||||
logSender = "sftpd"
|
||||
handshakeTimeout = 2 * time.Minute
|
||||
)
|
||||
|
||||
var (
|
||||
mutex sync.RWMutex
|
||||
openConnections map[string]Connection
|
||||
activeTransfers []*Transfer
|
||||
idleTimeout time.Duration
|
||||
activeQuotaScans []ActiveQuotaScan
|
||||
activeVFoldersQuotaScan []ActiveVirtualFolderQuotaScan
|
||||
actions Actions
|
||||
uploadMode int
|
||||
setstatMode int
|
||||
supportedSSHCommands = []string{"scp", "md5sum", "sha1sum", "sha256sum", "sha384sum", "sha512sum", "cd", "pwd",
|
||||
supportedSSHCommands = []string{"scp", "md5sum", "sha1sum", "sha256sum", "sha384sum", "sha512sum", "cd", "pwd",
|
||||
"git-receive-pack", "git-upload-pack", "git-upload-archive", "rsync", "sftpgo-copy", "sftpgo-remove"}
|
||||
defaultSSHCommands = []string{"md5sum", "sha1sum", "cd", "pwd", "scp"}
|
||||
sshHashCommands = []string{"md5sum", "sha1sum", "sha256sum", "sha384sum", "sha512sum"}
|
||||
systemCommands = []string{"git-receive-pack", "git-upload-pack", "git-upload-archive", "rsync"}
|
||||
errUnconfiguredAction = errors.New("no hook is configured for this action")
|
||||
errNoHook = errors.New("unable to execute action, no hook defined")
|
||||
errUnexpectedHTTResponse = errors.New("unexpected HTTP response code")
|
||||
defaultSSHCommands = []string{"md5sum", "sha1sum", "cd", "pwd", "scp"}
|
||||
sshHashCommands = []string{"md5sum", "sha1sum", "sha256sum", "sha384sum", "sha512sum"}
|
||||
systemCommands = []string{"git-receive-pack", "git-upload-pack", "git-upload-archive", "rsync"}
|
||||
)
|
||||
|
||||
type connectionTransfer struct {
|
||||
OperationType string `json:"operation_type"`
|
||||
StartTime int64 `json:"start_time"`
|
||||
Size int64 `json:"size"`
|
||||
LastActivity int64 `json:"last_activity"`
|
||||
Path string `json:"path"`
|
||||
}
|
||||
|
||||
// ActiveQuotaScan defines an active quota scan for a user home dir
|
||||
type ActiveQuotaScan struct {
|
||||
// Username to which the quota scan refers
|
||||
Username string `json:"username"`
|
||||
// quota scan start time as unix timestamp in milliseconds
|
||||
StartTime int64 `json:"start_time"`
|
||||
}
|
||||
|
||||
// ActiveVirtualFolderQuotaScan defines an active quota scan for a virtual folder
|
||||
type ActiveVirtualFolderQuotaScan struct {
|
||||
// folder path to which the quota scan refers
|
||||
MappedPath string `json:"mapped_path"`
|
||||
// quota scan start time as unix timestamp in milliseconds
|
||||
StartTime int64 `json:"start_time"`
|
||||
}
|
||||
|
||||
// Actions to execute on SFTP create, download, delete and rename.
|
||||
// An external command can be executed and/or an HTTP notification can be fired
|
||||
type Actions struct {
|
||||
// Valid values are download, upload, delete, rename, ssh_cmd. Empty slice to disable
|
||||
ExecuteOn []string `json:"execute_on" mapstructure:"execute_on"`
|
||||
// Deprecated: please use Hook
|
||||
Command string `json:"command" mapstructure:"command"`
|
||||
// Deprecated: please use Hook
|
||||
HTTPNotificationURL string `json:"http_notification_url" mapstructure:"http_notification_url"`
|
||||
// Absolute path to an external program or an HTTP URL
|
||||
Hook string `json:"hook" mapstructure:"hook"`
|
||||
}
|
||||
|
||||
// ConnectionStatus status for an active connection
|
||||
type ConnectionStatus struct {
|
||||
// Logged in username
|
||||
Username string `json:"username"`
|
||||
// Unique identifier for the connection
|
||||
ConnectionID string `json:"connection_id"`
|
||||
// client's version string
|
||||
ClientVersion string `json:"client_version"`
|
||||
// Remote address for this connection
|
||||
RemoteAddress string `json:"remote_address"`
|
||||
// Connection time as unix timestamp in milliseconds
|
||||
ConnectionTime int64 `json:"connection_time"`
|
||||
// Last activity as unix timestamp in milliseconds
|
||||
LastActivity int64 `json:"last_activity"`
|
||||
// Protocol for this connection: SFTP, SCP, SSH
|
||||
Protocol string `json:"protocol"`
|
||||
// active uploads/downloads
|
||||
Transfers []connectionTransfer `json:"active_transfers"`
|
||||
// for protocol SSH this is the issued command
|
||||
SSHCommand string `json:"ssh_command"`
|
||||
}
|
||||
|
||||
type sshSubsystemExitStatus struct {
|
||||
Status uint32
|
||||
}
|
||||
|
@ -145,72 +28,6 @@ type sshSubsystemExecMsg struct {
|
|||
Command string
|
||||
}
|
||||
|
||||
type actionNotification struct {
|
||||
Action string `json:"action"`
|
||||
Username string `json:"username"`
|
||||
Path string `json:"path"`
|
||||
TargetPath string `json:"target_path,omitempty"`
|
||||
SSHCmd string `json:"ssh_cmd,omitempty"`
|
||||
FileSize int64 `json:"file_size,omitempty"`
|
||||
FsProvider int `json:"fs_provider"`
|
||||
Bucket string `json:"bucket,omitempty"`
|
||||
Endpoint string `json:"endpoint,omitempty"`
|
||||
Status int `json:"status"`
|
||||
}
|
||||
|
||||
func newActionNotification(user dataprovider.User, operation, filePath, target, sshCmd string, fileSize int64,
|
||||
err error) actionNotification {
|
||||
bucket := ""
|
||||
endpoint := ""
|
||||
status := 1
|
||||
if user.FsConfig.Provider == 1 {
|
||||
bucket = user.FsConfig.S3Config.Bucket
|
||||
endpoint = user.FsConfig.S3Config.Endpoint
|
||||
} else if user.FsConfig.Provider == 2 {
|
||||
bucket = user.FsConfig.GCSConfig.Bucket
|
||||
}
|
||||
if err == errQuotaExceeded {
|
||||
status = 2
|
||||
} else if err != nil {
|
||||
status = 0
|
||||
}
|
||||
return actionNotification{
|
||||
Action: operation,
|
||||
Username: user.Username,
|
||||
Path: filePath,
|
||||
TargetPath: target,
|
||||
SSHCmd: sshCmd,
|
||||
FileSize: fileSize,
|
||||
FsProvider: user.FsConfig.Provider,
|
||||
Bucket: bucket,
|
||||
Endpoint: endpoint,
|
||||
Status: status,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *actionNotification) AsJSON() []byte {
|
||||
res, _ := json.Marshal(a)
|
||||
return res
|
||||
}
|
||||
|
||||
func (a *actionNotification) AsEnvVars() []string {
|
||||
return []string{fmt.Sprintf("SFTPGO_ACTION=%v", a.Action),
|
||||
fmt.Sprintf("SFTPGO_ACTION_USERNAME=%v", a.Username),
|
||||
fmt.Sprintf("SFTPGO_ACTION_PATH=%v", a.Path),
|
||||
fmt.Sprintf("SFTPGO_ACTION_TARGET=%v", a.TargetPath),
|
||||
fmt.Sprintf("SFTPGO_ACTION_SSH_CMD=%v", a.SSHCmd),
|
||||
fmt.Sprintf("SFTPGO_ACTION_FILE_SIZE=%v", a.FileSize),
|
||||
fmt.Sprintf("SFTPGO_ACTION_FS_PROVIDER=%v", a.FsProvider),
|
||||
fmt.Sprintf("SFTPGO_ACTION_BUCKET=%v", a.Bucket),
|
||||
fmt.Sprintf("SFTPGO_ACTION_ENDPOINT=%v", a.Endpoint),
|
||||
fmt.Sprintf("SFTPGO_ACTION_STATUS=%v", a.Status),
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
openConnections = make(map[string]Connection)
|
||||
}
|
||||
|
||||
// GetDefaultSSHCommands returns the SSH commands enabled as default
|
||||
func GetDefaultSSHCommands() []string {
|
||||
result := make([]string, len(defaultSSHCommands))
|
||||
|
@ -224,363 +41,3 @@ func GetSupportedSSHCommands() []string {
|
|||
copy(result, supportedSSHCommands)
|
||||
return result
|
||||
}
|
||||
|
||||
// GetConnectionDuration returns the connection duration as string
|
||||
func (c ConnectionStatus) GetConnectionDuration() string {
|
||||
elapsed := time.Since(utils.GetTimeFromMsecSinceEpoch(c.ConnectionTime))
|
||||
return utils.GetDurationAsString(elapsed)
|
||||
}
|
||||
|
||||
// GetConnectionInfo returns connection info.
|
||||
// Protocol,Client Version and RemoteAddress are returned.
|
||||
// For SSH commands the issued command is returned too.
|
||||
func (c ConnectionStatus) GetConnectionInfo() string {
|
||||
result := fmt.Sprintf("%v. Client: %#v From: %#v", c.Protocol, c.ClientVersion, c.RemoteAddress)
|
||||
if c.Protocol == protocolSSH && len(c.SSHCommand) > 0 {
|
||||
result += fmt.Sprintf(". Command: %#v", c.SSHCommand)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// GetTransfersAsString returns the active transfers as string
|
||||
func (c ConnectionStatus) GetTransfersAsString() string {
|
||||
result := ""
|
||||
for _, t := range c.Transfers {
|
||||
if len(result) > 0 {
|
||||
result += ". "
|
||||
}
|
||||
result += t.getConnectionTransferAsString()
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (t connectionTransfer) getConnectionTransferAsString() string {
|
||||
result := ""
|
||||
if t.OperationType == operationUpload {
|
||||
result += "UL"
|
||||
} else {
|
||||
result += "DL"
|
||||
}
|
||||
result += fmt.Sprintf(" %#v ", t.Path)
|
||||
if t.Size > 0 {
|
||||
elapsed := time.Since(utils.GetTimeFromMsecSinceEpoch(t.StartTime))
|
||||
speed := float64(t.Size) / float64(utils.GetTimeAsMsSinceEpoch(time.Now())-t.StartTime)
|
||||
result += fmt.Sprintf("Size: %#v Elapsed: %#v Speed: \"%.1f KB/s\"", utils.ByteCountSI(t.Size),
|
||||
utils.GetDurationAsString(elapsed), speed)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func getActiveSessions(username string) int {
|
||||
mutex.RLock()
|
||||
defer mutex.RUnlock()
|
||||
numSessions := 0
|
||||
for _, c := range openConnections {
|
||||
if c.User.Username == username {
|
||||
numSessions++
|
||||
}
|
||||
}
|
||||
return numSessions
|
||||
}
|
||||
|
||||
// GetQuotaScans returns the active quota scans for users home directories
|
||||
func GetQuotaScans() []ActiveQuotaScan {
|
||||
mutex.RLock()
|
||||
defer mutex.RUnlock()
|
||||
scans := make([]ActiveQuotaScan, len(activeQuotaScans))
|
||||
copy(scans, activeQuotaScans)
|
||||
return scans
|
||||
}
|
||||
|
||||
// AddQuotaScan add a user to the ones with active quota scans.
|
||||
// Returns false if the user has a quota scan already running
|
||||
func AddQuotaScan(username string) bool {
|
||||
mutex.Lock()
|
||||
defer mutex.Unlock()
|
||||
for _, s := range activeQuotaScans {
|
||||
if s.Username == username {
|
||||
return false
|
||||
}
|
||||
}
|
||||
activeQuotaScans = append(activeQuotaScans, ActiveQuotaScan{
|
||||
Username: username,
|
||||
StartTime: utils.GetTimeAsMsSinceEpoch(time.Now()),
|
||||
})
|
||||
return true
|
||||
}
|
||||
|
||||
// RemoveQuotaScan removes a user from the ones with active quota scans
|
||||
func RemoveQuotaScan(username string) error {
|
||||
mutex.Lock()
|
||||
defer mutex.Unlock()
|
||||
var err error
|
||||
indexToRemove := -1
|
||||
for i, s := range activeQuotaScans {
|
||||
if s.Username == username {
|
||||
indexToRemove = i
|
||||
break
|
||||
}
|
||||
}
|
||||
if indexToRemove >= 0 {
|
||||
activeQuotaScans[indexToRemove] = activeQuotaScans[len(activeQuotaScans)-1]
|
||||
activeQuotaScans = activeQuotaScans[:len(activeQuotaScans)-1]
|
||||
} else {
|
||||
err = fmt.Errorf("quota scan to remove not found for user: %#v", username)
|
||||
logger.Warn(logSender, "", "error: %v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// GetVFoldersQuotaScans returns the active quota scans for virtual folders
|
||||
func GetVFoldersQuotaScans() []ActiveVirtualFolderQuotaScan {
|
||||
mutex.RLock()
|
||||
defer mutex.RUnlock()
|
||||
scans := make([]ActiveVirtualFolderQuotaScan, len(activeVFoldersQuotaScan))
|
||||
copy(scans, activeVFoldersQuotaScan)
|
||||
return scans
|
||||
}
|
||||
|
||||
// AddVFolderQuotaScan add a virtual folder to the ones with active quota scans.
|
||||
// Returns false if the folder has a quota scan already running
|
||||
func AddVFolderQuotaScan(folderPath string) bool {
|
||||
mutex.Lock()
|
||||
defer mutex.Unlock()
|
||||
for _, s := range activeVFoldersQuotaScan {
|
||||
if s.MappedPath == folderPath {
|
||||
return false
|
||||
}
|
||||
}
|
||||
activeVFoldersQuotaScan = append(activeVFoldersQuotaScan, ActiveVirtualFolderQuotaScan{
|
||||
MappedPath: folderPath,
|
||||
StartTime: utils.GetTimeAsMsSinceEpoch(time.Now()),
|
||||
})
|
||||
return true
|
||||
}
|
||||
|
||||
// RemoveVFolderQuotaScan removes a folder from the ones with active quota scans
|
||||
func RemoveVFolderQuotaScan(folderPath string) error {
|
||||
mutex.Lock()
|
||||
defer mutex.Unlock()
|
||||
var err error
|
||||
indexToRemove := -1
|
||||
for i, s := range activeVFoldersQuotaScan {
|
||||
if s.MappedPath == folderPath {
|
||||
indexToRemove = i
|
||||
break
|
||||
}
|
||||
}
|
||||
if indexToRemove >= 0 {
|
||||
activeVFoldersQuotaScan[indexToRemove] = activeVFoldersQuotaScan[len(activeVFoldersQuotaScan)-1]
|
||||
activeVFoldersQuotaScan = activeVFoldersQuotaScan[:len(activeVFoldersQuotaScan)-1]
|
||||
} else {
|
||||
err = fmt.Errorf("quota scan to remove not found for user: %#v", folderPath)
|
||||
logger.Warn(logSender, "", "error: %v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// CloseActiveConnection closes an active SFTP connection.
|
||||
// It returns true on success
|
||||
func CloseActiveConnection(connectionID string) bool {
|
||||
result := false
|
||||
mutex.RLock()
|
||||
defer mutex.RUnlock()
|
||||
if c, ok := openConnections[connectionID]; ok {
|
||||
err := c.close()
|
||||
c.Log(logger.LevelDebug, logSender, "close connection requested, close err: %v", err)
|
||||
result = true
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// GetConnectionsStats returns stats for active connections
|
||||
func GetConnectionsStats() []ConnectionStatus {
|
||||
mutex.RLock()
|
||||
defer mutex.RUnlock()
|
||||
stats := []ConnectionStatus{}
|
||||
for _, c := range openConnections {
|
||||
conn := ConnectionStatus{
|
||||
Username: c.User.Username,
|
||||
ConnectionID: c.ID,
|
||||
ClientVersion: c.ClientVersion,
|
||||
RemoteAddress: c.RemoteAddr.String(),
|
||||
ConnectionTime: utils.GetTimeAsMsSinceEpoch(c.StartTime),
|
||||
LastActivity: utils.GetTimeAsMsSinceEpoch(c.lastActivity),
|
||||
Protocol: c.protocol,
|
||||
Transfers: []connectionTransfer{},
|
||||
SSHCommand: c.command,
|
||||
}
|
||||
for _, t := range activeTransfers {
|
||||
if t.connectionID == c.ID {
|
||||
if t.lastActivity.UnixNano() > c.lastActivity.UnixNano() {
|
||||
conn.LastActivity = utils.GetTimeAsMsSinceEpoch(t.lastActivity)
|
||||
}
|
||||
var operationType string
|
||||
var size int64
|
||||
if t.transferType == transferUpload {
|
||||
operationType = operationUpload
|
||||
size = t.bytesReceived
|
||||
} else {
|
||||
operationType = operationDownload
|
||||
size = t.bytesSent
|
||||
}
|
||||
connTransfer := connectionTransfer{
|
||||
OperationType: operationType,
|
||||
StartTime: utils.GetTimeAsMsSinceEpoch(t.start),
|
||||
Size: size,
|
||||
LastActivity: utils.GetTimeAsMsSinceEpoch(t.lastActivity),
|
||||
Path: c.fs.GetRelativePath(t.path),
|
||||
}
|
||||
conn.Transfers = append(conn.Transfers, connTransfer)
|
||||
}
|
||||
}
|
||||
stats = append(stats, conn)
|
||||
}
|
||||
return stats
|
||||
}
|
||||
|
||||
func startIdleTimer(maxIdleTime time.Duration) {
|
||||
idleTimeout = maxIdleTime
|
||||
go func() {
|
||||
for range time.Tick(5 * time.Minute) {
|
||||
CheckIdleConnections()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// CheckIdleConnections disconnects clients idle for too long, based on IdleTimeout setting
|
||||
func CheckIdleConnections() {
|
||||
mutex.RLock()
|
||||
defer mutex.RUnlock()
|
||||
for _, c := range openConnections {
|
||||
idleTime := time.Since(c.lastActivity)
|
||||
for _, t := range activeTransfers {
|
||||
if t.connectionID == c.ID {
|
||||
transferIdleTime := time.Since(t.lastActivity)
|
||||
if transferIdleTime < idleTime {
|
||||
c.Log(logger.LevelDebug, logSender, "idle time: %v setted to transfer idle time: %v",
|
||||
idleTime, transferIdleTime)
|
||||
idleTime = transferIdleTime
|
||||
}
|
||||
}
|
||||
}
|
||||
if idleTime > idleTimeout {
|
||||
err := c.close()
|
||||
c.Log(logger.LevelInfo, logSender, "close idle connection, idle time: %v, close error: %v", idleTime, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func addConnection(c Connection) {
|
||||
mutex.Lock()
|
||||
defer mutex.Unlock()
|
||||
openConnections[c.ID] = c
|
||||
metrics.UpdateActiveConnectionsSize(len(openConnections))
|
||||
c.Log(logger.LevelDebug, logSender, "connection added, num open connections: %v", len(openConnections))
|
||||
}
|
||||
|
||||
func removeConnection(c Connection) {
|
||||
mutex.Lock()
|
||||
defer mutex.Unlock()
|
||||
delete(openConnections, c.ID)
|
||||
metrics.UpdateActiveConnectionsSize(len(openConnections))
|
||||
// we have finished sending data here and most of the time the underlying network connection
|
||||
// is already closed. Sometimes a client can still be reading the last sent data, so we set
|
||||
// a deadline instead of directly closing the network connection.
|
||||
// Setting a deadline on an already closed connection has no effect.
|
||||
// We only need to ensure that a connection will not remain indefinitely open and so the
|
||||
// underlying file descriptor is not released.
|
||||
// This should protect us against buggy clients and edge cases.
|
||||
c.netConn.SetDeadline(time.Now().Add(2 * time.Minute)) //nolint:errcheck
|
||||
c.Log(logger.LevelDebug, logSender, "connection removed, num open connections: %v", len(openConnections))
|
||||
}
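// A minimal sketch of the deadline pattern described in the comment above (hypothetical
// helper, not part of this change; it assumes the standard "net" and "time" imports):
//
//	func releaseLater(conn net.Conn, grace time.Duration) {
//		// bound how long a possibly still-draining connection may keep its file
//		// descriptor; the error returned for an already closed connection is harmless
//		conn.SetDeadline(time.Now().Add(grace)) //nolint:errcheck
//	}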
|
||||
|
||||
func addTransfer(transfer *Transfer) {
|
||||
mutex.Lock()
|
||||
defer mutex.Unlock()
|
||||
activeTransfers = append(activeTransfers, transfer)
|
||||
}
|
||||
|
||||
func removeTransfer(transfer *Transfer) error {
|
||||
mutex.Lock()
|
||||
defer mutex.Unlock()
|
||||
var err error
|
||||
indexToRemove := -1
|
||||
for i, v := range activeTransfers {
|
||||
if v == transfer {
|
||||
indexToRemove = i
|
||||
break
|
||||
}
|
||||
}
|
||||
if indexToRemove >= 0 {
|
||||
activeTransfers[indexToRemove] = activeTransfers[len(activeTransfers)-1]
|
||||
activeTransfers = activeTransfers[:len(activeTransfers)-1]
|
||||
} else {
|
||||
logger.Warn(logSender, transfer.connectionID, "transfer to remove not found!")
|
||||
err = fmt.Errorf("transfer to remove not found")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func updateConnectionActivity(id string) {
|
||||
mutex.Lock()
|
||||
defer mutex.Unlock()
|
||||
if c, ok := openConnections[id]; ok {
|
||||
c.lastActivity = time.Now()
|
||||
openConnections[id] = c
|
||||
}
|
||||
}
|
||||
|
||||
func isAtomicUploadEnabled() bool {
|
||||
return uploadMode == uploadModeAtomic || uploadMode == uploadModeAtomicWithResume
|
||||
}
|
||||
|
||||
func executeNotificationCommand(a actionNotification) error {
|
||||
if !filepath.IsAbs(actions.Hook) {
|
||||
err := fmt.Errorf("invalid notification command %#v", actions.Hook)
|
||||
logger.Warn(logSender, "", "unable to execute notification command: %v", err)
|
||||
return err
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
cmd := exec.CommandContext(ctx, actions.Hook, a.Action, a.Username, a.Path, a.TargetPath, a.SSHCmd)
|
||||
cmd.Env = append(os.Environ(), a.AsEnvVars()...)
|
||||
startTime := time.Now()
|
||||
err := cmd.Run()
|
||||
logger.Debug(logSender, "", "executed command %#v with arguments: %#v, %#v, %#v, %#v, %#v, elapsed: %v, error: %v",
|
||||
actions.Hook, a.Action, a.Username, a.Path, a.TargetPath, a.SSHCmd, time.Since(startTime), err)
|
||||
return err
|
||||
}
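// Illustrative sketch, not part of this change: executeNotificationCommand above invokes
// the hook with the action, username, path, target path and SSH command as positional
// arguments and reports a non-zero exit status as an error. A hook could therefore be a
// small standalone Go program like the following (the printed text is hypothetical):
//
//	package main
//
//	import (
//		"fmt"
//		"os"
//	)
//
//	func main() {
//		// os.Args[1:] = action, username, path, target path, ssh command
//		if len(os.Args) < 6 {
//			fmt.Fprintln(os.Stderr, "unexpected number of arguments")
//			os.Exit(1)
//		}
//		fmt.Printf("action %q on %q for user %q\n", os.Args[1], os.Args[3], os.Args[2])
//	}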
|
||||
|
||||
func executeAction(a actionNotification) error {
|
||||
if !utils.IsStringInSlice(a.Action, actions.ExecuteOn) {
|
||||
return errUnconfiguredAction
|
||||
}
|
||||
if len(actions.Hook) == 0 {
|
||||
logger.Warn(logSender, "", "Unable to send notification, no hook is defined")
|
||||
return errNoHook
|
||||
}
|
||||
if strings.HasPrefix(actions.Hook, "http") {
|
||||
var url *url.URL
|
||||
url, err := url.Parse(actions.Hook)
|
||||
if err != nil {
|
||||
logger.Warn(logSender, "", "Invalid hook %#v for operation %#v: %v", actions.Hook, a.Action, err)
|
||||
return err
|
||||
}
|
||||
startTime := time.Now()
|
||||
httpClient := httpclient.GetHTTPClient()
|
||||
resp, err := httpClient.Post(url.String(), "application/json", bytes.NewBuffer(a.AsJSON()))
|
||||
respCode := 0
|
||||
if err == nil {
|
||||
respCode = resp.StatusCode
|
||||
resp.Body.Close()
|
||||
if respCode != http.StatusOK {
|
||||
err = errUnexpectedHTTResponse
|
||||
}
|
||||
}
|
||||
logger.Debug(logSender, "", "notified operation %#v to URL: %v status code: %v, elapsed: %v err: %v",
|
||||
a.Action, url.String(), respCode, time.Since(startTime), err)
|
||||
return err
|
||||
}
|
||||
return executeNotificationCommand(a)
|
||||
}
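// Illustrative sketch, not part of this change: for an HTTP hook, executeAction above
// POSTs the notification as JSON and treats any status code other than 200 as an error,
// so a receiver only needs to read the body and reply with 200. A hypothetical minimal
// endpoint follows (the path and port are arbitrary; the full URL is whatever is
// configured as the hook):
//
//	package main
//
//	import (
//		"io/ioutil"
//		"log"
//		"net/http"
//	)
//
//	func main() {
//		http.HandleFunc("/notify", func(w http.ResponseWriter, r *http.Request) {
//			body, err := ioutil.ReadAll(r.Body)
//			if err != nil {
//				http.Error(w, err.Error(), http.StatusBadRequest)
//				return
//			}
//			log.Printf("action notification: %s", string(body))
//			w.WriteHeader(http.StatusOK)
//		})
//		log.Fatal(http.ListenAndServe(":8000", nil))
//	}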
|
||||
|
|
|
@ -24,6 +24,7 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
@ -36,11 +37,11 @@ import (
|
|||
"github.com/stretchr/testify/assert"
|
||||
"golang.org/x/crypto/ssh"
|
||||
|
||||
"github.com/drakkan/sftpgo/common"
|
||||
"github.com/drakkan/sftpgo/config"
|
||||
"github.com/drakkan/sftpgo/dataprovider"
|
||||
"github.com/drakkan/sftpgo/httpd"
|
||||
"github.com/drakkan/sftpgo/logger"
|
||||
"github.com/drakkan/sftpgo/sftpd"
|
||||
"github.com/drakkan/sftpgo/utils"
|
||||
"github.com/drakkan/sftpgo/vfs"
|
||||
)
|
||||
|
@ -106,10 +107,9 @@ iixITGvaNZh/tjAAAACW5pY29sYUBwMQE=
|
|||
testCertOtherSourceAddress = "ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgZ4Su0250R4sQRNYJqJH9VTp9OyeYMAvqY5+lJRI4LzMAAAADAQABAAABgQC03jj0D+djk7pxIf/0OhrxrchJTRZklofJ1NoIu4752Sq02mdXmarMVsqJ1cAjV5LBVy3D1F5U6XW4rppkXeVtd04Pxb09ehtH0pRRPaoHHlALiJt8CoMpbKYMA8b3KXPPriGxgGomvtU2T2RMURSwOZbMtpsugfjYSWenyYX+VORYhylWnSXL961LTyC21ehd6d6QnW9G7E5hYMITMY9TuQZz3bROYzXiTsgN0+g6Hn7exFQp50p45StUMfV/SftCMdCxlxuyGny2CrN/vfjO7xxOo2uv7q1qm10Q46KPWJQv+pgZ/OfL+EDjy07n5QVSKHlbx+2nT4Q0EgOSQaCTYwn3YjtABfIxWwgAFdyj6YlPulCL22qU4MYhDcA6PSBwDdf8hvxBfvsiHdM+JcSHvv8/VeJhk6CmnZxGY0fxBupov27z3yEO8nAg8k+6PaUiW1MSUfuGMF/ktB8LOstXsEPXSszuyXiOv4DaryOXUiSn7bmRqKcEFlJusO6aZP0AAAAAAAAAAwAAAAEAAAAOdGVzdF91c2VyX3NmdHAAAAASAAAADnRlc3RfdXNlcl9zZnRwAAAAAAAAAAD//////////wAAACYAAAAOc291cmNlLWFkZHJlc3MAAAAQAAAADDE3Mi4xNi4zNC40NQAAAIIAAAAVcGVybWl0LVgxMS1mb3J3YXJkaW5nAAAAAAAAABdwZXJtaXQtYWdlbnQtZm9yd2FyZGluZwAAAAAAAAAWcGVybWl0LXBvcnQtZm9yd2FyZGluZwAAAAAAAAAKcGVybWl0LXB0eQAAAAAAAAAOcGVybWl0LXVzZXItcmMAAAAAAAAAAAAAAZcAAAAHc3NoLXJzYQAAAAMBAAEAAAGBAMXl9zBkeLKLGacToiU5kmlmFZeiHraA37Jp0ADQYnnT1IARplUs8M/xLlGwTyZSKRHfDHKdWyHEd6oyGuRL5GU1uFKU5cN02D3jJOur/EXxn8+ApEie95/viTmLtsAjK3NruMRHMUn+6NMTLfnftPmTkRhAnXllAa6/PKdJ2/7qj31KMjiMWmXJA5nZBxhsQCaEebkaBCUiIQUb9GUO0uSw66UpnE5jeo/M/QDJDG1klef/m8bjRpb0tNvDEImpaWCuQVcyoABUJu5TliynCGJeYq3U+yV2JfDbeiWhrhxoIo3WPNsWIa5k1cRTYRvHski+NAI9pRjAuMRuREPEOo3++bBmoG4piK4b0Rp/H6cVJCSvtBhvlv6ZP7/UgUeeZ5EaffzvfWQGq0fu2nML+36yhFf2nYe0kz70xiFuU7Y6pNI8ZOXGKFZSTKJEF6SkCFqIeV3XpOwb4Dds4keuiMZxf7mDqgZqsoYsAxzKQvVf6tmpP33cyjp3Znurjcw5cQAAAZQAAAAMcnNhLXNoYTItNTEyAAABgL34Q3Li8AJIxZLU+fh4i8ehUWpm31vEvlNjXVCeP70xI+5hWuEt6E1TgKw7GCL5GeD4KehX4vVcNs+A2eOdIUZfDBZIFxn88BN8xcMlDpAMJXgvNqGttiOwcspL6X3N288djUgpCI718lLRdz8nvFqcuYBhSpBm5KL4JzH5o1o8yqv75wMJsH8CJYwGhvWi0OgWOqaLRAk3IUxq3Fbgo/nX11NgrkY/dHIZCkGBFaLJ/M5mfmt/K/5hJAVgLcSxMwB/ryyGaziB9Pv7CwZ9uwnMoRcAvyr96lqgdtLt7LNY8ktugAJ7EnBWjQn4+EJAjjRK2sCaiwpdP37ckDZgmk0OWGEL1yVy8VXgl9QBd7Mb1EVl+lhRyw8jlgBXZOGqpdDrmKCdBYGtU7ujyndLXmxZEAlqhef0yCsyZPTkYH3RhjCYs8ATrEqndEpiL59Nej5uUGQURYijJfHep08AMb4rCxvIZATVm1Ocxu48rGCGolv8jZFJzSJq84HCrVRKMw== nicola@p1"
|
||||
// this is testPubKey signed using testCAUserKey but expired.
|
||||
// % ssh-keygen -s ca_user_key -I test_user_sftp -n test_user_sftp -V 20100101123000:20110101123000 -z 4 /tmp/test.pub
|
||||
testCertExpired = "ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgU3TLP5285k20fBSsdZioI78oJUpaRXFlgx5IPg6gWg8AAAADAQABAAABgQC03jj0D+djk7pxIf/0OhrxrchJTRZklofJ1NoIu4752Sq02mdXmarMVsqJ1cAjV5LBVy3D1F5U6XW4rppkXeVtd04Pxb09ehtH0pRRPaoHHlALiJt8CoMpbKYMA8b3KXPPriGxgGomvtU2T2RMURSwOZbMtpsugfjYSWenyYX+VORYhylWnSXL961LTyC21ehd6d6QnW9G7E5hYMITMY9TuQZz3bROYzXiTsgN0+g6Hn7exFQp50p45StUMfV/SftCMdCxlxuyGny2CrN/vfjO7xxOo2uv7q1qm10Q46KPWJQv+pgZ/OfL+EDjy07n5QVSKHlbx+2nT4Q0EgOSQaCTYwn3YjtABfIxWwgAFdyj6YlPulCL22qU4MYhDcA6PSBwDdf8hvxBfvsiHdM+JcSHvv8/VeJhk6CmnZxGY0fxBupov27z3yEO8nAg8k+6PaUiW1MSUfuGMF/ktB8LOstXsEPXSszuyXiOv4DaryOXUiSn7bmRqKcEFlJusO6aZP0AAAAAAAAABAAAAAEAAAAOdGVzdF91c2VyX3NmdHAAAAASAAAADnRlc3RfdXNlcl9zZnRwAAAAAEs93LgAAAAATR8QOAAAAAAAAACCAAAAFXBlcm1pdC1YMTEtZm9yd2FyZGluZwAAAAAAAAAXcGVybWl0LWFnZW50LWZvcndhcmRpbmcAAAAAAAAAFnBlcm1pdC1wb3J0LWZvcndhcmRpbmcAAAAAAAAACnBlcm1pdC1wdHkAAAAAAAAADnBlcm1pdC11c2VyLXJjAAAAAAAAAAAAAAGXAAAAB3NzaC1yc2EAAAADAQABAAABgQDF5fcwZHiyixmnE6IlOZJpZhWXoh62gN+yadAA0GJ509SAEaZVLPDP8S5RsE8mUikR3wxynVshxHeqMhrkS+RlNbhSlOXDdNg94yTrq/xF8Z/PgKRInvef74k5i7bAIytza7jERzFJ/ujTEy3537T5k5EYQJ15ZQGuvzynSdv+6o99SjI4jFplyQOZ2QcYbEAmhHm5GgQlIiEFG/RlDtLksOulKZxOY3qPzP0AyQxtZJXn/5vG40aW9LTbwxCJqWlgrkFXMqAAVCbuU5YspwhiXmKt1PsldiXw23oloa4caCKN1jzbFiGuZNXEU2Ebx7JIvjQCPaUYwLjEbkRDxDqN/vmwZqBuKYiuG9Eafx+nFSQkr7QYb5b+mT+/1IFHnmeRGn38731kBqtH7tpzC/t+soRX9p2HtJM+9MYhblO2OqTSPGTlxihWUkyiRBekpAhaiHld16TsG+A3bOJHrojGcX+5g6oGarKGLAMcykL1X+rZqT993Mo6d2Z7q43MOXEAAAGUAAAADHJzYS1zaGEyLTUxMgAAAYAlH3hhj8J6xLyVpeLZjblzwDKrxp/MWiH30hQ965ExPrPRcoAZFEKVqOYdj6bp4Q19Q4Yzqdobg3aN5ym2iH0b2TlOY0mM901CAoHbNJyiLs+0KiFRoJ+30EDj/hcKusg6v8ln2yixPagAyQu3zyiWo4t1ZuO3I86xchGlptStxSdHAHPFCfpbhcnzWFZctiMqUutl82C4ROWyjOZcRzdVdWHeN5h8wnooXuvba2VkT8QPmjYYyRGuQ3Hg+ySdh8Tel4wiix1Dg5MX7Wjh4hKEx80No9UPy+0iyZMNc07lsWAtrY6NRxGM5CzB6mklscB8TzFrVSnIl9u3bquLfaCrFt/Mft5dR7Yy4jmF+zUhjia6h6giCZ91J+FZ4hV+WkBtPCvTfrGWoA1BgEB/iI2xOq/NPqJ7UXRoMXk/l0NPgRPT2JS1adegqnt4ddr6IlmPyZxaSEvXhanjKdfMlEFYO1wz7ouqpYUozQVy4KXBlzFlNwyD1hI+k4+/A6AIYeI= nicola@p1"
|
||||
configDir = ".."
|
||||
permissionErrorString = "Permission Denied"
|
||||
osWindows = "windows"
|
||||
testCertExpired = "ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgU3TLP5285k20fBSsdZioI78oJUpaRXFlgx5IPg6gWg8AAAADAQABAAABgQC03jj0D+djk7pxIf/0OhrxrchJTRZklofJ1NoIu4752Sq02mdXmarMVsqJ1cAjV5LBVy3D1F5U6XW4rppkXeVtd04Pxb09ehtH0pRRPaoHHlALiJt8CoMpbKYMA8b3KXPPriGxgGomvtU2T2RMURSwOZbMtpsugfjYSWenyYX+VORYhylWnSXL961LTyC21ehd6d6QnW9G7E5hYMITMY9TuQZz3bROYzXiTsgN0+g6Hn7exFQp50p45StUMfV/SftCMdCxlxuyGny2CrN/vfjO7xxOo2uv7q1qm10Q46KPWJQv+pgZ/OfL+EDjy07n5QVSKHlbx+2nT4Q0EgOSQaCTYwn3YjtABfIxWwgAFdyj6YlPulCL22qU4MYhDcA6PSBwDdf8hvxBfvsiHdM+JcSHvv8/VeJhk6CmnZxGY0fxBupov27z3yEO8nAg8k+6PaUiW1MSUfuGMF/ktB8LOstXsEPXSszuyXiOv4DaryOXUiSn7bmRqKcEFlJusO6aZP0AAAAAAAAABAAAAAEAAAAOdGVzdF91c2VyX3NmdHAAAAASAAAADnRlc3RfdXNlcl9zZnRwAAAAAEs93LgAAAAATR8QOAAAAAAAAACCAAAAFXBlcm1pdC1YMTEtZm9yd2FyZGluZwAAAAAAAAAXcGVybWl0LWFnZW50LWZvcndhcmRpbmcAAAAAAAAAFnBlcm1pdC1wb3J0LWZvcndhcmRpbmcAAAAAAAAACnBlcm1pdC1wdHkAAAAAAAAADnBlcm1pdC11c2VyLXJjAAAAAAAAAAAAAAGXAAAAB3NzaC1yc2EAAAADAQABAAABgQDF5fcwZHiyixmnE6IlOZJpZhWXoh62gN+yadAA0GJ509SAEaZVLPDP8S5RsE8mUikR3wxynVshxHeqMhrkS+RlNbhSlOXDdNg94yTrq/xF8Z/PgKRInvef74k5i7bAIytza7jERzFJ/ujTEy3537T5k5EYQJ15ZQGuvzynSdv+6o99SjI4jFplyQOZ2QcYbEAmhHm5GgQlIiEFG/RlDtLksOulKZxOY3qPzP0AyQxtZJXn/5vG40aW9LTbwxCJqWlgrkFXMqAAVCbuU5YspwhiXmKt1PsldiXw23oloa4caCKN1jzbFiGuZNXEU2Ebx7JIvjQCPaUYwLjEbkRDxDqN/vmwZqBuKYiuG9Eafx+nFSQkr7QYb5b+mT+/1IFHnmeRGn38731kBqtH7tpzC/t+soRX9p2HtJM+9MYhblO2OqTSPGTlxihWUkyiRBekpAhaiHld16TsG+A3bOJHrojGcX+5g6oGarKGLAMcykL1X+rZqT993Mo6d2Z7q43MOXEAAAGUAAAADHJzYS1zaGEyLTUxMgAAAYAlH3hhj8J6xLyVpeLZjblzwDKrxp/MWiH30hQ965ExPrPRcoAZFEKVqOYdj6bp4Q19Q4Yzqdobg3aN5ym2iH0b2TlOY0mM901CAoHbNJyiLs+0KiFRoJ+30EDj/hcKusg6v8ln2yixPagAyQu3zyiWo4t1ZuO3I86xchGlptStxSdHAHPFCfpbhcnzWFZctiMqUutl82C4ROWyjOZcRzdVdWHeN5h8wnooXuvba2VkT8QPmjYYyRGuQ3Hg+ySdh8Tel4wiix1Dg5MX7Wjh4hKEx80No9UPy+0iyZMNc07lsWAtrY6NRxGM5CzB6mklscB8TzFrVSnIl9u3bquLfaCrFt/Mft5dR7Yy4jmF+zUhjia6h6giCZ91J+FZ4hV+WkBtPCvTfrGWoA1BgEB/iI2xOq/NPqJ7UXRoMXk/l0NPgRPT2JS1adegqnt4ddr6IlmPyZxaSEvXhanjKdfMlEFYO1wz7ouqpYUozQVy4KXBlzFlNwyD1hI+k4+/A6AIYeI= nicola@p1"
|
||||
configDir = ".."
|
||||
osWindows = "windows"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -136,19 +136,37 @@ func TestMain(m *testing.M) {
|
|||
logger.InitLogger(logFilePath, 5, 1, 28, false, zerolog.DebugLevel)
|
||||
err := ioutil.WriteFile(loginBannerFile, []byte("simple login banner\n"), os.ModePerm)
|
||||
if err != nil {
|
||||
logger.WarnToConsole("error creating login banner: %v", err)
|
||||
logger.ErrorToConsole("error creating login banner: %v", err)
|
||||
}
|
||||
err = config.LoadConfig(configDir, "")
|
||||
if err != nil {
|
||||
logger.WarnToConsole("error loading configuration: %v", err)
|
||||
logger.ErrorToConsole("error loading configuration: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
providerConf := config.GetProviderConf()
|
||||
logger.InfoToConsole("Starting SFTPD tests, provider: %v", providerConf.Driver)
|
||||
|
||||
commonConf := config.GetCommonConfig()
|
||||
// we run the test cases with UploadMode atomic and resume support. The non atomic code path
|
||||
// simply does not execute some code, so if it works in atomic mode it will
|
||||
// work in non-atomic mode too
|
||||
commonConf.UploadMode = 2
|
||||
homeBasePath = os.TempDir()
|
||||
checkSystemCommands()
|
||||
var scriptArgs string
|
||||
if runtime.GOOS == osWindows {
|
||||
scriptArgs = "%*"
|
||||
} else {
|
||||
commonConf.Actions.ExecuteOn = []string{"download", "upload", "rename", "delete", "ssh_cmd"}
|
||||
commonConf.Actions.Hook = hookCmdPath
|
||||
scriptArgs = "$@"
|
||||
}
|
||||
|
||||
common.Initialize(commonConf)
|
||||
|
||||
err = dataprovider.Initialize(providerConf, configDir)
|
||||
if err != nil {
|
||||
logger.WarnToConsole("error initializing data provider: %v", err)
|
||||
logger.ErrorToConsole("error initializing data provider: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
|
@ -166,25 +184,11 @@ func TestMain(m *testing.M) {
|
|||
sftpdConf.LoginBannerFile = loginBannerFileName
|
||||
// we need to test all supported ssh commands
|
||||
sftpdConf.EnabledSSHCommands = []string{"*"}
|
||||
// we run the test cases with UploadMode atomic and resume support. The non atomic code path
|
||||
// simply does not execute some code so if it works in atomic mode will
|
||||
// work in non atomic mode too
|
||||
sftpdConf.UploadMode = 2
|
||||
homeBasePath = os.TempDir()
|
||||
checkSystemCommands()
|
||||
var scriptArgs string
|
||||
if runtime.GOOS == osWindows {
|
||||
scriptArgs = "%*"
|
||||
} else {
|
||||
sftpdConf.Actions.ExecuteOn = []string{"download", "upload", "rename", "delete", "ssh_cmd"}
|
||||
sftpdConf.Actions.Hook = hookCmdPath
|
||||
scriptArgs = "$@"
|
||||
}
|
||||
|
||||
keyIntAuthPath = filepath.Join(homeBasePath, "keyintauth.sh")
|
||||
err = ioutil.WriteFile(keyIntAuthPath, getKeyboardInteractiveScriptContent([]string{"1", "2"}, 0, false, 1), 0755)
|
||||
if err != nil {
|
||||
logger.WarnToConsole("error writing keyboard interactive script: %v", err)
|
||||
logger.ErrorToConsole("error writing keyboard interactive script: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
sftpdConf.KeyboardInteractiveHook = keyIntAuthPath
|
||||
|
@ -217,13 +221,15 @@ func TestMain(m *testing.M) {
|
|||
go func() {
|
||||
logger.Debug(logSender, "", "initializing SFTP server with config %+v", sftpdConf)
|
||||
if err := sftpdConf.Initialize(configDir); err != nil {
|
||||
logger.Error(logSender, "", "could not start SFTP server: %v", err)
|
||||
logger.ErrorToConsole("could not start SFTP server: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
if err := httpdConf.Initialize(configDir, false); err != nil {
|
||||
logger.Error(logSender, "", "could not start HTTP server: %v", err)
|
||||
logger.ErrorToConsole("could not start HTTP server: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}()
|
||||
|
||||
|
@ -231,22 +237,26 @@ func TestMain(m *testing.M) {
|
|||
waitTCPListening(fmt.Sprintf("%s:%d", httpdConf.BindAddress, httpdConf.BindPort))
|
||||
|
||||
sftpdConf.BindPort = 2222
|
||||
sftpdConf.ProxyProtocol = 1
|
||||
common.Config.ProxyProtocol = 1
|
||||
go func() {
|
||||
logger.Debug(logSender, "", "initializing SFTP server with config %+v", sftpdConf)
|
||||
logger.Debug(logSender, "", "initializing SFTP server with config %+v and proxy protocol %v",
|
||||
sftpdConf, common.Config.ProxyProtocol)
|
||||
if err := sftpdConf.Initialize(configDir); err != nil {
|
||||
logger.Error(logSender, "", "could not start SFTP server: %v", err)
|
||||
logger.ErrorToConsole("could not start SFTP server with proxy protocol 1: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}()
|
||||
|
||||
waitTCPListening(fmt.Sprintf("%s:%d", sftpdConf.BindAddress, sftpdConf.BindPort))
|
||||
|
||||
sftpdConf.BindPort = 2224
|
||||
sftpdConf.ProxyProtocol = 2
|
||||
common.Config.ProxyProtocol = 2
|
||||
go func() {
|
||||
logger.Debug(logSender, "", "initializing SFTP server with config %+v", sftpdConf)
|
||||
logger.Debug(logSender, "", "initializing SFTP server with config %+v and proxy protocol %v",
|
||||
sftpdConf, common.Config.ProxyProtocol)
|
||||
if err := sftpdConf.Initialize(configDir); err != nil {
|
||||
logger.Error(logSender, "", "could not start SFTP server: %v", err)
|
||||
logger.ErrorToConsole("could not start SFTP server with proxy protocol 2: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}()
|
||||
|
||||
|
@ -269,7 +279,6 @@ func TestInitialization(t *testing.T) {
|
|||
err := config.LoadConfig(configDir, "")
|
||||
assert.NoError(t, err)
|
||||
sftpdConf := config.GetSFTPDConfig()
|
||||
sftpdConf.Umask = "invalid umask"
|
||||
sftpdConf.BindPort = 2022
|
||||
sftpdConf.LoginBannerFile = "invalid_file"
|
||||
sftpdConf.EnabledSSHCommands = append(sftpdConf.EnabledSSHCommands, "ls")
|
||||
|
@ -282,15 +291,15 @@ func TestInitialization(t *testing.T) {
|
|||
err = sftpdConf.Initialize(configDir)
|
||||
assert.Error(t, err)
|
||||
sftpdConf.BindPort = 4444
|
||||
sftpdConf.ProxyProtocol = 1
|
||||
sftpdConf.ProxyAllowed = []string{"1270.0.0.1"}
|
||||
common.Config.ProxyProtocol = 1
|
||||
common.Config.ProxyAllowed = []string{"1270.0.0.1"}
|
||||
err = sftpdConf.Initialize(configDir)
|
||||
assert.Error(t, err)
|
||||
sftpdConf.HostKeys = []string{"missing file"}
|
||||
sftpdConf.HostKeys = []string{"missing key"}
|
||||
err = sftpdConf.Initialize(configDir)
|
||||
assert.Error(t, err)
|
||||
sftpdConf.Keys = nil
|
||||
sftpdConf.TrustedUserCAKeys = []string{"missing file"}
|
||||
sftpdConf.HostKeys = nil
|
||||
sftpdConf.TrustedUserCAKeys = []string{"missing ca key"}
|
||||
err = sftpdConf.Initialize(configDir)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
@ -343,7 +352,7 @@ func TestBasicSFTPHandling(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestConcurrentLogins(t *testing.T) {
|
||||
func TestConcurrency(t *testing.T) {
|
||||
usePubKey := true
|
||||
numLogins := 50
|
||||
u := getTestUser(usePubKey)
|
||||
|
@ -353,34 +362,76 @@ func TestConcurrentLogins(t *testing.T) {
|
|||
var wg sync.WaitGroup
|
||||
testFileName := "test_file.dat"
|
||||
testFilePath := filepath.Join(homeBasePath, testFileName)
|
||||
testFileSize := int64(65535)
|
||||
testFileSize := int64(262144)
|
||||
err = createTestFile(testFilePath, testFileSize)
|
||||
assert.NoError(t, err)
|
||||
|
||||
closedConns := int32(0)
|
||||
for i := 0; i < numLogins; i++ {
|
||||
wg.Add(1)
|
||||
go func(counter int) {
|
||||
defer wg.Done()
|
||||
defer atomic.AddInt32(&closedConns, 1)
|
||||
|
||||
client, err := getSftpClient(user, usePubKey)
|
||||
if assert.NoError(t, err) {
|
||||
defer wg.Done()
|
||||
defer client.Close()
|
||||
|
||||
err = checkBasicSFTP(client)
|
||||
assert.NoError(t, err)
|
||||
err = sftpUploadFile(testFilePath, testFileName+strconv.Itoa(counter), testFileSize, client)
|
||||
assert.NoError(t, err)
|
||||
assert.Greater(t, common.Connections.GetActiveSessions(defaultUsername), 0)
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
var statsWg sync.WaitGroup
|
||||
statsWg.Add(1)
|
||||
|
||||
go func() {
|
||||
defer statsWg.Done()
|
||||
|
||||
maxConns := 0
|
||||
maxSessions := 0
|
||||
for {
|
||||
servedReqs := atomic.LoadInt32(&closedConns)
|
||||
if servedReqs > 0 {
|
||||
stats := common.Connections.GetStats()
|
||||
if len(stats) > maxConns {
|
||||
maxConns = len(stats)
|
||||
}
|
||||
activeSessions := common.Connections.GetActiveSessions(defaultUsername)
|
||||
if activeSessions > maxSessions {
|
||||
maxSessions = activeSessions
|
||||
}
|
||||
}
|
||||
if servedReqs >= int32(numLogins) {
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.Greater(t, maxConns, 0)
|
||||
assert.Greater(t, maxSessions, 0)
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
statsWg.Wait()
|
||||
|
||||
client, err := getSftpClient(user, usePubKey)
|
||||
if assert.NoError(t, err) {
|
||||
defer client.Close()
|
||||
files, err := client.ReadDir(".")
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, files, numLogins)
|
||||
client.Close()
|
||||
}
|
||||
|
||||
assert.Eventually(t, func() bool {
|
||||
return common.Connections.GetActiveSessions(defaultUsername) == 0
|
||||
}, 1*time.Second, 50*time.Millisecond)
|
||||
|
||||
assert.Eventually(t, func() bool {
|
||||
return len(common.Connections.GetStats()) == 0
|
||||
}, 1*time.Second, 50*time.Millisecond)
|
||||
|
||||
err = os.Remove(testFilePath)
|
||||
assert.NoError(t, err)
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
|
@ -1945,16 +1996,14 @@ func TestQuotaScan(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestMultipleQuotaScans(t *testing.T) {
|
||||
res := sftpd.AddQuotaScan(defaultUsername)
|
||||
res := common.QuotaScans.AddUserQuotaScan(defaultUsername)
|
||||
assert.True(t, res)
|
||||
res = sftpd.AddQuotaScan(defaultUsername)
|
||||
res = common.QuotaScans.AddUserQuotaScan(defaultUsername)
|
||||
assert.False(t, res, "add quota must fail if another scan is already active")
|
||||
err := sftpd.RemoveQuotaScan(defaultUsername)
|
||||
assert.NoError(t, err)
|
||||
activeScans := sftpd.GetQuotaScans()
|
||||
assert.True(t, common.QuotaScans.RemoveUserQuotaScan(defaultUsername))
|
||||
activeScans := common.QuotaScans.GetUsersQuotaScans()
|
||||
assert.Equal(t, 0, len(activeScans))
|
||||
err = sftpd.RemoveQuotaScan(defaultUsername)
|
||||
assert.Error(t, err)
|
||||
assert.False(t, common.QuotaScans.RemoveUserQuotaScan(defaultUsername))
|
||||
}
|
||||
|
||||
func TestQuotaLimits(t *testing.T) {
|
||||
|
@ -2071,7 +2120,6 @@ func TestBandwidthAndConnections(t *testing.T) {
|
|||
// wait some additional arbitrary time for transfer activity to happen
|
||||
// it is needed to reach all the code in CheckIdleConnections
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
sftpd.CheckIdleConnections()
|
||||
err = <-c
|
||||
assert.NoError(t, err)
|
||||
elapsed = time.Since(startTime).Nanoseconds() / 1000000
|
||||
|
@ -2080,10 +2128,9 @@ func TestBandwidthAndConnections(t *testing.T) {
|
|||
c = sftpUploadNonBlocking(testFilePath, testFileName+"_partial", testFileSize, client)
|
||||
waitForActiveTransfer()
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
sftpd.CheckIdleConnections()
|
||||
stats := sftpd.GetConnectionsStats()
|
||||
stats := common.Connections.GetStats()
|
||||
for _, stat := range stats {
|
||||
sftpd.CloseActiveConnection(stat.ConnectionID)
|
||||
common.Connections.Close(stat.ConnectionID)
|
||||
}
|
||||
err = <-c
|
||||
assert.Error(t, err, "connection closed while uploading: the upload must fail")
|
||||
|
@ -3916,16 +3963,16 @@ func TestVirtualFolderQuotaScan(t *testing.T) {
|
|||
|
||||
func TestVFolderMultipleQuotaScan(t *testing.T) {
|
||||
folderPath := filepath.Join(os.TempDir(), "folder_path")
|
||||
res := sftpd.AddVFolderQuotaScan(folderPath)
|
||||
res := common.QuotaScans.AddVFolderQuotaScan(folderPath)
|
||||
assert.True(t, res)
|
||||
res = sftpd.AddVFolderQuotaScan(folderPath)
|
||||
res = common.QuotaScans.AddVFolderQuotaScan(folderPath)
|
||||
assert.False(t, res)
|
||||
err := sftpd.RemoveVFolderQuotaScan(folderPath)
|
||||
assert.NoError(t, err)
|
||||
activeScans := sftpd.GetVFoldersQuotaScans()
|
||||
res = common.QuotaScans.RemoveVFolderQuotaScan(folderPath)
|
||||
assert.True(t, res)
|
||||
activeScans := common.QuotaScans.GetVFoldersQuotaScans()
|
||||
assert.Len(t, activeScans, 0)
|
||||
err = sftpd.RemoveVFolderQuotaScan(folderPath)
|
||||
assert.Error(t, err)
|
||||
res = common.QuotaScans.RemoveVFolderQuotaScan(folderPath)
|
||||
assert.False(t, res)
|
||||
}
|
||||
|
||||
func TestVFolderQuotaSize(t *testing.T) {
|
||||
|
@ -4400,7 +4447,7 @@ func TestPermRename(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
err = client.Rename(testFileName, testFileName+".rename")
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
_, err = client.Stat(testFileName)
|
||||
assert.NoError(t, err)
|
||||
|
@ -4438,7 +4485,7 @@ func TestPermRenameOverwrite(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
err = client.Rename(testFileName, testFileName+".rename")
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = client.Remove(testFileName)
|
||||
assert.NoError(t, err)
|
||||
|
@ -4612,26 +4659,29 @@ func TestSubDirsUploads(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
testFileName := "test_file.dat"
|
||||
testFileNameSub := "/subdir/test_file_dat"
|
||||
testSubFile := filepath.Join(user.GetHomeDir(), "subdir", "file.dat")
|
||||
testDir := "testdir"
|
||||
testFilePath := filepath.Join(homeBasePath, testFileName)
|
||||
testFileSize := int64(65535)
|
||||
err = createTestFile(testFilePath, testFileSize)
|
||||
assert.NoError(t, err)
|
||||
err = createTestFile(testSubFile, testFileSize)
|
||||
assert.NoError(t, err)
|
||||
err = sftpUploadFile(testFilePath, testFileName, testFileSize, client)
|
||||
assert.NoError(t, err)
|
||||
err = sftpUploadFile(testFilePath, testFileNameSub, testFileSize, client)
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = client.Symlink(testFileName, testFileNameSub+".link")
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = client.Symlink(testFileName, testFileName+".link")
|
||||
assert.NoError(t, err)
|
||||
err = client.Rename(testFileName, testFileNameSub+".rename")
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = client.Rename(testFileName, testFileName+".rename")
|
||||
assert.NoError(t, err)
|
||||
|
@ -4651,9 +4701,9 @@ func TestSubDirsUploads(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
err = client.Remove(testDir)
|
||||
assert.NoError(t, err)
|
||||
err = client.Remove(testFileNameSub)
|
||||
err = client.Remove(path.Join("/subdir", "file.dat"))
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = client.Remove(testFileName + ".rename")
|
||||
assert.NoError(t, err)
|
||||
|
@ -4686,7 +4736,7 @@ func TestSubDirsOverwrite(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
err = sftpUploadFile(testFilePath, testFileName+".new", testFileSize, client)
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = sftpUploadFile(testFilePath, testFileName, testFileSize, client)
|
||||
assert.NoError(t, err)
|
||||
|
@ -4721,27 +4771,27 @@ func TestSubDirsDownloads(t *testing.T) {
|
|||
localDownloadPath := filepath.Join(homeBasePath, "test_download.dat")
|
||||
err = sftpDownloadFile(testFileName, localDownloadPath, testFileSize, client)
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = sftpUploadFile(testFilePath, testFileName, testFileSize, client)
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = client.Chtimes(testFileName, time.Now(), time.Now())
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = client.Rename(testFileName, testFileName+".rename")
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = client.Symlink(testFileName, testFileName+".link")
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = client.Remove(testFileName)
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = os.Remove(localDownloadPath)
|
||||
assert.NoError(t, err)
|
||||
|
@ -4777,11 +4827,11 @@ func TestPermsSubDirsSetstat(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
err = client.Chtimes("/subdir/", time.Now(), time.Now())
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = client.Chtimes("subdir/", time.Now(), time.Now())
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = client.Chtimes(testFileName, time.Now(), time.Now())
|
||||
assert.NoError(t, err)
|
||||
|
@ -4816,15 +4866,15 @@ func TestPermsSubDirsCommands(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
_, err = client.ReadDir("/subdir")
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = client.RemoveDirectory("/subdir/dir")
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = client.Mkdir("/subdir/otherdir/dir")
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = client.Mkdir("/otherdir")
|
||||
assert.NoError(t, err)
|
||||
|
@ -4832,11 +4882,11 @@ func TestPermsSubDirsCommands(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
err = client.Rename("/otherdir", "/subdir/otherdir/adir")
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = client.Symlink("/otherdir", "/subdir/otherdir")
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = client.Symlink("/otherdir", "/otherdir_link")
|
||||
assert.NoError(t, err)
|
||||
|
@ -4863,15 +4913,15 @@ func TestRootDirCommands(t *testing.T) {
|
|||
defer client.Close()
|
||||
err = client.Rename("/", "rootdir")
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = client.Symlink("/", "rootdir")
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
err = client.RemoveDirectory("/")
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), permissionErrorString)
|
||||
assert.Contains(t, err.Error(), sftp.ErrSSHFxPermissionDenied.Error())
|
||||
}
|
||||
}
|
||||
_, err = httpd.RemoveUser(user, http.StatusOK)
|
||||
|
@ -6880,7 +6930,7 @@ func waitTCPListening(address string) {
|
|||
continue
|
||||
}
|
||||
logger.InfoToConsole("tcp server %v now listening\n", address)
|
||||
defer conn.Close()
|
||||
conn.Close()
|
||||
break
|
||||
}
|
||||
}
|
||||
|
@ -7261,19 +7311,19 @@ func computeHashForFile(hasher hash.Hash, path string) (string, error) {
|
|||
}
|
||||
|
||||
func waitForNoActiveTransfer() {
|
||||
for len(sftpd.GetConnectionsStats()) > 0 {
|
||||
for len(common.Connections.GetStats()) > 0 {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
func waitForActiveTransfer() {
|
||||
stats := sftpd.GetConnectionsStats()
|
||||
stats := common.Connections.GetStats()
|
||||
for len(stats) < 1 {
|
||||
stats = sftpd.GetConnectionsStats()
|
||||
stats = common.Connections.GetStats()
|
||||
}
|
||||
activeTransferFound := false
|
||||
for !activeTransferFound {
|
||||
stats = sftpd.GetConnectionsStats()
|
||||
stats = common.Connections.GetStats()
|
||||
if len(stats) == 0 {
|
||||
break
|
||||
}
|
||||
|
|
239
sftpd/ssh_cmd.go
|
@ -14,12 +14,12 @@ import (
|
|||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/shlex"
|
||||
fscopy "github.com/otiai10/copy"
|
||||
"golang.org/x/crypto/ssh"
|
||||
|
||||
"github.com/drakkan/sftpgo/common"
|
||||
"github.com/drakkan/sftpgo/dataprovider"
|
||||
"github.com/drakkan/sftpgo/logger"
|
||||
"github.com/drakkan/sftpgo/metrics"
|
||||
|
@ -27,21 +27,19 @@ import (
|
|||
"github.com/drakkan/sftpgo/vfs"
|
||||
)
|
||||
|
||||
const scpCmdName = "scp"
|
||||
const (
|
||||
scpCmdName = "scp"
|
||||
sshCommandLogSender = "SSHCommand"
|
||||
)
|
||||
|
||||
var (
|
||||
errQuotaExceeded = errors.New("denying write due to space limit")
|
||||
errPermissionDenied = errors.New("Permission denied. You don't have the permissions to execute this command")
|
||||
errNotExist = errors.New("no such file or directory")
|
||||
errGenericFailure = errors.New("failure, this command cannot be executed")
|
||||
errUnsupportedConfig = errors.New("command unsupported for this configuration")
|
||||
errSkipPermissionsCheck = errors.New("permission check skipped")
|
||||
errUnsupportedConfig = errors.New("command unsupported for this configuration")
|
||||
)
|
||||
|
||||
type sshCommand struct {
|
||||
command string
|
||||
args []string
|
||||
connection Connection
|
||||
connection *Connection
|
||||
}
|
||||
|
||||
type systemCommand struct {
|
||||
|
@ -54,44 +52,45 @@ func processSSHCommand(payload []byte, connection *Connection, channel ssh.Chann
|
|||
var msg sshSubsystemExecMsg
|
||||
if err := ssh.Unmarshal(payload, &msg); err == nil {
|
||||
name, args, err := parseCommandPayload(msg.Command)
|
||||
connection.Log(logger.LevelDebug, logSenderSSH, "new ssh command: %#v args: %v num args: %v user: %v, error: %v",
|
||||
connection.Log(logger.LevelDebug, "new ssh command: %#v args: %v num args: %v user: %v, error: %v",
|
||||
name, args, len(args), connection.User.Username, err)
|
||||
if err == nil && utils.IsStringInSlice(name, enabledSSHCommands) {
|
||||
connection.command = msg.Command
|
||||
if name == scpCmdName && len(args) >= 2 {
|
||||
connection.protocol = protocolSCP
|
||||
connection.SetProtocol(common.ProtocolSCP)
|
||||
connection.channel = channel
|
||||
scpCommand := scpCommand{
|
||||
sshCommand: sshCommand{
|
||||
command: name,
|
||||
connection: *connection,
|
||||
connection: connection,
|
||||
args: args},
|
||||
}
|
||||
go scpCommand.handle() //nolint:errcheck
|
||||
return true
|
||||
}
|
||||
if name != scpCmdName {
|
||||
connection.protocol = protocolSSH
|
||||
connection.SetProtocol(common.ProtocolSSH)
|
||||
connection.channel = channel
|
||||
sshCommand := sshCommand{
|
||||
command: name,
|
||||
connection: *connection,
|
||||
connection: connection,
|
||||
args: args,
|
||||
}
|
||||
go sshCommand.handle() //nolint:errcheck
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
connection.Log(logger.LevelInfo, logSenderSSH, "ssh command not enabled/supported: %#v", name)
|
||||
connection.Log(logger.LevelInfo, "ssh command not enabled/supported: %#v", name)
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *sshCommand) handle() error {
|
||||
addConnection(c.connection)
|
||||
defer removeConnection(c.connection)
|
||||
updateConnectionActivity(c.connection.ID)
|
||||
common.Connections.Add(c.connection)
|
||||
defer common.Connections.Remove(c.connection)
|
||||
|
||||
c.connection.UpdateLastActivity()
|
||||
if utils.IsStringInSlice(c.command, sshHashCommands) {
|
||||
return c.handleHashCommands()
|
||||
} else if utils.IsStringInSlice(c.command, systemCommands) {
|
||||
|
@ -115,7 +114,7 @@ func (c *sshCommand) handle() error {
|
|||
}
|
||||
|
||||
func (c *sshCommand) handeSFTPGoCopy() error {
|
||||
if !vfs.IsLocalOsFs(c.connection.fs) {
|
||||
if !vfs.IsLocalOsFs(c.connection.Fs) {
|
||||
return c.sendErrorResponse(errUnsupportedConfig)
|
||||
}
|
||||
sshSourcePath, sshDestPath, err := c.getCopyPaths()
|
||||
|
@ -130,10 +129,10 @@ func (c *sshCommand) handeSFTPGoCopy() error {
|
|||
return c.sendErrorResponse(err)
|
||||
}
|
||||
|
||||
c.connection.Log(logger.LevelDebug, logSenderSSH, "requested copy %#v -> %#v sftp paths %#v -> %#v",
|
||||
c.connection.Log(logger.LevelDebug, "requested copy %#v -> %#v sftp paths %#v -> %#v",
|
||||
fsSourcePath, fsDestPath, sshSourcePath, sshDestPath)
|
||||
|
||||
fi, err := c.connection.fs.Lstat(fsSourcePath)
|
||||
fi, err := c.connection.Fs.Lstat(fsSourcePath)
|
||||
if err != nil {
|
||||
return c.sendErrorResponse(err)
|
||||
}
|
||||
|
@ -143,7 +142,7 @@ func (c *sshCommand) handeSFTPGoCopy() error {
|
|||
filesNum := 0
|
||||
filesSize := int64(0)
|
||||
if fi.IsDir() {
|
||||
filesNum, filesSize, err = c.connection.fs.GetDirSize(fsSourcePath)
|
||||
filesNum, filesSize, err = c.connection.Fs.GetDirSize(fsSourcePath)
|
||||
if err != nil {
|
||||
return c.sendErrorResponse(err)
|
||||
}
|
||||
|
@ -169,7 +168,7 @@ func (c *sshCommand) handeSFTPGoCopy() error {
|
|||
if err := c.checkCopyQuota(filesNum, filesSize, sshDestPath); err != nil {
|
||||
return c.sendErrorResponse(err)
|
||||
}
|
||||
c.connection.Log(logger.LevelDebug, logSenderSSH, "start copy %#v -> %#v", fsSourcePath, fsDestPath)
|
||||
c.connection.Log(logger.LevelDebug, "start copy %#v -> %#v", fsSourcePath, fsDestPath)
|
||||
err = fscopy.Copy(fsSourcePath, fsDestPath)
|
||||
if err != nil {
|
||||
return c.sendErrorResponse(err)
|
||||
|
@ -181,7 +180,7 @@ func (c *sshCommand) handeSFTPGoCopy() error {
|
|||
}
|
||||
|
||||
func (c *sshCommand) handeSFTPGoRemove() error {
|
||||
if !vfs.IsLocalOsFs(c.connection.fs) {
|
||||
if !vfs.IsLocalOsFs(c.connection.Fs) {
|
||||
return c.sendErrorResponse(errUnsupportedConfig)
|
||||
}
|
||||
sshDestPath, err := c.getRemovePath()
|
||||
|
@ -189,20 +188,20 @@ func (c *sshCommand) handeSFTPGoRemove() error {
|
|||
return c.sendErrorResponse(err)
|
||||
}
|
||||
if !c.connection.User.HasPerm(dataprovider.PermDelete, path.Dir(sshDestPath)) {
|
||||
return c.sendErrorResponse(errPermissionDenied)
|
||||
return c.sendErrorResponse(common.ErrPermissionDenied)
|
||||
}
|
||||
fsDestPath, err := c.connection.fs.ResolvePath(sshDestPath)
|
||||
fsDestPath, err := c.connection.Fs.ResolvePath(sshDestPath)
|
||||
if err != nil {
|
||||
return c.sendErrorResponse(err)
|
||||
}
|
||||
fi, err := c.connection.fs.Lstat(fsDestPath)
|
||||
fi, err := c.connection.Fs.Lstat(fsDestPath)
|
||||
if err != nil {
|
||||
return c.sendErrorResponse(err)
|
||||
}
|
||||
filesNum := 0
|
||||
filesSize := int64(0)
|
||||
if fi.IsDir() {
|
||||
filesNum, filesSize, err = c.connection.fs.GetDirSize(fsDestPath)
|
||||
filesNum, filesSize, err = c.connection.Fs.GetDirSize(fsDestPath)
|
||||
if err != nil {
|
||||
return c.sendErrorResponse(err)
|
||||
}
|
||||
|
@ -253,7 +252,7 @@ func (c *sshCommand) updateQuota(sshDestPath string, filesNum int, filesSize int
|
|||
}
|
||||
|
||||
func (c *sshCommand) handleHashCommands() error {
|
||||
if !vfs.IsLocalOsFs(c.connection.fs) {
|
||||
if !vfs.IsLocalOsFs(c.connection.Fs) {
|
||||
return c.sendErrorResponse(errUnsupportedConfig)
|
||||
}
|
||||
var h hash.Hash
|
||||
|
@ -281,15 +280,15 @@ func (c *sshCommand) handleHashCommands() error {
|
|||
} else {
|
||||
sshPath := c.getDestPath()
|
||||
if !c.connection.User.IsFileAllowed(sshPath) {
|
||||
c.connection.Log(logger.LevelInfo, logSenderSSH, "hash not allowed for file %#v", sshPath)
|
||||
return c.sendErrorResponse(errPermissionDenied)
|
||||
c.connection.Log(logger.LevelInfo, "hash not allowed for file %#v", sshPath)
|
||||
return c.sendErrorResponse(common.ErrPermissionDenied)
|
||||
}
|
||||
fsPath, err := c.connection.fs.ResolvePath(sshPath)
|
||||
fsPath, err := c.connection.Fs.ResolvePath(sshPath)
|
||||
if err != nil {
|
||||
return c.sendErrorResponse(err)
|
||||
}
|
||||
if !c.connection.User.HasPerm(dataprovider.PermListItems, sshPath) {
|
||||
return c.sendErrorResponse(errPermissionDenied)
|
||||
return c.sendErrorResponse(common.ErrPermissionDenied)
|
||||
}
|
||||
hash, err := computeHashForFile(h, fsPath)
|
||||
if err != nil {
|
||||
|
@ -303,18 +302,18 @@ func (c *sshCommand) handleHashCommands() error {
|
|||
}
|
||||
|
||||
func (c *sshCommand) executeSystemCommand(command systemCommand) error {
|
||||
if !vfs.IsLocalOsFs(c.connection.fs) {
|
||||
if !vfs.IsLocalOsFs(c.connection.Fs) {
|
||||
return c.sendErrorResponse(errUnsupportedConfig)
|
||||
}
|
||||
sshDestPath := c.getDestPath()
|
||||
quotaResult := c.connection.hasSpace(true, command.quotaCheckPath)
|
||||
quotaResult := c.connection.HasSpace(true, command.quotaCheckPath)
|
||||
if !quotaResult.HasSpace {
|
||||
return c.sendErrorResponse(errQuotaExceeded)
|
||||
return c.sendErrorResponse(common.ErrQuotaExceeded)
|
||||
}
|
||||
perms := []string{dataprovider.PermDownload, dataprovider.PermUpload, dataprovider.PermCreateDirs, dataprovider.PermListItems,
|
||||
dataprovider.PermOverwrite, dataprovider.PermDelete}
|
||||
if !c.connection.User.HasPerms(perms, sshDestPath) {
|
||||
return c.sendErrorResponse(errPermissionDenied)
|
||||
return c.sendErrorResponse(common.ErrPermissionDenied)
|
||||
}
|
||||
|
||||
initialFiles, initialSize, err := c.getSizeForPath(command.fsPath)
|
||||
|
@ -340,11 +339,11 @@ func (c *sshCommand) executeSystemCommand(command systemCommand) error {
|
|||
}
|
||||
|
||||
closeCmdOnError := func() {
|
||||
c.connection.Log(logger.LevelDebug, logSenderSSH, "kill cmd: %#v and close ssh channel after read or write error",
|
||||
c.connection.Log(logger.LevelDebug, "kill cmd: %#v and close ssh channel after read or write error",
|
||||
c.connection.command)
|
||||
killerr := command.cmd.Process.Kill()
|
||||
closerr := c.connection.channel.Close()
|
||||
c.connection.Log(logger.LevelDebug, logSenderSSH, "kill cmd error: %v close channel error: %v", killerr, closerr)
|
||||
c.connection.Log(logger.LevelDebug, "kill cmd error: %v close channel error: %v", killerr, closerr)
|
||||
}
|
||||
var once sync.Once
|
||||
commandResponse := make(chan bool)
|
||||
|
@ -353,28 +352,12 @@ func (c *sshCommand) executeSystemCommand(command systemCommand) error {
|
|||
|
||||
go func() {
|
||||
defer stdin.Close()
|
||||
transfer := Transfer{
|
||||
file: nil,
|
||||
path: command.fsPath,
|
||||
start: time.Now(),
|
||||
bytesSent: 0,
|
||||
bytesReceived: 0,
|
||||
user: c.connection.User,
|
||||
connectionID: c.connection.ID,
|
||||
transferType: transferUpload,
|
||||
lastActivity: time.Now(),
|
||||
isNewFile: false,
|
||||
protocol: c.connection.protocol,
|
||||
transferError: nil,
|
||||
isFinished: false,
|
||||
minWriteOffset: 0,
|
||||
maxWriteSize: remainingQuotaSize,
|
||||
lock: new(sync.Mutex),
|
||||
}
|
||||
addTransfer(&transfer)
|
||||
defer removeTransfer(&transfer) //nolint:errcheck
|
||||
baseTransfer := common.NewBaseTransfer(nil, c.connection.BaseConnection, nil, command.fsPath, sshDestPath,
|
||||
common.TransferUpload, 0, 0, false)
|
||||
transfer := newTranfer(baseTransfer, nil, nil, remainingQuotaSize)
|
||||
|
||||
w, e := transfer.copyFromReaderToWriter(stdin, c.connection.channel)
|
||||
c.connection.Log(logger.LevelDebug, logSenderSSH, "command: %#v, copy from remote command to sdtin ended, written: %v, "+
|
||||
c.connection.Log(logger.LevelDebug, "command: %#v, copy from remote command to sdtin ended, written: %v, "+
|
||||
"initial remaining quota: %v, err: %v", c.connection.command, w, remainingQuotaSize, e)
|
||||
if e != nil {
|
||||
once.Do(closeCmdOnError)
|
||||
|
@ -382,27 +365,12 @@ func (c *sshCommand) executeSystemCommand(command systemCommand) error {
|
|||
}()
|
||||
|
||||
go func() {
|
||||
transfer := Transfer{
|
||||
file: nil,
|
||||
path: command.fsPath,
|
||||
start: time.Now(),
|
||||
bytesSent: 0,
|
||||
bytesReceived: 0,
|
||||
user: c.connection.User,
|
||||
connectionID: c.connection.ID,
|
||||
transferType: transferDownload,
|
||||
lastActivity: time.Now(),
|
||||
isNewFile: false,
|
||||
protocol: c.connection.protocol,
|
||||
transferError: nil,
|
||||
isFinished: false,
|
||||
minWriteOffset: 0,
|
||||
lock: new(sync.Mutex),
|
||||
}
|
||||
addTransfer(&transfer)
|
||||
defer removeTransfer(&transfer) //nolint:errcheck
|
||||
baseTransfer := common.NewBaseTransfer(nil, c.connection.BaseConnection, nil, command.fsPath, sshDestPath,
|
||||
common.TransferDownload, 0, 0, false)
|
||||
transfer := newTranfer(baseTransfer, nil, nil, 0)
|
||||
|
||||
w, e := transfer.copyFromReaderToWriter(c.connection.channel, stdout)
|
||||
c.connection.Log(logger.LevelDebug, logSenderSSH, "command: %#v, copy from sdtout to remote command ended, written: %v err: %v",
|
||||
c.connection.Log(logger.LevelDebug, "command: %#v, copy from sdtout to remote command ended, written: %v err: %v",
|
||||
c.connection.command, w, e)
|
||||
if e != nil {
|
||||
once.Do(closeCmdOnError)
|
||||
|
@ -411,27 +379,12 @@ func (c *sshCommand) executeSystemCommand(command systemCommand) error {
|
|||
}()
|
||||
|
||||
go func() {
|
||||
transfer := Transfer{
|
||||
file: nil,
|
||||
path: command.fsPath,
|
||||
start: time.Now(),
|
||||
bytesSent: 0,
|
||||
bytesReceived: 0,
|
||||
user: c.connection.User,
|
||||
connectionID: c.connection.ID,
|
||||
transferType: transferDownload,
|
||||
lastActivity: time.Now(),
|
||||
isNewFile: false,
|
||||
protocol: c.connection.protocol,
|
||||
transferError: nil,
|
||||
isFinished: false,
|
||||
minWriteOffset: 0,
|
||||
lock: new(sync.Mutex),
|
||||
}
|
||||
addTransfer(&transfer)
|
||||
defer removeTransfer(&transfer) //nolint:errcheck
|
||||
baseTransfer := common.NewBaseTransfer(nil, c.connection.BaseConnection, nil, command.fsPath, sshDestPath,
|
||||
common.TransferDownload, 0, 0, false)
|
||||
transfer := newTranfer(baseTransfer, nil, nil, 0)
|
||||
|
||||
w, e := transfer.copyFromReaderToWriter(c.connection.channel.Stderr(), stderr)
|
||||
c.connection.Log(logger.LevelDebug, logSenderSSH, "command: %#v, copy from sdterr to remote command ended, written: %v err: %v",
|
||||
c.connection.Log(logger.LevelDebug, "command: %#v, copy from sdterr to remote command ended, written: %v err: %v",
|
||||
c.connection.command, w, e)
|
||||
// os.ErrClosed means that the command is finished so we don't need to do anything
|
||||
if (e != nil && !errors.Is(e, os.ErrClosed)) || w > 0 {
|
||||
|
@ -447,7 +400,7 @@ func (c *sshCommand) executeSystemCommand(command systemCommand) error {
|
|||
if errSize == nil {
|
||||
c.updateQuota(sshDestPath, numFiles-initialFiles, dirSize-initialSize)
|
||||
}
|
||||
c.connection.Log(logger.LevelDebug, logSenderSSH, "command %#v finished for path %#v, initial files %v initial size %v "+
|
||||
c.connection.Log(logger.LevelDebug, "command %#v finished for path %#v, initial files %v initial size %v "+
|
||||
"current files %v current size %v size err: %v", c.connection.command, command.fsPath, initialFiles, initialSize,
|
||||
numFiles, dirSize, errSize)
|
||||
return err
|
||||
|
@ -460,20 +413,20 @@ func (c *sshCommand) isSystemCommandAllowed() error {
|
|||
return nil
|
||||
}
|
||||
if c.connection.User.HasVirtualFoldersInside(sshDestPath) {
|
||||
c.connection.Log(logger.LevelDebug, logSenderSSH, "command %#v is not allowed, path %#v has virtual folders inside it, user %#v",
|
||||
c.connection.Log(logger.LevelDebug, "command %#v is not allowed, path %#v has virtual folders inside it, user %#v",
|
||||
c.command, sshDestPath, c.connection.User.Username)
|
||||
return errUnsupportedConfig
|
||||
}
|
||||
for _, f := range c.connection.User.Filters.FileExtensions {
|
||||
if f.Path == sshDestPath {
|
||||
c.connection.Log(logger.LevelDebug, logSenderSSH,
|
||||
c.connection.Log(logger.LevelDebug,
|
||||
"command %#v is not allowed inside folders with files extensions filters %#v user %#v",
|
||||
c.command, sshDestPath, c.connection.User.Username)
|
||||
return errUnsupportedConfig
|
||||
}
|
||||
if len(sshDestPath) > len(f.Path) {
|
||||
if strings.HasPrefix(sshDestPath, f.Path+"/") || f.Path == "/" {
|
||||
c.connection.Log(logger.LevelDebug, logSenderSSH,
|
||||
c.connection.Log(logger.LevelDebug,
|
||||
"command %#v is not allowed it includes folders with files extensions filters %#v user %#v",
|
||||
c.command, sshDestPath, c.connection.User.Username)
|
||||
return errUnsupportedConfig
|
||||
|
@ -481,7 +434,7 @@ func (c *sshCommand) isSystemCommandAllowed() error {
|
|||
}
|
||||
if len(sshDestPath) < len(f.Path) {
|
||||
if strings.HasPrefix(sshDestPath+"/", f.Path) || sshDestPath == "/" {
|
||||
c.connection.Log(logger.LevelDebug, logSenderSSH,
|
||||
c.connection.Log(logger.LevelDebug,
|
||||
"command %#v is not allowed inside folder with files extensions filters %#v user %#v",
|
||||
c.command, sshDestPath, c.connection.User.Username)
|
||||
return errUnsupportedConfig
|
||||
|
@ -503,12 +456,12 @@ func (c *sshCommand) getSystemCommand() (systemCommand, error) {
|
|||
if len(c.args) > 0 {
|
||||
var err error
|
||||
sshPath := c.getDestPath()
|
||||
fsPath, err = c.connection.fs.ResolvePath(sshPath)
|
||||
fsPath, err = c.connection.Fs.ResolvePath(sshPath)
|
||||
if err != nil {
|
||||
return command, err
|
||||
}
|
||||
quotaPath = sshPath
|
||||
fi, err := c.connection.fs.Stat(fsPath)
|
||||
fi, err := c.connection.Fs.Stat(fsPath)
|
||||
if err == nil && fi.IsDir() {
|
||||
// if the target is an existing dir the command will write inside this dir
|
||||
// so we need to check the quota for this directory and not its parent dir
|
||||
|
@ -537,7 +490,7 @@ func (c *sshCommand) getSystemCommand() (systemCommand, error) {
|
|||
}
|
||||
}
|
||||
}
|
||||
c.connection.Log(logger.LevelDebug, logSenderSSH, "new system command %#v, with args: %+v fs path %#v quota check path %#v",
|
||||
c.connection.Log(logger.LevelDebug, "new system command %#v, with args: %+v fs path %#v quota check path %#v",
|
||||
c.command, args, fsPath, quotaPath)
|
||||
cmd := exec.Command(c.command, args...)
|
||||
uid := c.connection.User.GetUID()
|
||||
|
@ -575,18 +528,6 @@ func cleanCommandPath(name string) string {
|
|||
return result
|
||||
}
|
||||
|
||||
// we try to avoid to leak the real filesystem path here
|
||||
func (c *sshCommand) getMappedError(err error) error {
|
||||
if c.connection.fs.IsNotExist(err) {
|
||||
return errNotExist
|
||||
}
|
||||
if c.connection.fs.IsPermission(err) {
|
||||
return errPermissionDenied
|
||||
}
|
||||
c.connection.Log(logger.LevelDebug, logSenderSSH, "unhandled error for SSH command, a generic failure will be sent: %v", err)
|
||||
return errGenericFailure
|
||||
}
|
||||
|
||||
func (c *sshCommand) getCopyPaths() (string, string, error) {
|
||||
sshSourcePath := strings.TrimSuffix(c.getSourcePath(), "/")
|
||||
sshDestPath := c.getDestPath()
|
||||
|
@ -615,7 +556,7 @@ func (c *sshCommand) hasCopyPermissions(sshSourcePath, sshDestPath string, srcIn
|
|||
// fsSourcePath must be a directory
|
||||
func (c *sshCommand) checkRecursiveCopyPermissions(fsSourcePath, fsDestPath, sshDestPath string) error {
|
||||
if !c.connection.User.HasPerm(dataprovider.PermCreateDirs, path.Dir(sshDestPath)) {
|
||||
return errPermissionDenied
|
||||
return common.ErrPermissionDenied
|
||||
}
|
||||
dstPerms := []string{
|
||||
dataprovider.PermCreateDirs,
|
||||
|
@ -623,28 +564,28 @@ func (c *sshCommand) checkRecursiveCopyPermissions(fsSourcePath, fsDestPath, ssh
|
|||
dataprovider.PermUpload,
|
||||
}
|
||||
|
||||
err := c.connection.fs.Walk(fsSourcePath, func(walkedPath string, info os.FileInfo, err error) error {
|
||||
err := c.connection.Fs.Walk(fsSourcePath, func(walkedPath string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fsDstSubPath := strings.Replace(walkedPath, fsSourcePath, fsDestPath, 1)
|
||||
sshSrcSubPath := c.connection.fs.GetRelativePath(walkedPath)
|
||||
sshDstSubPath := c.connection.fs.GetRelativePath(fsDstSubPath)
|
||||
sshSrcSubPath := c.connection.Fs.GetRelativePath(walkedPath)
|
||||
sshDstSubPath := c.connection.Fs.GetRelativePath(fsDstSubPath)
|
||||
// If the current dir has no subdirs with defined permissions inside it
|
||||
// and it has all the possible permissions we can stop scanning
|
||||
if !c.connection.User.HasPermissionsInside(path.Dir(sshSrcSubPath)) &&
|
||||
!c.connection.User.HasPermissionsInside(path.Dir(sshDstSubPath)) {
|
||||
if c.connection.User.HasPerm(dataprovider.PermListItems, path.Dir(sshSrcSubPath)) &&
|
||||
c.connection.User.HasPerms(dstPerms, path.Dir(sshDstSubPath)) {
|
||||
return errSkipPermissionsCheck
|
||||
return common.ErrSkipPermissionsCheck
|
||||
}
|
||||
}
|
||||
if !c.hasCopyPermissions(sshSrcSubPath, sshDstSubPath, info) {
|
||||
return errPermissionDenied
|
||||
return common.ErrPermissionDenied
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err == errSkipPermissionsCheck {
|
||||
if err == common.ErrSkipPermissionsCheck {
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
@@ -655,7 +596,7 @@ func (c *sshCommand) checkCopyPermissions(fsSourcePath, fsDestPath, sshSourcePat
        return c.checkRecursiveCopyPermissions(fsSourcePath, fsDestPath, sshDestPath)
    }
    if !c.hasCopyPermissions(sshSourcePath, sshDestPath, info) {
        return errPermissionDenied
        return common.ErrPermissionDenied
    }
    return nil
}

@@ -673,11 +614,11 @@ func (c *sshCommand) getRemovePath() (string, error) {
}

func (c *sshCommand) resolveCopyPaths(sshSourcePath, sshDestPath string) (string, string, error) {
    fsSourcePath, err := c.connection.fs.ResolvePath(sshSourcePath)
    fsSourcePath, err := c.connection.Fs.ResolvePath(sshSourcePath)
    if err != nil {
        return "", "", err
    }
    fsDestPath, err := c.connection.fs.ResolvePath(sshDestPath)
    fsDestPath, err := c.connection.Fs.ResolvePath(sshDestPath)
    if err != nil {
        return "", "", err
    }

@@ -685,35 +626,35 @@ func (c *sshCommand) resolveCopyPaths(sshSourcePath, sshDestPath string) (string
}

func (c *sshCommand) checkCopyDestination(fsDestPath string) error {
    _, err := c.connection.fs.Lstat(fsDestPath)
    _, err := c.connection.Fs.Lstat(fsDestPath)
    if err == nil {
        err := errors.New("invalid copy destination: cannot overwrite an existing file or directory")
        return err
    } else if !c.connection.fs.IsNotExist(err) {
    } else if !c.connection.Fs.IsNotExist(err) {
        return err
    }
    return nil
}

func (c *sshCommand) checkCopyQuota(numFiles int, filesSize int64, requestPath string) error {
    quotaResult := c.connection.hasSpace(true, requestPath)
    quotaResult := c.connection.HasSpace(true, requestPath)
    if !quotaResult.HasSpace {
        return errQuotaExceeded
        return common.ErrQuotaExceeded
    }
    if quotaResult.QuotaFiles > 0 {
        remainingFiles := quotaResult.GetRemainingFiles()
        if remainingFiles < numFiles {
            c.connection.Log(logger.LevelDebug, logSenderSSH, "copy not allowed, file limit will be exceeded, "+
            c.connection.Log(logger.LevelDebug, "copy not allowed, file limit will be exceeded, "+
                "remaining files: %v to copy: %v", remainingFiles, numFiles)
            return errQuotaExceeded
            return common.ErrQuotaExceeded
        }
    }
    if quotaResult.QuotaSize > 0 {
        remainingSize := quotaResult.GetRemainingSize()
        if remainingSize < filesSize {
            c.connection.Log(logger.LevelDebug, logSenderSSH, "copy not allowed, size limit will be exceeded, "+
            c.connection.Log(logger.LevelDebug, "copy not allowed, size limit will be exceeded, "+
                "remaining size: %v to copy: %v", remainingSize, filesSize)
            return errQuotaExceeded
            return common.ErrQuotaExceeded
        }
    }
    return nil
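checkCopyQuota above refuses a copy up front when the file count or the total size would exceed the user quota: remaining = limit - used, and the copy is allowed only if the delta fits. A small self-contained sketch of that check; the quotaResult type below is an illustrative stand-in, not sftpgo's:

package main

import (
    "errors"
    "fmt"
)

var errQuotaExceeded = errors.New("denying write due to space limit")

// quotaResult is an illustrative stand-in for the result of a quota lookup.
type quotaResult struct {
    QuotaFiles int   // 0 means unlimited
    QuotaSize  int64 // 0 means unlimited
    UsedFiles  int
    UsedSize   int64
}

func (q quotaResult) remainingFiles() int  { return q.QuotaFiles - q.UsedFiles }
func (q quotaResult) remainingSize() int64 { return q.QuotaSize - q.UsedSize }

// checkCopyQuota mirrors the shape of the check above: fail fast if either limit would be exceeded.
func checkCopyQuota(q quotaResult, numFiles int, filesSize int64) error {
    if q.QuotaFiles > 0 && q.remainingFiles() < numFiles {
        return errQuotaExceeded
    }
    if q.QuotaSize > 0 && q.remainingSize() < filesSize {
        return errQuotaExceeded
    }
    return nil
}

func main() {
    q := quotaResult{QuotaFiles: 100, QuotaSize: 1 << 20, UsedFiles: 99, UsedSize: 512}
    fmt.Println(checkCopyQuota(q, 1, 1024)) // <nil>, still fits
    fmt.Println(checkCopyQuota(q, 2, 1024)) // denying write due to space limit
}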
@@ -721,18 +662,18 @@ func (c *sshCommand) checkCopyQuota(numFiles int, filesSize int64, requestPath s

func (c *sshCommand) getSizeForPath(name string) (int, int64, error) {
    if dataprovider.GetQuotaTracking() > 0 {
        fi, err := c.connection.fs.Lstat(name)
        fi, err := c.connection.Fs.Lstat(name)
        if err != nil {
            if c.connection.fs.IsNotExist(err) {
            if c.connection.Fs.IsNotExist(err) {
                return 0, 0, nil
            }
            c.connection.Log(logger.LevelDebug, logSenderSSH, "unable to stat %#v error: %v", name, err)
            c.connection.Log(logger.LevelDebug, "unable to stat %#v error: %v", name, err)
            return 0, 0, err
        }
        if fi.IsDir() {
            files, size, err := c.connection.fs.GetDirSize(name)
            files, size, err := c.connection.Fs.GetDirSize(name)
            if err != nil {
                c.connection.Log(logger.LevelDebug, logSenderSSH, "unable to get size for dir %#v error: %v", name, err)
                c.connection.Log(logger.LevelDebug, "unable to get size for dir %#v error: %v", name, err)
            }
            return files, size, err
        } else if fi.Mode().IsRegular() {

@@ -743,7 +684,7 @@ func (c *sshCommand) getSizeForPath(name string) (int, int64, error) {
}

func (c *sshCommand) sendErrorResponse(err error) error {
    errorString := fmt.Sprintf("%v: %v %v\n", c.command, c.getDestPath(), c.getMappedError(err))
    errorString := fmt.Sprintf("%v: %v %v\n", c.command, c.getDestPath(), c.connection.GetFsError(err))
    c.connection.channel.Write([]byte(errorString)) //nolint:errcheck
    c.sendExitStatus(err)
    return err

@@ -759,11 +700,11 @@ func (c *sshCommand) sendExitStatus(err error) {
    }
    if err != nil {
        status = uint32(1)
        c.connection.Log(logger.LevelWarn, logSenderSSH, "command failed: %#v args: %v user: %v err: %v",
        c.connection.Log(logger.LevelWarn, "command failed: %#v args: %v user: %v err: %v",
            c.command, c.args, c.connection.User.Username, err)
    } else {
        logger.CommandLog(sshCommandLogSender, cmdPath, targetPath, c.connection.User.Username, "", c.connection.ID,
            protocolSSH, -1, -1, "", "", c.connection.command)
            common.ProtocolSSH, -1, -1, "", "", c.connection.command)
    }
    exitStatus := sshSubsystemExitStatus{
        Status: status,

@@ -774,18 +715,18 @@ func (c *sshCommand) sendExitStatus(err error) {
    if c.command != scpCmdName {
        metrics.SSHCommandCompleted(err)
        if len(cmdPath) > 0 {
            p, e := c.connection.fs.ResolvePath(cmdPath)
            p, e := c.connection.Fs.ResolvePath(cmdPath)
            if e == nil {
                cmdPath = p
            }
        }
        if len(targetPath) > 0 {
            p, e := c.connection.fs.ResolvePath(targetPath)
            p, e := c.connection.Fs.ResolvePath(targetPath)
            if e == nil {
                targetPath = p
            }
        }
        go executeAction(newActionNotification(c.connection.User, operationSSHCmd, cmdPath, targetPath, c.command, 0, err)) //nolint:errcheck
        common.SSHCommandActionNotification(&c.connection.User, cmdPath, targetPath, c.command, err)
    }
}
@@ -1,123 +1,99 @@
package sftpd

import (
    "errors"
    "fmt"
    "io"
    "os"
    "path"
    "sync"
    "time"
    "sync/atomic"

    "github.com/eikenb/pipeat"

    "github.com/drakkan/sftpgo/dataprovider"
    "github.com/drakkan/sftpgo/logger"
    "github.com/drakkan/sftpgo/common"
    "github.com/drakkan/sftpgo/metrics"
    "github.com/drakkan/sftpgo/vfs"
)

const (
    transferUpload = iota
    transferDownload
)

var (
    errTransferClosed = errors.New("transfer already closed")
)

// Transfer contains the transfer details for an upload or a download.
// It implements the io Reader and Writer interface to handle files downloads and uploads
type Transfer struct {
    file *os.File
    writerAt *vfs.PipeWriter
    readerAt *pipeat.PipeReaderAt
    cancelFn func()
    path string
    start time.Time
    bytesSent int64
    bytesReceived int64
    user dataprovider.User
    connectionID string
    transferType int
    lastActivity time.Time
    protocol string
    transferError error
    minWriteOffset int64
    initialSize int64
    lock *sync.Mutex
    isNewFile bool
    isFinished bool
    requestPath string
    maxWriteSize int64
type writerAtCloser interface {
    io.WriterAt
    io.Closer
}

// TransferError is called if there is an unexpected error.
// For example network or client issues
func (t *Transfer) TransferError(err error) {
    t.lock.Lock()
    defer t.lock.Unlock()
    if t.transferError != nil {
        return
type readerAtCloser interface {
    io.ReaderAt
    io.Closer
}

// transfer defines the transfer details.
// It implements the io.ReaderAt and io.WriterAt interfaces to handle SFTP downloads and uploads
type transfer struct {
    *common.BaseTransfer
    writerAt writerAtCloser
    readerAt readerAtCloser
    isFinished bool
    maxWriteSize int64
}

func newTranfer(baseTransfer *common.BaseTransfer, pipeWriter *vfs.PipeWriter, pipeReader *pipeat.PipeReaderAt,
    maxWriteSize int64) *transfer {
    var writer writerAtCloser
    var reader readerAtCloser
    if baseTransfer.File != nil {
        writer = baseTransfer.File
        reader = baseTransfer.File
    } else if pipeWriter != nil {
        writer = pipeWriter
    } else if pipeReader != nil {
        reader = pipeReader
    }
    t.transferError = err
    if t.cancelFn != nil {
        t.cancelFn()
    return &transfer{
        BaseTransfer: baseTransfer,
        writerAt: writer,
        readerAt: reader,
        isFinished: false,
        maxWriteSize: maxWriteSize,
    }
    elapsed := time.Since(t.start).Nanoseconds() / 1000000
    logger.Warn(logSender, t.connectionID, "Unexpected error for transfer, path: %#v, error: \"%v\" bytes sent: %v, "+
        "bytes received: %v transfer running since %v ms", t.path, t.transferError, t.bytesSent, t.bytesReceived, elapsed)
}

// ReadAt reads len(p) bytes from the File to download starting at byte offset off and updates the bytes sent.
// It handles download bandwidth throttling too
func (t *Transfer) ReadAt(p []byte, off int64) (n int, err error) {
    t.lastActivity = time.Now()
func (t *transfer) ReadAt(p []byte, off int64) (n int, err error) {
    t.Connection.UpdateLastActivity()
    var readed int
    var e error
    if t.readerAt != nil {
        readed, e = t.readerAt.ReadAt(p, off)
    } else {
        readed, e = t.file.ReadAt(p, off)
    }
    t.lock.Lock()
    t.bytesSent += int64(readed)
    t.lock.Unlock()

    readed, e = t.readerAt.ReadAt(p, off)
    atomic.AddInt64(&t.BytesSent, int64(readed))

    if e != nil && e != io.EOF {
        t.TransferError(e)
        return readed, e
    }
    t.handleThrottle()
    t.HandleThrottle()
    return readed, e
}

// WriteAt writes len(p) bytes to the uploaded file starting at byte offset off and updates the bytes received.
// It handles upload bandwidth throttling too
func (t *Transfer) WriteAt(p []byte, off int64) (n int, err error) {
    t.lastActivity = time.Now()
    if off < t.minWriteOffset {
        err := fmt.Errorf("Invalid write offset: %v minimum valid value: %v", off, t.minWriteOffset)
func (t *transfer) WriteAt(p []byte, off int64) (n int, err error) {
    t.Connection.UpdateLastActivity()
    if off < t.MinWriteOffset {
        err := fmt.Errorf("Invalid write offset: %v minimum valid value: %v", off, t.MinWriteOffset)
        t.TransferError(err)
        return 0, err
    }
    var written int
    var e error
    if t.writerAt != nil {
        written, e = t.writerAt.WriteAt(p, off)
    } else {
        written, e = t.file.WriteAt(p, off)

    written, e = t.writerAt.WriteAt(p, off)
    atomic.AddInt64(&t.BytesReceived, int64(written))

    if t.maxWriteSize > 0 && e == nil && atomic.LoadInt64(&t.BytesReceived) > t.maxWriteSize {
        e = common.ErrQuotaExceeded
    }
    t.lock.Lock()
    t.bytesReceived += int64(written)
    if e == nil && t.maxWriteSize > 0 && t.bytesReceived > t.maxWriteSize {
        e = errQuotaExceeded
    }
    t.lock.Unlock()
    if e != nil {
        t.TransferError(e)
        return written, e
    }
    t.handleThrottle()
    t.HandleThrottle()
    return written, e
}
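In the rewritten WriteAt the byte counters live on the embedded BaseTransfer and are updated with sync/atomic instead of a per-transfer mutex, and the residual write budget (maxWriteSize) is checked after every write so an oversized upload fails mid-stream with ErrQuotaExceeded. A compact sketch of that counter-plus-budget pattern; the types and limits are illustrative:

package main

import (
    "errors"
    "fmt"
    "sync/atomic"
)

var errQuotaExceeded = errors.New("denying write due to space limit")

// countingWriter tracks received bytes atomically and enforces a maximum write size.
type countingWriter struct {
    bytesReceived int64 // updated with sync/atomic, safe for concurrent writes
    maxWriteSize  int64 // 0 means unlimited
}

func (w *countingWriter) Write(p []byte) (int, error) {
    n := len(p) // pretend the data was written somewhere
    atomic.AddInt64(&w.bytesReceived, int64(n))
    if w.maxWriteSize > 0 && atomic.LoadInt64(&w.bytesReceived) > w.maxWriteSize {
        return n, errQuotaExceeded
    }
    return n, nil
}

func main() {
    w := &countingWriter{maxWriteSize: 10}
    fmt.Println(w.Write(make([]byte, 8))) // 8 <nil>
    fmt.Println(w.Write(make([]byte, 8))) // 8 denying write due to space limit
}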
@@ -126,147 +102,74 @@ func (t *Transfer) WriteAt(p []byte, off int64) (n int, err error) {
// and executes any defined action.
// If there is an error no action will be executed and, in atomic mode, we try to delete
// the temporary file
func (t *Transfer) Close() error {
    t.lock.Lock()
    defer t.lock.Unlock()
    if t.isFinished {
        return errTransferClosed
func (t *transfer) Close() error {
    if err := t.setFinished(); err != nil {
        return err
    }
    err := t.closeIO()
    defer removeTransfer(t) //nolint:errcheck
    t.isFinished = true
    numFiles := 0
    if t.isNewFile {
        numFiles = 1
    errBaseClose := t.BaseTransfer.Close()
    if errBaseClose != nil {
        err = errBaseClose
    }
    metrics.TransferCompleted(t.bytesSent, t.bytesReceived, t.transferType, t.transferError)
    if t.transferError == errQuotaExceeded && t.file != nil {
        // if quota is exceeded we try to remove the partial file for uploads to local filesystem
        err = os.Remove(t.file.Name())
        if err == nil {
            numFiles--
            t.bytesReceived = 0
            t.minWriteOffset = 0
        }
        logger.Warn(logSender, t.connectionID, "upload denied due to space limit, delete temporary file: %#v, deletion error: %v",
            t.file.Name(), err)
    } else if t.transferType == transferUpload && t.file != nil && t.file.Name() != t.path {
        if t.transferError == nil || uploadMode == uploadModeAtomicWithResume {
            err = os.Rename(t.file.Name(), t.path)
            logger.Debug(logSender, t.connectionID, "atomic upload completed, rename: %#v -> %#v, error: %v",
                t.file.Name(), t.path, err)
        } else {
            err = os.Remove(t.file.Name())
            logger.Warn(logSender, t.connectionID, "atomic upload completed with error: \"%v\", delete temporary file: %#v, "+
                "deletion error: %v", t.transferError, t.file.Name(), err)
            if err == nil {
                numFiles--
                t.bytesReceived = 0
                t.minWriteOffset = 0
            }
        }
    }
    elapsed := time.Since(t.start).Nanoseconds() / 1000000
    if t.transferType == transferDownload {
        logger.TransferLog(downloadLogSender, t.path, elapsed, t.bytesSent, t.user.Username, t.connectionID, t.protocol)
        go executeAction(newActionNotification(t.user, operationDownload, t.path, "", "", t.bytesSent, t.transferError)) //nolint:errcheck
    } else {
        logger.TransferLog(uploadLogSender, t.path, elapsed, t.bytesReceived, t.user.Username, t.connectionID, t.protocol)
        go executeAction(newActionNotification(t.user, operationUpload, t.path, "", "", t.bytesReceived+t.minWriteOffset, //nolint:errcheck
            t.transferError))
    }
    if t.transferError != nil {
        logger.Warn(logSender, t.connectionID, "transfer error: %v, path: %#v", t.transferError, t.path)
        if err == nil {
            err = t.transferError
        }
    }
    t.updateQuota(numFiles)
    return err
    return t.Connection.GetFsError(err)
}

func (t *Transfer) closeIO() error {
func (t *transfer) closeIO() error {
    var err error
    if t.writerAt != nil {
    if t.File != nil {
        err = t.File.Close()
    } else if t.writerAt != nil {
        err = t.writerAt.Close()
        if err != nil {
            t.transferError = err
        t.Lock()
        // we set ErrTransfer here so quota is not updated, in this case the uploads are atomic
        if err != nil && t.ErrTransfer == nil {
            t.ErrTransfer = err
        }
        t.Unlock()
    } else if t.readerAt != nil {
        err = t.readerAt.Close()
    } else {
        err = t.file.Close()
    }
    return err
}

func (t *Transfer) updateQuota(numFiles int) bool {
    // S3 uploads are atomic, if there is an error nothing is uploaded
    if t.file == nil && t.transferError != nil {
        return false
    }
    if t.transferType == transferUpload && (numFiles != 0 || t.bytesReceived > 0) {
        vfolder, err := t.user.GetVirtualFolderForPath(path.Dir(t.requestPath))
        if err == nil {
            dataprovider.UpdateVirtualFolderQuota(vfolder.BaseVirtualFolder, numFiles, //nolint:errcheck
                t.bytesReceived-t.initialSize, false)
            if vfolder.IsIncludedInUserQuota() {
                dataprovider.UpdateUserQuota(t.user, numFiles, t.bytesReceived-t.initialSize, false) //nolint:errcheck
            }
        } else {
            dataprovider.UpdateUserQuota(t.user, numFiles, t.bytesReceived-t.initialSize, false) //nolint:errcheck
        }
        return true
    }
    return false
}

func (t *Transfer) handleThrottle() {
    var wantedBandwidth int64
    var trasferredBytes int64
    if t.transferType == transferDownload {
        wantedBandwidth = t.user.DownloadBandwidth
        trasferredBytes = t.bytesSent
    } else {
        wantedBandwidth = t.user.UploadBandwidth
        trasferredBytes = t.bytesReceived
    }
    if wantedBandwidth > 0 {
        // real and wanted elapsed as milliseconds, bytes as kilobytes
        realElapsed := time.Since(t.start).Nanoseconds() / 1000000
        // trasferredBytes / 1000 = KB/s, we multiply for 1000 to get milliseconds
        wantedElapsed := 1000 * (trasferredBytes / 1000) / wantedBandwidth
        if wantedElapsed > realElapsed {
            toSleep := time.Duration(wantedElapsed - realElapsed)
            time.Sleep(toSleep * time.Millisecond)
        }
func (t *transfer) setFinished() error {
    t.Lock()
    defer t.Unlock()
    if t.isFinished {
        return common.ErrTransferClosed
    }
    t.isFinished = true
    return nil
}

// used for ssh commands.
// It reads from src until EOF so it does not treat an EOF from Read as an error to be reported.
// EOF from Write is reported as error
func (t *Transfer) copyFromReaderToWriter(dst io.Writer, src io.Reader) (int64, error) {
func (t *transfer) copyFromReaderToWriter(dst io.Writer, src io.Reader) (int64, error) {
    defer t.Connection.RemoveTransfer(t)

    var written int64
    var err error

    if t.maxWriteSize < 0 {
        return 0, errQuotaExceeded
        return 0, common.ErrQuotaExceeded
    }
    isDownload := t.GetType() == common.TransferDownload
    buf := make([]byte, 32768)
    for {
        t.lastActivity = time.Now()
        t.Connection.UpdateLastActivity()
        nr, er := src.Read(buf)
        if nr > 0 {
            nw, ew := dst.Write(buf[0:nr])
            if nw > 0 {
                written += int64(nw)
                if t.transferType == transferDownload {
                    t.bytesSent = written
                if isDownload {
                    atomic.StoreInt64(&t.BytesSent, written)
                } else {
                    t.bytesReceived = written
                    atomic.StoreInt64(&t.BytesReceived, written)
                }
                if t.maxWriteSize > 0 && written > t.maxWriteSize {
                    err = errQuotaExceeded
                    err = common.ErrQuotaExceeded
                    break
                }
            }

@@ -285,11 +188,11 @@ func (t *Transfer) copyFromReaderToWriter(dst io.Writer, src io.Reader) (int64,
            }
            break
        }
        t.handleThrottle()
        t.HandleThrottle()
    }
    t.transferError = err
    if t.bytesSent > 0 || t.bytesReceived > 0 || err != nil {
        metrics.TransferCompleted(t.bytesSent, t.bytesReceived, t.transferType, t.transferError)
    t.ErrTransfer = err
    if written > 0 || err != nil {
        metrics.TransferCompleted(atomic.LoadInt64(&t.BytesSent), atomic.LoadInt64(&t.BytesReceived), t.GetType(), t.ErrTransfer)
    }
    return written, err
}
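The removed handleThrottle (its logic now lives in the shared HandleThrottle) computes how long the transfer should have taken at the configured bandwidth and sleeps for the difference: with the bandwidth in KB/s, wantedElapsed = 1000 * (transferredBytes / 1000) / bandwidth, both sides in milliseconds. A small sketch of that computation with a worked example; the function name is illustrative:

package main

import (
    "fmt"
    "time"
)

// throttleSleep returns how long a transfer should pause so that the average
// rate does not exceed bandwidthKBs (KB/s). transferred is in bytes, elapsed
// is how long the transfer has been running.
func throttleSleep(transferred int64, bandwidthKBs int64, elapsed time.Duration) time.Duration {
    if bandwidthKBs <= 0 {
        return 0 // unlimited
    }
    realElapsedMs := elapsed.Milliseconds()
    // transferred/1000 is KB; dividing by KB/s gives seconds, *1000 gives milliseconds
    wantedElapsedMs := 1000 * (transferred / 1000) / bandwidthKBs
    if wantedElapsedMs > realElapsedMs {
        return time.Duration(wantedElapsedMs-realElapsedMs) * time.Millisecond
    }
    return 0
}

func main() {
    // 512 KB sent in 1s with a 256 KB/s cap: it should have taken 2s, so sleep ~1s
    fmt.Println(throttleSleep(512*1000, 256, time.Second)) // 1s
}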
21 sftpgo.json

@@ -1,23 +1,26 @@
{
  "sftpd": {
    "bind_port": 2022,
    "bind_address": "",
  "common": {
    "idle_timeout": 15,
    "max_auth_tries": 0,
    "umask": "0022",
    "banner": "",
    "upload_mode": 0,
    "actions": {
      "execute_on": [],
      "hook": ""
    },
    "setstat_mode": 0,
    "proxy_protocol": 0,
    "proxy_allowed": []
  },
  "sftpd": {
    "bind_port": 2022,
    "bind_address": "",
    "max_auth_tries": 0,
    "banner": "",
    "host_keys": [],
    "kex_algorithms": [],
    "ciphers": [],
    "macs": [],
    "trusted_user_ca_keys": [],
    "login_banner_file": "",
    "setstat_mode": 0,
    "enabled_ssh_commands": [
      "md5sum",
      "sha1sum",

@@ -25,9 +28,7 @@
      "pwd",
      "scp"
    ],
    "keyboard_interactive_auth_hook": "",
    "proxy_protocol": 0,
    "proxy_allowed": []
    "keyboard_interactive_auth_hook": ""
  },
  "data_provider": {
    "driver": "sqlite",
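The default sftpgo.json above gains a top-level "common" section for the settings that are no longer SFTP-specific (idle_timeout, upload_mode, actions, proxy settings), while "sftpd" keeps only the SSH-related options. A minimal sketch of decoding such a section with encoding/json; the struct below only mirrors some keys visible in this diff and is not sftpgo's actual configuration type:

package main

import (
    "encoding/json"
    "fmt"
)

// commonConfig is an illustrative stand-in that mirrors keys from the "common"
// section shown above; it is not the real sftpgo configuration struct.
type commonConfig struct {
    IdleTimeout  int    `json:"idle_timeout"`
    MaxAuthTries int    `json:"max_auth_tries"`
    Umask        string `json:"umask"`
    UploadMode   int    `json:"upload_mode"`
    Actions      struct {
        ExecuteOn []string `json:"execute_on"`
        Hook      string   `json:"hook"`
    } `json:"actions"`
    ProxyProtocol int      `json:"proxy_protocol"`
    ProxyAllowed  []string `json:"proxy_allowed"`
}

func main() {
    raw := []byte(`{"common":{"idle_timeout":15,"upload_mode":0,"actions":{"execute_on":["upload"],"hook":""}}}`)
    var cfg struct {
        Common commonConfig `json:"common"`
    }
    if err := json.Unmarshal(raw, &cfg); err != nil {
        panic(err)
    }
    fmt.Printf("idle_timeout: %d, actions execute_on: %v\n", cfg.Common.IdleTimeout, cfg.Common.Actions.ExecuteOn)
}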
@@ -1,15 +0,0 @@
// +build !windows

package utils

import (
    "syscall"

    "github.com/drakkan/sftpgo/logger"
)

// SetUmask sets umask on unix systems
func SetUmask(umask int, configValue string) {
    logger.Debug(logSender, "", "set umask to %v (%v)", configValue, umask)
    syscall.Umask(umask)
}

@@ -1,8 +0,0 @@
package utils

import "github.com/drakkan/sftpgo/logger"

// SetUmask does nothing on windows
func SetUmask(umask int, configValue string) {
    logger.Debug(logSender, "", "umask not available on windows, configured value %v (%v)", configValue, umask)
}

@@ -178,7 +178,7 @@ func (fs GCSFs) Create(name string, flag int) (*os.File, *PipeWriter, func(), er
        defer objectWriter.Close()
        n, err := io.Copy(objectWriter, r)
        r.CloseWithError(err) //nolint:errcheck // the returned error is always null
        p.Done(GetSFTPError(fs, err))
        p.Done(err)
        fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, readed bytes: %v, err: %v", name, n, err)
        metrics.GCSTransferCompleted(n, 0, err)
    }()

@@ -206,7 +206,7 @@ func (fs S3Fs) Create(name string, flag int) (*os.File, *PipeWriter, func(), err
            u.PartSize = fs.config.UploadPartSize
        })
        r.CloseWithError(err) //nolint:errcheck // the returned error is always null
        p.Done(GetSFTPError(fs, err))
        p.Done(err)
        fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, response: %v, readed bytes: %v, err: %+v",
            name, response, r.GetReadedBytes(), err)
        metrics.S3TransferCompleted(r.GetReadedBytes(), 0, err)

18 vfs/vfs.go

@@ -12,7 +12,6 @@ import (
    "time"

    "github.com/eikenb/pipeat"
    "github.com/pkg/sftp"

    "github.com/drakkan/sftpgo/logger"
)

@@ -154,6 +153,11 @@ func (p *PipeWriter) WriteAt(data []byte, off int64) (int, error) {
    return p.writer.WriteAt(data, off)
}

// Write is a wrapper for pipeat Write
func (p *PipeWriter) Write(data []byte) (int, error) {
    return p.writer.Write(data)
}

// IsDirectory checks if a path exists and is a directory
func IsDirectory(fs Fs, path string) (bool, error) {
    fileInfo, err := fs.Stat(path)

@@ -163,18 +167,6 @@ func IsDirectory(fs Fs, path string) (bool, error) {
    return fileInfo.IsDir(), err
}

// GetSFTPError returns an sftp error from a filesystem error
func GetSFTPError(fs Fs, err error) error {
    if fs.IsNotExist(err) {
        return sftp.ErrSSHFxNoSuchFile
    } else if fs.IsPermission(err) {
        return sftp.ErrSSHFxPermissionDenied
    } else if err != nil {
        return sftp.ErrSSHFxFailure
    }
    return nil
}

// IsLocalOsFs returns true if fs is the local filesystem implementation
func IsLocalOsFs(fs Fs) bool {
    return fs.Name() == osFsName