2024-01-01 10:31:45 +00:00
|
|
|
// Copyright (C) 2019 Nicola Murino
|
2022-07-17 18:16:00 +00:00
|
|
|
//
|
|
|
|
// This program is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Affero General Public License as published
|
|
|
|
// by the Free Software Foundation, version 3.
|
|
|
|
//
|
|
|
|
// This program is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Affero General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Affero General Public License
|
2023-01-03 09:18:30 +00:00
|
|
|
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
2022-07-17 18:16:00 +00:00
|
|
|
|
2020-07-24 21:39:38 +00:00
|
|
|
// Package common defines code shared among file transfer packages and protocols
|
|
|
|
package common
|
|
|
|
|
|
|
|
import (
|
2020-07-30 20:33:49 +00:00
|
|
|
"context"
|
2020-07-24 21:39:38 +00:00
|
|
|
"errors"
|
|
|
|
"fmt"
|
|
|
|
"net"
|
2020-07-30 20:33:49 +00:00
|
|
|
"net/http"
|
|
|
|
"net/url"
|
2020-07-24 21:39:38 +00:00
|
|
|
"os"
|
2020-07-30 20:33:49 +00:00
|
|
|
"os/exec"
|
|
|
|
"path/filepath"
|
2021-10-29 17:55:18 +00:00
|
|
|
"strconv"
|
2020-07-30 20:33:49 +00:00
|
|
|
"strings"
|
2020-07-24 21:39:38 +00:00
|
|
|
"sync"
|
2020-09-18 16:15:28 +00:00
|
|
|
"sync/atomic"
|
2020-07-24 21:39:38 +00:00
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/pires/go-proxyproto"
|
2023-05-12 16:34:59 +00:00
|
|
|
"github.com/sftpgo/sdk/plugin/notifier"
|
2020-07-24 21:39:38 +00:00
|
|
|
|
2022-07-24 14:18:54 +00:00
|
|
|
"github.com/drakkan/sftpgo/v2/internal/command"
|
|
|
|
"github.com/drakkan/sftpgo/v2/internal/dataprovider"
|
|
|
|
"github.com/drakkan/sftpgo/v2/internal/httpclient"
|
|
|
|
"github.com/drakkan/sftpgo/v2/internal/logger"
|
|
|
|
"github.com/drakkan/sftpgo/v2/internal/metric"
|
|
|
|
"github.com/drakkan/sftpgo/v2/internal/plugin"
|
2023-02-19 18:03:45 +00:00
|
|
|
"github.com/drakkan/sftpgo/v2/internal/smtp"
|
2022-07-24 14:18:54 +00:00
|
|
|
"github.com/drakkan/sftpgo/v2/internal/util"
|
|
|
|
"github.com/drakkan/sftpgo/v2/internal/vfs"
|
2020-07-24 21:39:38 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// constants
const (
	// log sender names used to tag log events per operation type
	logSender         = "common"
	uploadLogSender   = "Upload"
	downloadLogSender = "Download"
	renameLogSender   = "Rename"
	rmdirLogSender    = "Rmdir"
	mkdirLogSender    = "Mkdir"
	symlinkLogSender  = "Symlink"
	removeLogSender   = "Remove"
	chownLogSender    = "Chown"
	chmodLogSender    = "Chmod"
	chtimesLogSender  = "Chtimes"
	copyLogSender     = "Copy"
	truncateLogSender = "Truncate"
	// operation names passed to hooks/notifiers
	operationDownload      = "download"
	operationUpload        = "upload"
	operationFirstDownload = "first-download"
	operationFirstUpload   = "first-upload"
	operationDelete        = "delete"
	operationCopy          = "copy"
	// OperationPreDownload is the pre-download action name
	OperationPreDownload = "pre-download"
	// OperationPreUpload is the pre-upload action name
	OperationPreUpload = "pre-upload"
	operationPreDelete = "pre-delete"
	operationRename    = "rename"
	operationMkdir     = "mkdir"
	operationRmdir     = "rmdir"
	// OperationSSHCmd is the SSH command action name
	OperationSSHCmd = "ssh_cmd"
	// reference layout for parsing/formatting chtimes values
	chtimesFormat = "2006-01-02T15:04:05" // YYYY-MM-DDTHH:MM:SS
	// how often idle connections are checked (must be a multiple of
	// periodicTimeoutCheckInterval, see startPeriodicChecks)
	idleTimeoutCheckInterval     = 3 * time.Minute
	periodicTimeoutCheckInterval = 1 * time.Minute
)
|
|
|
|
|
|
|
|
// Stat flags. These are bit flags, they can be combined to indicate
// which attributes a set-stat request wants to change.
const (
	// StatAttrUIDGID indicates that UID and GID must be set
	StatAttrUIDGID = 1
	// StatAttrPerms indicates that permissions must be set
	StatAttrPerms = 2
	// StatAttrTimes indicates that access/modification times must be set
	StatAttrTimes = 4
	// StatAttrSize indicates that the file size must be set (truncate)
	StatAttrSize = 8
)
|
|
|
|
|
|
|
|
// Transfer types
const (
	// TransferUpload identifies an upload transfer
	TransferUpload = iota
	// TransferDownload identifies a download transfer
	TransferDownload
)
|
|
|
|
|
|
|
|
// Supported protocols
const (
	ProtocolSFTP   = "SFTP"
	ProtocolSCP    = "SCP"
	ProtocolSSH    = "SSH"
	ProtocolFTP    = "FTP"
	ProtocolWebDAV = "DAV"
	ProtocolHTTP   = "HTTP"
	// ProtocolHTTPShare identifies HTTP shared links
	ProtocolHTTPShare = "HTTPShare"
	// ProtocolDataRetention identifies data retention checks
	ProtocolDataRetention = "DataRetention"
	// ProtocolOIDC identifies OpenID Connect logins
	ProtocolOIDC = "OIDC"
	// protocolEventAction identifies operations triggered by event actions
	protocolEventAction = "EventAction"
)
|
|
|
|
|
|
|
|
// Upload modes. Values are bit flags so the "store on error" options can
// be combined with the atomic modes, see Configuration.UploadMode.
const (
	// UploadModeStandard means files are uploaded directly to the requested path
	UploadModeStandard = 0
	// UploadModeAtomic means files are uploaded to a temporary path and
	// renamed to the requested path when the upload ends
	UploadModeAtomic = 1
	// UploadModeAtomicWithResume is like UploadModeAtomic but the temporary
	// file is kept on error so the upload can be resumed
	UploadModeAtomicWithResume = 2
	// UploadModeS3StoreOnError stores S3 uploads even on client-side errors
	UploadModeS3StoreOnError = 4
	// UploadModeGCSStoreOnError stores GCS uploads even on client-side errors
	UploadModeGCSStoreOnError = 8
	// UploadModeAzureBlobStoreOnError stores Azure Blob uploads even on
	// client-side errors
	UploadModeAzureBlobStoreOnError = 16
)
|
|
|
|
|
2021-05-08 17:45:21 +00:00
|
|
|
func init() {
|
|
|
|
Connections.clients = clientsMap{
|
|
|
|
clients: make(map[string]int),
|
|
|
|
}
|
2022-04-14 17:07:41 +00:00
|
|
|
Connections.perUserConns = make(map[string]int)
|
2022-10-20 16:17:13 +00:00
|
|
|
Connections.mapping = make(map[string]int)
|
|
|
|
Connections.sshMapping = make(map[string]int)
|
2021-05-08 17:45:21 +00:00
|
|
|
}
|
|
|
|
|
2020-07-24 21:39:38 +00:00
|
|
|
// errors definitions
var (
	// errors returned to clients, they map to protocol-level failures
	ErrPermissionDenied  = errors.New("permission denied")
	ErrNotExist          = errors.New("no such file or directory")
	ErrOpUnsupported     = errors.New("operation unsupported")
	ErrGenericFailure    = errors.New("failure")
	ErrQuotaExceeded     = errors.New("denying write due to space limit")
	ErrReadQuotaExceeded = errors.New("denying read due to quota limit")
	ErrConnectionDenied  = errors.New("you are not allowed to connect")
	ErrNoBinding         = errors.New("no binding configured")
	ErrCrtRevoked        = errors.New("your certificate has been revoked")
	ErrNoCredentials     = errors.New("no credential provided")
	ErrInternalFailure   = errors.New("internal failure")
	ErrTransferAborted   = errors.New("transfer aborted")
	// ErrShuttingDown is returned while a graceful shutdown is in progress
	ErrShuttingDown = errors.New("the service is shutting down")
	// internal errors, never exposed outside this package
	errNoTransfer       = errors.New("requested transfer not found")
	errTransferMismatch = errors.New("transfer mismatch")
)
|
|
|
|
|
|
|
|
var (
	// Config is the configuration for the supported protocols
	Config Configuration
	// Connections is the list of active connections
	Connections ActiveConnections
	// QuotaScans is the list of active quota scans
	QuotaScans ActiveScans
	// ActiveMetadataChecks holds the active metadata checks
	ActiveMetadataChecks MetadataChecks
	// transfersChecker periodically checks ongoing transfers, see Initialize
	transfersChecker TransfersChecker
	// supportedProtocols lists every protocol name accepted in configurations
	supportedProtocols = []string{ProtocolSFTP, ProtocolSCP, ProtocolSSH, ProtocolFTP, ProtocolWebDAV,
		ProtocolHTTP, ProtocolHTTPShare, ProtocolOIDC}
	// disconnHookProtocols are the protocols for which the disconnect hook is executed
	disconnHookProtocols = []string{ProtocolSFTP, ProtocolSCP, ProtocolSSH, ProtocolFTP}
	// the map key is the protocol, for each protocol we can have multiple rate limiters
	rateLimiters map[string][]*rateLimiter
	// isShuttingDown is set when a graceful shutdown starts, see WaitForTransfers
	isShuttingDown atomic.Bool
	// ftpLoginCommands are the FTP commands used during authentication
	ftpLoginCommands = []string{"PASS", "USER"}
)
|
|
|
|
|
|
|
|
// Initialize sets the common configuration.
// It copies c into the package-level Config, starts the periodic checks
// and initializes rate limiters, the defender and the allow list
// according to the provided configuration.
// isShared is 1 when data is shared across multiple instances.
// Note: the statement order matters, Config must be assigned before the
// helpers below read it.
func Initialize(c Configuration, isShared int) error {
	isShuttingDown.Store(false)
	util.SetUmask(c.Umask)
	Config = c
	Config.Actions.ExecuteOn = util.RemoveDuplicates(Config.Actions.ExecuteOn, true)
	Config.Actions.ExecuteSync = util.RemoveDuplicates(Config.Actions.ExecuteSync, true)
	Config.ProxyAllowed = util.RemoveDuplicates(Config.ProxyAllowed, true)
	Config.idleLoginTimeout = 2 * time.Minute
	Config.idleTimeoutAsDuration = time.Duration(Config.IdleTimeout) * time.Minute
	startPeriodicChecks(periodicTimeoutCheckInterval, isShared)
	// reset optional components, they are re-created below if enabled
	Config.defender = nil
	Config.allowList = nil
	Config.rateLimitersList = nil
	// build the per-protocol rate limiters map from the enabled configs
	rateLimiters = make(map[string][]*rateLimiter)
	for _, rlCfg := range c.RateLimitersConfig {
		if rlCfg.isEnabled() {
			if err := rlCfg.validate(); err != nil {
				return fmt.Errorf("rate limiters initialization error: %w", err)
			}
			rateLimiter := rlCfg.getLimiter()
			for _, protocol := range rlCfg.Protocols {
				rateLimiters[protocol] = append(rateLimiters[protocol], rateLimiter)
			}
		}
	}
	// the safe list exempts listed IPs from rate limiting, it is only
	// needed if at least one rate limiter is active
	if len(rateLimiters) > 0 {
		rateLimitersList, err := dataprovider.NewIPList(dataprovider.IPListTypeRateLimiterSafeList)
		if err != nil {
			return fmt.Errorf("unable to initialize ratelimiters list: %w", err)
		}
		Config.rateLimitersList = rateLimitersList
	}
	if c.DefenderConfig.Enabled {
		if !util.Contains(supportedDefenderDrivers, c.DefenderConfig.Driver) {
			return fmt.Errorf("unsupported defender driver %q", c.DefenderConfig.Driver)
		}
		var defender Defender
		var err error
		switch c.DefenderConfig.Driver {
		case DefenderDriverProvider:
			defender, err = newDBDefender(&c.DefenderConfig)
		default:
			defender, err = newInMemoryDefender(&c.DefenderConfig)
		}
		if err != nil {
			return fmt.Errorf("defender initialization error: %v", err)
		}
		logger.Info(logSender, "", "defender initialized with config %+v", c.DefenderConfig)
		Config.defender = defender
	}
	if c.AllowListStatus > 0 {
		allowList, err := dataprovider.NewIPList(dataprovider.IPListTypeAllowList)
		if err != nil {
			return fmt.Errorf("unable to initialize the allow list: %w", err)
		}
		logger.Info(logSender, "", "allow list initialized")
		Config.allowList = allowList
	}
	if err := c.initializeProxyProtocol(); err != nil {
		return err
	}
	// propagate shared settings to the vfs and dataprovider packages
	vfs.SetTempPath(c.TempPath)
	dataprovider.SetTempPath(c.TempPath)
	vfs.SetAllowSelfConnections(c.AllowSelfConnections)
	vfs.SetRenameMode(c.RenameMode)
	vfs.SetReadMetadataMode(c.Metadata.Read)
	vfs.SetResumeMaxSize(c.ResumeMaxSize)
	vfs.SetUploadMode(c.UploadMode)
	dataprovider.SetAllowSelfConnections(c.AllowSelfConnections)
	transfersChecker = getTransfersChecker(isShared)
	return nil
}
|
|
|
|
|
2022-10-22 09:56:41 +00:00
|
|
|
// CheckClosing returns an error if the service is closing
|
|
|
|
func CheckClosing() error {
|
|
|
|
if isShuttingDown.Load() {
|
|
|
|
return ErrShuttingDown
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// WaitForTransfers waits, for the specified grace time, for currently ongoing
|
|
|
|
// client-initiated transfer sessions to completes.
|
|
|
|
// A zero graceTime means no wait
|
|
|
|
func WaitForTransfers(graceTime int) {
|
|
|
|
if graceTime == 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if isShuttingDown.Swap(true) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if activeHooks.Load() == 0 && getActiveConnections() == 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
graceTimer := time.NewTimer(time.Duration(graceTime) * time.Second)
|
|
|
|
ticker := time.NewTicker(3 * time.Second)
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-ticker.C:
|
|
|
|
hooks := activeHooks.Load()
|
|
|
|
logger.Info(logSender, "", "active hooks: %d", hooks)
|
|
|
|
if hooks == 0 && getActiveConnections() == 0 {
|
|
|
|
logger.Info(logSender, "", "no more active connections, graceful shutdown")
|
|
|
|
ticker.Stop()
|
|
|
|
graceTimer.Stop()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
case <-graceTimer.C:
|
|
|
|
logger.Info(logSender, "", "grace time expired, hard shutdown")
|
|
|
|
ticker.Stop()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// getActiveConnections returns the number of connections with active transfers
|
|
|
|
func getActiveConnections() int {
|
|
|
|
var activeConns int
|
|
|
|
|
|
|
|
Connections.RLock()
|
|
|
|
for _, c := range Connections.connections {
|
|
|
|
if len(c.GetTransfers()) > 0 {
|
|
|
|
activeConns++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Connections.RUnlock()
|
|
|
|
|
|
|
|
logger.Info(logSender, "", "number of connections with active transfers: %d", activeConns)
|
|
|
|
return activeConns
|
|
|
|
}
|
|
|
|
|
2021-04-18 10:31:06 +00:00
|
|
|
// LimitRate blocks until all the configured rate limiters
|
|
|
|
// allow one event to happen.
|
|
|
|
// It returns an error if the time to wait exceeds the max
|
|
|
|
// allowed delay
|
2021-04-19 06:14:04 +00:00
|
|
|
func LimitRate(protocol, ip string) (time.Duration, error) {
|
2023-02-09 08:33:33 +00:00
|
|
|
if Config.rateLimitersList != nil {
|
|
|
|
isListed, _, err := Config.rateLimitersList.IsListed(ip, protocol)
|
|
|
|
if err == nil && isListed {
|
|
|
|
return 0, nil
|
|
|
|
}
|
|
|
|
}
|
2021-04-18 10:31:06 +00:00
|
|
|
for _, limiter := range rateLimiters[protocol] {
|
2023-02-09 08:33:33 +00:00
|
|
|
if delay, err := limiter.Wait(ip, protocol); err != nil {
|
|
|
|
logger.Debug(logSender, "", "protocol %s ip %s: %v", protocol, ip, err)
|
2021-04-19 06:14:04 +00:00
|
|
|
return delay, err
|
2021-04-18 10:31:06 +00:00
|
|
|
}
|
|
|
|
}
|
2021-04-19 06:14:04 +00:00
|
|
|
return 0, nil
|
2021-01-02 13:05:09 +00:00
|
|
|
}
|
|
|
|
|
2022-03-24 09:21:13 +00:00
|
|
|
// Reload reloads the whitelist, the IP filter plugin and the defender's block and safe lists
func Reload() error {
	// ask the IP filter plugin to reload its own lists; provider-backed
	// lists are read on demand and need no explicit reload here
	plugin.Handler.ReloadFilter()
	return nil
}
|
|
|
|
|
2021-01-02 13:05:09 +00:00
|
|
|
// IsBanned returns true if the specified IP address is banned
|
2023-02-09 08:33:33 +00:00
|
|
|
func IsBanned(ip, protocol string) bool {
|
2023-02-13 12:45:45 +00:00
|
|
|
if plugin.Handler.IsIPBanned(ip, protocol) {
|
2022-03-23 09:58:01 +00:00
|
|
|
return true
|
|
|
|
}
|
2021-01-02 13:05:09 +00:00
|
|
|
if Config.defender == nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2023-02-09 08:33:33 +00:00
|
|
|
return Config.defender.IsBanned(ip, protocol)
|
2021-01-02 13:05:09 +00:00
|
|
|
}
|
|
|
|
|
2021-01-02 18:33:24 +00:00
|
|
|
// GetDefenderBanTime returns the ban time for the given IP
|
|
|
|
// or nil if the IP is not banned or the defender is disabled
|
2021-12-25 11:08:07 +00:00
|
|
|
func GetDefenderBanTime(ip string) (*time.Time, error) {
|
2021-01-02 18:33:24 +00:00
|
|
|
if Config.defender == nil {
|
2021-12-25 11:08:07 +00:00
|
|
|
return nil, nil
|
2021-01-02 18:33:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return Config.defender.GetBanTime(ip)
|
|
|
|
}
|
|
|
|
|
2021-06-07 19:52:43 +00:00
|
|
|
// GetDefenderHosts returns hosts that are banned or for which some violations have been detected
|
2022-01-16 11:09:17 +00:00
|
|
|
func GetDefenderHosts() ([]dataprovider.DefenderEntry, error) {
|
2021-06-07 19:52:43 +00:00
|
|
|
if Config.defender == nil {
|
2021-12-25 11:08:07 +00:00
|
|
|
return nil, nil
|
2021-06-07 19:52:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return Config.defender.GetHosts()
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetDefenderHost returns a defender host by ip, if any
|
2022-01-16 11:09:17 +00:00
|
|
|
func GetDefenderHost(ip string) (dataprovider.DefenderEntry, error) {
|
2021-06-07 19:52:43 +00:00
|
|
|
if Config.defender == nil {
|
2022-01-16 11:09:17 +00:00
|
|
|
return dataprovider.DefenderEntry{}, errors.New("defender is disabled")
|
2021-06-07 19:52:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return Config.defender.GetHost(ip)
|
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteDefenderHost removes the specified IP address from the defender lists
|
|
|
|
func DeleteDefenderHost(ip string) bool {
|
2021-01-02 18:33:24 +00:00
|
|
|
if Config.defender == nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2021-06-07 19:52:43 +00:00
|
|
|
return Config.defender.DeleteHost(ip)
|
2021-01-02 18:33:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// GetDefenderScore returns the score for the given IP
|
2021-12-25 11:08:07 +00:00
|
|
|
func GetDefenderScore(ip string) (int, error) {
|
2021-01-02 18:33:24 +00:00
|
|
|
if Config.defender == nil {
|
2021-12-25 11:08:07 +00:00
|
|
|
return 0, nil
|
2021-01-02 18:33:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return Config.defender.GetScore(ip)
|
|
|
|
}
|
|
|
|
|
2021-01-02 13:05:09 +00:00
|
|
|
// AddDefenderEvent adds the specified defender event for the given IP
|
2023-02-09 08:33:33 +00:00
|
|
|
func AddDefenderEvent(ip, protocol string, event HostEvent) {
|
2021-01-02 13:05:09 +00:00
|
|
|
if Config.defender == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2023-02-09 08:33:33 +00:00
|
|
|
Config.defender.AddEvent(ip, protocol, event)
|
2020-07-24 21:39:38 +00:00
|
|
|
}
|
|
|
|
|
2023-02-19 18:03:45 +00:00
|
|
|
// startPeriodicChecks starts the event scheduler and registers the
// recurring maintenance jobs: overquota transfers check, SMTP config
// reload (shared setups only) and idle connection check.
// It panics (via util.PanicOnError) if a job cannot be scheduled.
func startPeriodicChecks(duration time.Duration, isShared int) {
	startEventScheduler()
	spec := fmt.Sprintf("@every %s", duration)
	_, err := eventScheduler.AddFunc(spec, Connections.checkTransfers)
	util.PanicOnError(err)
	logger.Info(logSender, "", "scheduled overquota transfers check, schedule %q", spec)
	// isShared == 1 means configs may be changed by other instances,
	// so reload them periodically
	if isShared == 1 {
		logger.Info(logSender, "", "add reload configs task")
		_, err := eventScheduler.AddFunc("@every 10m", smtp.ReloadProviderConf)
		util.PanicOnError(err)
	}
	if Config.IdleTimeout > 0 {
		// idle checks run less often than the base periodic check
		ratio := idleTimeoutCheckInterval / periodicTimeoutCheckInterval
		spec = fmt.Sprintf("@every %s", duration*ratio)
		_, err = eventScheduler.AddFunc(spec, Connections.checkIdles)
		util.PanicOnError(err)
		logger.Info(logSender, "", "scheduled idle connections check, schedule %q", spec)
	}
}
|
|
|
|
|
|
|
|
// ActiveTransfer defines the interface for the current active transfers
type ActiveTransfer interface {
	// GetID returns the transfer ID, unique within the connection
	GetID() int64
	// GetType returns the transfer type: TransferUpload or TransferDownload
	GetType() int
	// GetSize returns the transferred size
	GetSize() int64
	// GetDownloadedSize returns the downloaded size
	GetDownloadedSize() int64
	// GetUploadedSize returns the uploaded size
	GetUploadedSize() int64
	// GetVirtualPath returns the transfer path as seen by the client
	GetVirtualPath() string
	// GetStartTime returns the time the transfer started
	GetStartTime() time.Time
	// SignalClose requests the transfer to close with the given error
	SignalClose(err error)
	// Truncate changes the size of the opened file to the given size
	Truncate(fsPath string, size int64) (int64, error)
	// GetRealFsPath returns the real filesystem path for the given fsPath
	GetRealFsPath(fsPath string) string
	// SetTimes sets access and modification times if fsPath matches the transferred file
	SetTimes(fsPath string, atime time.Time, mtime time.Time) bool
	// GetTruncatedSize returns the size to which the file was truncated
	GetTruncatedSize() int64
	// HasSizeLimit returns true if the transfer has a size limit
	HasSizeLimit() bool
}
|
|
|
|
|
|
|
|
// ActiveConnection defines the interface for the current active connections
type ActiveConnection interface {
	// GetID returns the unique connection ID
	GetID() string
	// GetUsername returns the authenticated username, if any
	GetUsername() string
	// GetRole returns the role of the authenticated user
	GetRole() string
	// GetMaxSessions returns the allowed concurrent sessions for the user
	GetMaxSessions() int
	// GetLocalAddress returns the local address the client connected to
	GetLocalAddress() string
	// GetRemoteAddress returns the client address
	GetRemoteAddress() string
	// GetClientVersion returns the client version string, if available
	GetClientVersion() string
	// GetProtocol returns the protocol, see the Protocol* constants
	GetProtocol() string
	// GetConnectionTime returns the time the connection was established
	GetConnectionTime() time.Time
	// GetLastActivity returns the time of the last client activity
	GetLastActivity() time.Time
	// GetCommand returns the last command, if any
	GetCommand() string
	// Disconnect closes the connection
	Disconnect() error
	// AddTransfer registers a new active transfer on this connection
	AddTransfer(t ActiveTransfer)
	// RemoveTransfer removes the given transfer from this connection
	RemoveTransfer(t ActiveTransfer)
	// GetTransfers returns the active transfers for this connection
	GetTransfers() []ConnectionTransfer
	// SignalTransferClose asks the given transfer to close with err
	SignalTransferClose(transferID int64, err error)
	// CloseFS releases the filesystem resources for this connection
	CloseFS() error
}
|
|
|
|
|
|
|
|
// StatAttributes defines the attributes for set stat commands.
// Flags selects which fields are meaningful, see the StatAttr* bit flags.
type StatAttributes struct {
	// Mode is the file mode, used when StatAttrPerms is set
	Mode os.FileMode
	// Atime is the access time, used when StatAttrTimes is set
	Atime time.Time
	// Mtime is the modification time, used when StatAttrTimes is set
	Mtime time.Time
	// UID is the user ID, used when StatAttrUIDGID is set
	UID int
	// GID is the group ID, used when StatAttrUIDGID is set
	GID int
	// Flags is a bitmask of StatAttr* values selecting the fields to apply
	Flags int
	// Size is the target size, used when StatAttrSize is set (truncate)
	Size int64
}
|
|
|
|
|
2023-02-09 08:33:33 +00:00
|
|
|
// ConnectionTransfer defines the transfer details
type ConnectionTransfer struct {
	// ID is the transfer ID, not exposed over the API
	ID int64 `json:"-"`
	// OperationType is the operation name, e.g. "upload" or "download"
	OperationType string `json:"operation_type"`
	// StartTime is the transfer start time
	StartTime int64 `json:"start_time"`
	// Size is the transferred size
	Size int64 `json:"size"`
	// VirtualPath is the path as seen by the client
	VirtualPath string `json:"path"`
	// HasSizeLimit is true if the transfer has a size limit
	HasSizeLimit bool `json:"-"`
	// ULSize is the uploaded size
	ULSize int64 `json:"-"`
	// DLSize is the downloaded size
	DLSize int64 `json:"-"`
}
|
|
|
|
|
2023-08-12 16:51:47 +00:00
|
|
|
// MetadataConfig defines how to handle metadata for cloud storage backends
type MetadataConfig struct {
	// If not zero the metadata will be read before downloads and will be
	// available in notifications
	Read int `json:"read" mapstructure:"read"`
}
|
|
|
|
|
2020-07-24 21:39:38 +00:00
|
|
|
// Configuration defines configuration parameters common to all supported protocols
type Configuration struct {
	// Maximum idle timeout as minutes. If a client is idle for a time that exceeds this setting it will be disconnected.
	// 0 means disabled
	IdleTimeout int `json:"idle_timeout" mapstructure:"idle_timeout"`
	// UploadMode 0 means standard, the files are uploaded directly to the requested path.
	// 1 means atomic: the files are uploaded to a temporary path and renamed to the requested path
	// when the client ends the upload. Atomic mode avoids problems such as a web server that
	// serves partial files when the files are being uploaded.
	// In atomic mode if there is an upload error the temporary file is deleted and so the requested
	// upload path will not contain a partial file.
	// 2 means atomic with resume support: as atomic but if there is an upload error the temporary
	// file is renamed to the requested path and not deleted, this way a client can reconnect and resume
	// the upload.
	// 4 means files for S3 backend are stored even if a client-side upload error is detected.
	// 8 means files for Google Cloud Storage backend are stored even if a client-side upload error is detected.
	// 16 means files for Azure Blob backend are stored even if a client-side upload error is detected.
	UploadMode int `json:"upload_mode" mapstructure:"upload_mode"`
	// Actions to execute for SFTP file operations and SSH commands
	Actions ProtocolActions `json:"actions" mapstructure:"actions"`
	// SetstatMode 0 means "normal mode": requests for changing permissions and owner/group are executed.
	// 1 means "ignore mode": requests for changing permissions and owner/group are silently ignored.
	// 2 means "ignore mode for cloud fs": requests for changing permissions and owner/group are
	// silently ignored for cloud based filesystem such as S3, GCS, Azure Blob. Requests for changing
	// modification times are ignored for cloud based filesystem if they are not supported.
	SetstatMode int `json:"setstat_mode" mapstructure:"setstat_mode"`
	// RenameMode defines how to handle directory renames. By default, renaming of non-empty directories
	// is not allowed for cloud storage providers (S3, GCS, Azure Blob). Set to 1 to enable recursive
	// renames for these providers, they may be slow, there is no atomic rename API like for local
	// filesystem, so SFTPGo will recursively list the directory contents and do a rename for each entry
	RenameMode int `json:"rename_mode" mapstructure:"rename_mode"`
	// ResumeMaxSize defines the maximum size allowed, in bytes, to resume uploads on storage backends
	// with immutable objects. By default, resuming uploads is not allowed for cloud storage providers
	// (S3, GCS, Azure Blob) because SFTPGo must rewrite the entire file.
	// Set to a value greater than 0 to allow resuming uploads of files smaller than or equal to the
	// defined size.
	ResumeMaxSize int64 `json:"resume_max_size" mapstructure:"resume_max_size"`
	// TempPath defines the path for temporary files such as those used for atomic uploads or file pipes.
	// If you set this option you must make sure that the defined path exists, is accessible for writing
	// by the user running SFTPGo, and is on the same filesystem as the users home directories otherwise
	// the renaming for atomic uploads will become a copy and therefore may take a long time.
	// The temporary files are not namespaced. The default is generally fine. Leave empty for the default.
	TempPath string `json:"temp_path" mapstructure:"temp_path"`
	// Support for HAProxy PROXY protocol.
	// If you are running SFTPGo behind a proxy server such as HAProxy, AWS ELB or NGNIX, you can enable
	// the proxy protocol. It provides a convenient way to safely transport connection information
	// such as a client's address across multiple layers of NAT or TCP proxies to get the real
	// client IP address instead of the proxy IP. Both protocol versions 1 and 2 are supported.
	// - 0 means disabled
	// - 1 means proxy protocol enabled. Proxy header will be used and requests without proxy header will be accepted.
	// - 2 means proxy protocol required. Proxy header will be used and requests without proxy header will be rejected.
	// If the proxy protocol is enabled in SFTPGo then you have to enable the protocol in your proxy configuration too,
	// for example for HAProxy add "send-proxy" or "send-proxy-v2" to each server configuration line.
	ProxyProtocol int `json:"proxy_protocol" mapstructure:"proxy_protocol"`
	// List of IP addresses and IP ranges allowed to send the proxy header.
	// If proxy protocol is set to 1 and we receive a proxy header from an IP that is not in the list then the
	// connection will be accepted and the header will be ignored.
	// If proxy protocol is set to 2 and we receive a proxy header from an IP that is not in the list then the
	// connection will be rejected.
	ProxyAllowed []string `json:"proxy_allowed" mapstructure:"proxy_allowed"`
	// List of IP addresses and IP ranges for which not to read the proxy header
	ProxySkipped []string `json:"proxy_skipped" mapstructure:"proxy_skipped"`
	// Absolute path to an external program or an HTTP URL to invoke as soon as SFTPGo starts.
	// If you define an HTTP URL it will be invoked using a `GET` request.
	// Please note that SFTPGo services may not yet be available when this hook is run.
	// Leave empty to disable.
	StartupHook string `json:"startup_hook" mapstructure:"startup_hook"`
	// Absolute path to an external program or an HTTP URL to invoke after a user connects
	// and before he tries to login. It allows you to reject the connection based on the source
	// ip address. Leave empty to disable.
	PostConnectHook string `json:"post_connect_hook" mapstructure:"post_connect_hook"`
	// Absolute path to an external program or an HTTP URL to invoke after an SSH/FTP connection ends.
	// Leave empty to disable.
	PostDisconnectHook string `json:"post_disconnect_hook" mapstructure:"post_disconnect_hook"`
	// Absolute path to an external program or an HTTP URL to invoke after a data retention check completes.
	// Leave empty to disable.
	DataRetentionHook string `json:"data_retention_hook" mapstructure:"data_retention_hook"`
	// Maximum number of concurrent client connections. 0 means unlimited
	MaxTotalConnections int `json:"max_total_connections" mapstructure:"max_total_connections"`
	// Maximum number of concurrent client connections from the same host (IP). 0 means unlimited
	MaxPerHostConnections int `json:"max_per_host_connections" mapstructure:"max_per_host_connections"`
	// Defines the status of the global allow list. 0 means disabled, 1 enabled.
	// If enabled, only the listed IPs/networks can access the configured services, all other
	// client connections will be dropped before they even try to authenticate.
	// Ensure to enable this setting only after adding some allowed ip/networks from the WebAdmin/REST API
	AllowListStatus int `json:"allowlist_status" mapstructure:"allowlist_status"`
	// Allow users on this instance to use other users/virtual folders on this instance as storage backend.
	// Enable this setting if you know what you are doing.
	AllowSelfConnections int `json:"allow_self_connections" mapstructure:"allow_self_connections"`
	// Defender configuration
	DefenderConfig DefenderConfig `json:"defender" mapstructure:"defender"`
	// Rate limiter configurations
	RateLimitersConfig []RateLimiterConfig `json:"rate_limiters" mapstructure:"rate_limiters"`
	// Umask for new uploads. Leave blank to use the system default.
	Umask string `json:"umask" mapstructure:"umask"`
	// Metadata configuration
	Metadata MetadataConfig `json:"metadata" mapstructure:"metadata"`
	// IdleTimeout converted to a time.Duration, set in Initialize
	idleTimeoutAsDuration time.Duration
	// timeout for connections that have not authenticated yet, set in Initialize
	idleLoginTimeout time.Duration
	// active defender, nil if disabled
	defender Defender
	// global allow list, nil if disabled
	allowList *dataprovider.IPList
	// safe list for rate limiters, nil if no rate limiter is configured
	rateLimitersList *dataprovider.IPList
	// parsed matchers for ProxyAllowed, set by initializeProxyProtocol
	proxyAllowed []func(net.IP) bool
	// parsed matchers for ProxySkipped, set by initializeProxyProtocol
	proxySkipped []func(net.IP) bool
}
|
|
|
|
|
|
|
|
// IsAtomicUploadEnabled returns true if atomic upload is enabled
|
|
|
|
func (c *Configuration) IsAtomicUploadEnabled() bool {
|
2023-10-25 17:05:37 +00:00
|
|
|
return c.UploadMode&UploadModeAtomic != 0 || c.UploadMode&UploadModeAtomicWithResume != 0
|
2020-07-24 21:39:38 +00:00
|
|
|
}
|
|
|
|
|
2023-03-15 17:44:08 +00:00
|
|
|
// initializeProxyProtocol parses the configured proxy allowed and skipped
// IP/network lists into matcher functions.
// NOTE: the resulting matchers are stored on the package-level Config, not
// on the receiver, so this is only meaningful when called on the active
// global configuration.
func (c *Configuration) initializeProxyProtocol() error {
	// nothing to do if the proxy protocol support is disabled
	if c.ProxyProtocol > 0 {
		allowed, err := util.ParseAllowedIPAndRanges(c.ProxyAllowed)
		if err != nil {
			return fmt.Errorf("invalid proxy allowed: %w", err)
		}
		skipped, err := util.ParseAllowedIPAndRanges(c.ProxySkipped)
		if err != nil {
			return fmt.Errorf("invalid proxy skipped: %w", err)
		}
		Config.proxyAllowed = allowed
		Config.proxySkipped = skipped
	}
	return nil
}
|
|
|
|
|
2020-07-24 21:39:38 +00:00
|
|
|
// GetProxyListener returns a wrapper for the given listener that supports the
|
2021-08-05 16:38:15 +00:00
|
|
|
// HAProxy Proxy Protocol
|
2020-07-24 21:39:38 +00:00
|
|
|
func (c *Configuration) GetProxyListener(listener net.Listener) (*proxyproto.Listener, error) {
|
|
|
|
if c.ProxyProtocol > 0 {
|
2023-03-15 17:44:08 +00:00
|
|
|
defaultPolicy := proxyproto.REQUIRE
|
|
|
|
if c.ProxyProtocol == 1 {
|
|
|
|
defaultPolicy = proxyproto.IGNORE
|
2020-07-24 21:39:38 +00:00
|
|
|
}
|
2023-03-15 17:44:08 +00:00
|
|
|
|
2021-08-05 16:38:15 +00:00
|
|
|
return &proxyproto.Listener{
|
2021-09-08 10:29:47 +00:00
|
|
|
Listener: listener,
|
2023-03-15 17:44:08 +00:00
|
|
|
Policy: getProxyPolicy(c.proxyAllowed, c.proxySkipped, defaultPolicy),
|
2022-05-07 10:50:49 +00:00
|
|
|
ReadHeaderTimeout: 10 * time.Second,
|
2021-08-05 16:38:15 +00:00
|
|
|
}, nil
|
2020-07-24 21:39:38 +00:00
|
|
|
}
|
2021-08-05 16:38:15 +00:00
|
|
|
return nil, errors.New("proxy protocol not configured")
|
2020-07-24 21:39:38 +00:00
|
|
|
}
|
|
|
|
|
2023-02-09 08:33:33 +00:00
|
|
|
// GetRateLimitersStatus returns the rate limiters status
|
|
|
|
func (c *Configuration) GetRateLimitersStatus() (bool, []string) {
|
|
|
|
enabled := false
|
|
|
|
var protocols []string
|
|
|
|
for _, rlCfg := range c.RateLimitersConfig {
|
|
|
|
if rlCfg.isEnabled() {
|
|
|
|
enabled = true
|
|
|
|
protocols = append(protocols, rlCfg.Protocols...)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return enabled, util.RemoveDuplicates(protocols, false)
|
|
|
|
}
|
|
|
|
|
|
|
|
// IsAllowListEnabled returns true if the global allow list is enabled
|
|
|
|
func (c *Configuration) IsAllowListEnabled() bool {
|
|
|
|
return c.AllowListStatus > 0
|
|
|
|
}
|
|
|
|
|
2021-04-05 08:07:59 +00:00
|
|
|
// ExecuteStartupHook runs the startup hook if defined
|
|
|
|
func (c *Configuration) ExecuteStartupHook() error {
|
|
|
|
if c.StartupHook == "" {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if strings.HasPrefix(c.StartupHook, "http") {
|
|
|
|
var url *url.URL
|
|
|
|
url, err := url.Parse(c.StartupHook)
|
|
|
|
if err != nil {
|
2023-02-27 18:02:43 +00:00
|
|
|
logger.Warn(logSender, "", "Invalid startup hook %q: %v", c.StartupHook, err)
|
2021-04-05 08:07:59 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
startTime := time.Now()
|
2021-05-25 06:36:01 +00:00
|
|
|
resp, err := httpclient.RetryableGet(url.String())
|
2021-04-05 08:07:59 +00:00
|
|
|
if err != nil {
|
|
|
|
logger.Warn(logSender, "", "Error executing startup hook: %v", err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer resp.Body.Close()
|
|
|
|
logger.Debug(logSender, "", "Startup hook executed, elapsed: %v, response code: %v", time.Since(startTime), resp.StatusCode)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if !filepath.IsAbs(c.StartupHook) {
|
2023-02-27 18:02:43 +00:00
|
|
|
err := fmt.Errorf("invalid startup hook %q", c.StartupHook)
|
|
|
|
logger.Warn(logSender, "", "Invalid startup hook %q", c.StartupHook)
|
2021-04-05 08:07:59 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
startTime := time.Now()
|
2022-09-20 11:58:44 +00:00
|
|
|
timeout, env, args := command.GetConfig(c.StartupHook, command.HookStartup)
|
2022-05-20 17:30:54 +00:00
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
2021-04-05 08:07:59 +00:00
|
|
|
defer cancel()
|
2022-05-20 17:30:54 +00:00
|
|
|
|
2022-09-20 11:58:44 +00:00
|
|
|
cmd := exec.CommandContext(ctx, c.StartupHook, args...)
|
2022-05-20 17:30:54 +00:00
|
|
|
cmd.Env = env
|
2021-04-05 08:07:59 +00:00
|
|
|
err := cmd.Run()
|
2023-02-26 14:15:34 +00:00
|
|
|
logger.Debug(logSender, "", "Startup hook executed, elapsed: %s, error: %v", time.Since(startTime), err)
|
2021-04-05 08:07:59 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-10-29 17:55:18 +00:00
|
|
|
func (c *Configuration) executePostDisconnectHook(remoteAddr, protocol, username, connID string, connectionTime time.Time) {
|
2022-08-10 16:41:59 +00:00
|
|
|
startNewHook()
|
|
|
|
defer hookEnded()
|
|
|
|
|
2021-10-29 17:55:18 +00:00
|
|
|
ipAddr := util.GetIPFromRemoteAddress(remoteAddr)
|
|
|
|
connDuration := int64(time.Since(connectionTime) / time.Millisecond)
|
|
|
|
|
|
|
|
if strings.HasPrefix(c.PostDisconnectHook, "http") {
|
|
|
|
var url *url.URL
|
|
|
|
url, err := url.Parse(c.PostDisconnectHook)
|
|
|
|
if err != nil {
|
2023-02-27 18:02:43 +00:00
|
|
|
logger.Warn(protocol, connID, "Invalid post disconnect hook %q: %v", c.PostDisconnectHook, err)
|
2021-10-29 17:55:18 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
q := url.Query()
|
|
|
|
q.Add("ip", ipAddr)
|
|
|
|
q.Add("protocol", protocol)
|
|
|
|
q.Add("username", username)
|
|
|
|
q.Add("connection_duration", strconv.FormatInt(connDuration, 10))
|
|
|
|
url.RawQuery = q.Encode()
|
|
|
|
startTime := time.Now()
|
|
|
|
resp, err := httpclient.RetryableGet(url.String())
|
|
|
|
respCode := 0
|
|
|
|
if err == nil {
|
|
|
|
respCode = resp.StatusCode
|
|
|
|
resp.Body.Close()
|
|
|
|
}
|
|
|
|
logger.Debug(protocol, connID, "Post disconnect hook response code: %v, elapsed: %v, err: %v",
|
|
|
|
respCode, time.Since(startTime), err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if !filepath.IsAbs(c.PostDisconnectHook) {
|
2023-02-27 18:02:43 +00:00
|
|
|
logger.Debug(protocol, connID, "invalid post disconnect hook %q", c.PostDisconnectHook)
|
2021-10-29 17:55:18 +00:00
|
|
|
return
|
|
|
|
}
|
2022-09-20 11:58:44 +00:00
|
|
|
timeout, env, args := command.GetConfig(c.PostDisconnectHook, command.HookPostDisconnect)
|
2022-05-20 17:30:54 +00:00
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
2021-10-29 17:55:18 +00:00
|
|
|
defer cancel()
|
|
|
|
|
|
|
|
startTime := time.Now()
|
2022-09-20 11:58:44 +00:00
|
|
|
cmd := exec.CommandContext(ctx, c.PostDisconnectHook, args...)
|
2022-05-20 17:30:54 +00:00
|
|
|
cmd.Env = append(env,
|
2023-02-26 14:15:34 +00:00
|
|
|
fmt.Sprintf("SFTPGO_CONNECTION_IP=%s", ipAddr),
|
|
|
|
fmt.Sprintf("SFTPGO_CONNECTION_USERNAME=%s", username),
|
|
|
|
fmt.Sprintf("SFTPGO_CONNECTION_DURATION=%d", connDuration),
|
|
|
|
fmt.Sprintf("SFTPGO_CONNECTION_PROTOCOL=%s", protocol))
|
2021-10-29 17:55:18 +00:00
|
|
|
err := cmd.Run()
|
2023-02-26 14:15:34 +00:00
|
|
|
logger.Debug(protocol, connID, "Post disconnect hook executed, elapsed: %s error: %v", time.Since(startTime), err)
|
2021-10-29 17:55:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Configuration) checkPostDisconnectHook(remoteAddr, protocol, username, connID string, connectionTime time.Time) {
|
|
|
|
if c.PostDisconnectHook == "" {
|
|
|
|
return
|
|
|
|
}
|
2022-05-19 17:49:51 +00:00
|
|
|
if !util.Contains(disconnHookProtocols, protocol) {
|
2021-10-29 17:55:18 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
go c.executePostDisconnectHook(remoteAddr, protocol, username, connID, connectionTime)
|
|
|
|
}
|
|
|
|
|
2020-07-30 20:33:49 +00:00
|
|
|
// ExecutePostConnectHook executes the post connect hook if defined
|
2021-01-02 13:05:09 +00:00
|
|
|
func (c *Configuration) ExecutePostConnectHook(ipAddr, protocol string) error {
|
2021-01-05 08:50:22 +00:00
|
|
|
if c.PostConnectHook == "" {
|
2020-07-30 20:33:49 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if strings.HasPrefix(c.PostConnectHook, "http") {
|
|
|
|
var url *url.URL
|
|
|
|
url, err := url.Parse(c.PostConnectHook)
|
|
|
|
if err != nil {
|
2023-02-27 18:02:43 +00:00
|
|
|
logger.Warn(protocol, "", "Login from ip %q denied, invalid post connect hook %q: %v",
|
2021-01-02 13:05:09 +00:00
|
|
|
ipAddr, c.PostConnectHook, err)
|
2023-02-28 17:01:09 +00:00
|
|
|
return getPermissionDeniedError(protocol)
|
2020-07-30 20:33:49 +00:00
|
|
|
}
|
|
|
|
q := url.Query()
|
2021-01-02 13:05:09 +00:00
|
|
|
q.Add("ip", ipAddr)
|
2020-07-30 20:33:49 +00:00
|
|
|
q.Add("protocol", protocol)
|
|
|
|
url.RawQuery = q.Encode()
|
|
|
|
|
2021-05-25 06:36:01 +00:00
|
|
|
resp, err := httpclient.RetryableGet(url.String())
|
2020-07-30 20:33:49 +00:00
|
|
|
if err != nil {
|
2023-02-27 18:02:43 +00:00
|
|
|
logger.Warn(protocol, "", "Login from ip %q denied, error executing post connect hook: %v", ipAddr, err)
|
2023-02-28 17:01:09 +00:00
|
|
|
return getPermissionDeniedError(protocol)
|
2020-07-30 20:33:49 +00:00
|
|
|
}
|
|
|
|
defer resp.Body.Close()
|
|
|
|
if resp.StatusCode != http.StatusOK {
|
2023-02-27 18:02:43 +00:00
|
|
|
logger.Warn(protocol, "", "Login from ip %q denied, post connect hook response code: %v", ipAddr, resp.StatusCode)
|
2023-02-28 17:01:09 +00:00
|
|
|
return getPermissionDeniedError(protocol)
|
2020-07-30 20:33:49 +00:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if !filepath.IsAbs(c.PostConnectHook) {
|
2023-02-27 18:02:43 +00:00
|
|
|
err := fmt.Errorf("invalid post connect hook %q", c.PostConnectHook)
|
|
|
|
logger.Warn(protocol, "", "Login from ip %q denied: %v", ipAddr, err)
|
2023-02-28 17:01:09 +00:00
|
|
|
return getPermissionDeniedError(protocol)
|
2020-07-30 20:33:49 +00:00
|
|
|
}
|
2022-09-20 11:58:44 +00:00
|
|
|
timeout, env, args := command.GetConfig(c.PostConnectHook, command.HookPostConnect)
|
2022-05-20 17:30:54 +00:00
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
2020-07-30 20:33:49 +00:00
|
|
|
defer cancel()
|
2022-05-20 17:30:54 +00:00
|
|
|
|
2022-09-20 11:58:44 +00:00
|
|
|
cmd := exec.CommandContext(ctx, c.PostConnectHook, args...)
|
2022-05-20 17:30:54 +00:00
|
|
|
cmd.Env = append(env,
|
2023-02-26 14:15:34 +00:00
|
|
|
fmt.Sprintf("SFTPGO_CONNECTION_IP=%s", ipAddr),
|
|
|
|
fmt.Sprintf("SFTPGO_CONNECTION_PROTOCOL=%s", protocol))
|
2020-07-30 20:33:49 +00:00
|
|
|
err := cmd.Run()
|
|
|
|
if err != nil {
|
2023-02-26 14:15:34 +00:00
|
|
|
logger.Warn(protocol, "", "Login from ip %q denied, connect hook error: %v", ipAddr, err)
|
2023-02-28 17:01:09 +00:00
|
|
|
return getPermissionDeniedError(protocol)
|
2020-07-30 20:33:49 +00:00
|
|
|
}
|
2023-02-28 17:01:09 +00:00
|
|
|
return nil
|
2020-07-30 20:33:49 +00:00
|
|
|
}
|
|
|
|
|
2023-03-15 17:44:08 +00:00
|
|
|
// getProxyPolicy returns a proxyproto policy function that classifies each
// upstream address: addresses matching "skipped" bypass proxy protocol
// parsing, addresses matching "allowed" must (REQUIRE default) or may (other
// defaults) send a proxy header, any other address gets the default policy.
func getProxyPolicy(allowed, skipped []func(net.IP) bool, def proxyproto.Policy) proxyproto.PolicyFunc {
	return func(upstream net.Addr) (proxyproto.Policy, error) {
		upstreamIP, err := util.GetIPFromNetAddr(upstream)
		if err != nil {
			// something is wrong with the source IP, better reject the connection
			return proxyproto.REJECT, err
		}

		// skipped matchers take precedence over allowed ones
		for _, skippedFrom := range skipped {
			if skippedFrom(upstreamIP) {
				return proxyproto.SKIP, nil
			}
		}

		for _, allowFrom := range allowed {
			if allowFrom(upstreamIP) {
				// an allowed IP never weakens a REQUIRE default
				if def == proxyproto.REQUIRE {
					return proxyproto.REQUIRE, nil
				}
				return proxyproto.USE, nil
			}
		}

		return def, nil
	}
}
|
|
|
|
|
2020-09-18 16:15:28 +00:00
|
|
|
// SSHConnection defines an ssh connection.
// Each SSH connection can open several channels for SFTP or SSH commands
type SSHConnection struct {
	// unique identifier for this SSH connection
	id string
	// the underlying network connection
	conn net.Conn
	// last activity time, stored as unix timestamp in nanoseconds
	lastActivity atomic.Int64
}
|
|
|
|
|
|
|
|
// NewSSHConnection returns a new SSHConnection
|
|
|
|
func NewSSHConnection(id string, conn net.Conn) *SSHConnection {
|
2022-08-30 13:47:41 +00:00
|
|
|
c := &SSHConnection{
|
|
|
|
id: id,
|
|
|
|
conn: conn,
|
2020-09-18 16:15:28 +00:00
|
|
|
}
|
2022-08-30 13:47:41 +00:00
|
|
|
c.lastActivity.Store(time.Now().UnixNano())
|
|
|
|
return c
|
2020-09-18 16:15:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// GetID returns the unique ID for this SSHConnection
func (c *SSHConnection) GetID() string {
	return c.id
}
|
|
|
|
|
|
|
|
// UpdateLastActivity updates last activity for this connection,
// setting it to the current time in nanoseconds since the Unix epoch
func (c *SSHConnection) UpdateLastActivity() {
	c.lastActivity.Store(time.Now().UnixNano())
}
|
|
|
|
|
|
|
|
// GetLastActivity returns the last connection activity,
// converting the stored nanosecond timestamp back to a time.Time
func (c *SSHConnection) GetLastActivity() time.Time {
	return time.Unix(0, c.lastActivity.Load())
}
|
|
|
|
|
|
|
|
// Close closes the underlying network connection and
// returns the close error, if any
func (c *SSHConnection) Close() error {
	return c.conn.Close()
}
|
|
|
|
|
2020-07-24 21:39:38 +00:00
|
|
|
// ActiveConnections holds the current active connections with the associated transfers
type ActiveConnections struct {
	// clients contains both authenticated and established connections and the ones waiting
	// for authentication
	clients clientsMap
	// transfersCheckStatus is set while a transfers check is running, to
	// prevent overlapping executions of checkTransfers
	transfersCheckStatus atomic.Bool
	sync.RWMutex
	// connections lists the active connections; mapping provides O(1)
	// lookup from connection ID to its index within connections
	connections []ActiveConnection
	mapping     map[string]int
	// sshConnections tracks the underlying SSH connections; sshMapping
	// provides O(1) lookup from SSH connection ID to its index
	sshConnections []*SSHConnection
	sshMapping     map[string]int
	// perUserConns counts open connections per username, used to enforce
	// per-user session limits
	perUserConns map[string]int
}
|
|
|
|
|
|
|
|
// internal method, must be called within a locked block
|
|
|
|
func (conns *ActiveConnections) addUserConnection(username string) {
|
|
|
|
if username == "" {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
conns.perUserConns[username]++
|
|
|
|
}
|
|
|
|
|
|
|
|
// internal method, must be called within a locked block
|
|
|
|
func (conns *ActiveConnections) removeUserConnection(username string) {
|
|
|
|
if username == "" {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if val, ok := conns.perUserConns[username]; ok {
|
|
|
|
conns.perUserConns[username]--
|
|
|
|
if val > 1 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
delete(conns.perUserConns, username)
|
|
|
|
}
|
2020-07-24 21:39:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// GetActiveSessions returns the number of active sessions for the given username.
|
|
|
|
// We return the open sessions for any protocol
|
|
|
|
func (conns *ActiveConnections) GetActiveSessions(username string) int {
|
|
|
|
conns.RLock()
|
|
|
|
defer conns.RUnlock()
|
|
|
|
|
2022-04-14 17:07:41 +00:00
|
|
|
return conns.perUserConns[username]
|
2020-07-24 21:39:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Add adds a new connection to the active ones.
// It returns an error if the user already has the maximum allowed
// number of open sessions, otherwise the connection is registered
// and indexed by its ID.
func (conns *ActiveConnections) Add(c ActiveConnection) error {
	conns.Lock()
	defer conns.Unlock()

	if username := c.GetUsername(); username != "" {
		// enforce the per-user session limit before registering
		if maxSessions := c.GetMaxSessions(); maxSessions > 0 {
			if val := conns.perUserConns[username]; val >= maxSessions {
				return fmt.Errorf("too many open sessions: %d/%d", val, maxSessions)
			}
		}
		conns.addUserConnection(username)
	}
	// record the index for O(1) lookup by connection ID
	conns.mapping[c.GetID()] = len(conns.connections)
	conns.connections = append(conns.connections, c)
	metric.UpdateActiveConnectionsSize(len(conns.connections))
	logger.Debug(c.GetProtocol(), c.GetID(), "connection added, local address %q, remote address %q, num open connections: %d",
		c.GetLocalAddress(), c.GetRemoteAddress(), len(conns.connections))
	return nil
}
|
|
|
|
|
2020-07-29 19:56:56 +00:00
|
|
|
// Swap replaces an existing connection with the given one.
|
|
|
|
// This method is useful if you have to change some connection details
|
|
|
|
// for example for FTP is used to update the connection once the user
|
|
|
|
// authenticates
|
|
|
|
func (conns *ActiveConnections) Swap(c ActiveConnection) error {
|
|
|
|
conns.Lock()
|
|
|
|
defer conns.Unlock()
|
|
|
|
|
2022-10-20 16:17:13 +00:00
|
|
|
if idx, ok := conns.mapping[c.GetID()]; ok {
|
|
|
|
conn := conns.connections[idx]
|
|
|
|
conns.removeUserConnection(conn.GetUsername())
|
|
|
|
if username := c.GetUsername(); username != "" {
|
|
|
|
if maxSessions := c.GetMaxSessions(); maxSessions > 0 {
|
|
|
|
if val, ok := conns.perUserConns[username]; ok && val >= maxSessions {
|
|
|
|
conns.addUserConnection(conn.GetUsername())
|
|
|
|
return fmt.Errorf("too many open sessions: %d/%d", val, maxSessions)
|
2022-04-14 17:07:41 +00:00
|
|
|
}
|
|
|
|
}
|
2022-10-20 16:17:13 +00:00
|
|
|
conns.addUserConnection(username)
|
2020-07-29 19:56:56 +00:00
|
|
|
}
|
2022-10-20 16:17:13 +00:00
|
|
|
err := conn.CloseFS()
|
|
|
|
conns.connections[idx] = c
|
|
|
|
logger.Debug(logSender, c.GetID(), "connection swapped, close fs error: %v", err)
|
|
|
|
conn = nil
|
|
|
|
return nil
|
2020-07-29 19:56:56 +00:00
|
|
|
}
|
2022-10-20 16:17:13 +00:00
|
|
|
|
2020-07-29 19:56:56 +00:00
|
|
|
return errors.New("connection to swap not found")
|
|
|
|
}
|
|
|
|
|
2020-07-24 21:39:38 +00:00
|
|
|
// Remove removes a connection from the active ones.
// The slot is reclaimed with a swap-delete, the connection's filesystem is
// closed, FTP clients that disconnected without attempting a login are
// reported to the defender/metrics/plugins, and the post disconnect hook
// check is triggered.
func (conns *ActiveConnections) Remove(connectionID string) {
	conns.Lock()
	defer conns.Unlock()

	if idx, ok := conns.mapping[connectionID]; ok {
		conn := conns.connections[idx]
		err := conn.CloseFS()
		// swap-delete: move the last connection into the freed slot
		lastIdx := len(conns.connections) - 1
		conns.connections[idx] = conns.connections[lastIdx]
		conns.connections[lastIdx] = nil
		conns.connections = conns.connections[:lastIdx]
		delete(conns.mapping, connectionID)
		if idx != lastIdx {
			// re-index the connection that was moved into the freed slot
			conns.mapping[conns.connections[idx].GetID()] = idx
		}
		conns.removeUserConnection(conn.GetUsername())
		metric.UpdateActiveConnectionsSize(lastIdx)
		logger.Debug(conn.GetProtocol(), conn.GetID(), "connection removed, local address %q, remote address %q close fs error: %v, num open connections: %d",
			conn.GetLocalAddress(), conn.GetRemoteAddress(), err, lastIdx)
		// an FTP client that disconnects without even trying to authenticate
		// is treated as a "no auth tried" login failure
		if conn.GetProtocol() == ProtocolFTP && conn.GetUsername() == "" && !util.Contains(ftpLoginCommands, conn.GetCommand()) {
			ip := util.GetIPFromRemoteAddress(conn.GetRemoteAddress())
			logger.ConnectionFailedLog("", ip, dataprovider.LoginMethodNoAuthTried, ProtocolFTP,
				dataprovider.ErrNoAuthTried.Error())
			metric.AddNoAuthTried()
			AddDefenderEvent(ip, ProtocolFTP, HostEventNoLoginTried)
			dataprovider.ExecutePostLoginHook(&dataprovider.User{}, dataprovider.LoginMethodNoAuthTried, ip,
				ProtocolFTP, dataprovider.ErrNoAuthTried)
			plugin.Handler.NotifyLogEvent(notifier.LogEventTypeNoLoginTried, ProtocolFTP, "", ip, "",
				dataprovider.ErrNoAuthTried)
		}
		Config.checkPostDisconnectHook(conn.GetRemoteAddress(), conn.GetProtocol(), conn.GetUsername(),
			conn.GetID(), conn.GetConnectionTime())
		return
	}

	logger.Debug(logSender, "", "connection id %q to remove not found!", connectionID)
}
|
|
|
|
|
|
|
|
// Close closes an active connection.
// It returns true on success, i.e. if the connection was found and its role
// matches the given role (an empty role matches any connection).
func (conns *ActiveConnections) Close(connectionID, role string) bool {
	conns.RLock()

	var result bool

	if idx, ok := conns.mapping[connectionID]; ok {
		c := conns.connections[idx]

		if role == "" || c.GetRole() == role {
			// the actual disconnect is deferred so it runs after RUnlock,
			// avoiding holding the read lock during the network close
			defer func(conn ActiveConnection) {
				err := conn.Disconnect()
				logger.Debug(conn.GetProtocol(), conn.GetID(), "close connection requested, close err: %v", err)
			}(c)
			result = true
		}
	}

	conns.RUnlock()
	return result
}
|
|
|
|
|
2020-09-18 16:15:28 +00:00
|
|
|
// AddSSHConnection adds a new ssh connection to the active ones
|
|
|
|
func (conns *ActiveConnections) AddSSHConnection(c *SSHConnection) {
|
|
|
|
conns.Lock()
|
|
|
|
defer conns.Unlock()
|
|
|
|
|
2022-10-20 16:17:13 +00:00
|
|
|
conns.sshMapping[c.GetID()] = len(conns.sshConnections)
|
2020-09-18 16:15:28 +00:00
|
|
|
conns.sshConnections = append(conns.sshConnections, c)
|
2022-10-20 16:17:13 +00:00
|
|
|
logger.Debug(logSender, c.GetID(), "ssh connection added, num open connections: %d", len(conns.sshConnections))
|
2020-09-18 16:15:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// RemoveSSHConnection removes a connection from the active ones.
// The slot is reclaimed with a swap-delete; a warning is logged if the
// connection ID is unknown.
func (conns *ActiveConnections) RemoveSSHConnection(connectionID string) {
	conns.Lock()
	defer conns.Unlock()

	if idx, ok := conns.sshMapping[connectionID]; ok {
		// swap-delete: move the last SSH connection into the freed slot
		lastIdx := len(conns.sshConnections) - 1
		conns.sshConnections[idx] = conns.sshConnections[lastIdx]
		conns.sshConnections[lastIdx] = nil
		conns.sshConnections = conns.sshConnections[:lastIdx]
		delete(conns.sshMapping, connectionID)
		if idx != lastIdx {
			// re-index the connection that was moved into the freed slot
			conns.sshMapping[conns.sshConnections[idx].GetID()] = idx
		}
		logger.Debug(logSender, connectionID, "ssh connection removed, num open ssh connections: %d", lastIdx)
		return
	}
	logger.Warn(logSender, "", "ssh connection to remove with id %q not found!", connectionID)
}
|
|
|
|
|
2020-11-07 17:05:47 +00:00
|
|
|
// checkIdles closes connections that exceeded the configured idle timeout.
// SSH connections are closed only if none of their associated channels is
// still active; unauthenticated FTP connections use the shorter login
// timeout. Closes are deferred on purpose so they run after RUnlock.
func (conns *ActiveConnections) checkIdles() {
	conns.RLock()

	for _, sshConn := range conns.sshConnections {
		idleTime := time.Since(sshConn.GetLastActivity())
		if idleTime > Config.idleTimeoutAsDuration {
			// we close an SSH connection if it has no active connections associated
			idToMatch := fmt.Sprintf("_%s_", sshConn.GetID())
			toClose := true
			for _, conn := range conns.connections {
				if strings.Contains(conn.GetID(), idToMatch) {
					if time.Since(conn.GetLastActivity()) <= Config.idleTimeoutAsDuration {
						toClose = false
						break
					}
				}
			}
			if toClose {
				// deferred so the close happens after the read lock is released
				defer func(c *SSHConnection) {
					err := c.Close()
					logger.Debug(logSender, c.GetID(), "close idle SSH connection, idle time: %v, close err: %v",
						time.Since(c.GetLastActivity()), err)
				}(sshConn)
			}
		}
	}

	for _, c := range conns.connections {
		idleTime := time.Since(c.GetLastActivity())
		// FTP clients that never authenticated are subject to the login timeout
		isUnauthenticatedFTPUser := (c.GetProtocol() == ProtocolFTP && c.GetUsername() == "")

		if idleTime > Config.idleTimeoutAsDuration || (isUnauthenticatedFTPUser && idleTime > Config.idleLoginTimeout) {
			// deferred so the disconnect happens after the read lock is released
			defer func(conn ActiveConnection) {
				err := conn.Disconnect()
				logger.Debug(conn.GetProtocol(), conn.GetID(), "close idle connection, idle time: %v, username: %q close err: %v",
					time.Since(conn.GetLastActivity()), conn.GetUsername(), err)
			}(c)
		}
	}

	conns.RUnlock()
}
|
|
|
|
|
2022-01-20 17:19:20 +00:00
|
|
|
// checkTransfers updates the current size of size-limited transfers and
// signals the close of any transfer that exceeded its quota.
// Overlapping executions are skipped via transfersCheckStatus. The read
// lock is released while waiting for the size updates so transfers can
// progress in the meantime.
func (conns *ActiveConnections) checkTransfers() {
	if conns.transfersCheckStatus.Load() {
		logger.Warn(logSender, "", "the previous transfer check is still running, skipping execution")
		return
	}
	conns.transfersCheckStatus.Store(true)
	defer conns.transfersCheckStatus.Store(false)

	conns.RLock()

	// with fewer than two connections no cross-connection quota race is possible
	if len(conns.connections) < 2 {
		conns.RUnlock()
		return
	}
	var wg sync.WaitGroup
	logger.Debug(logSender, "", "start concurrent transfers check")

	// update the current size for transfers to monitors
	for _, c := range conns.connections {
		for _, t := range c.GetTransfers() {
			if t.HasSizeLimit {
				wg.Add(1)

				go func(transfer ConnectionTransfer, connID string) {
					defer wg.Done()
					transfersChecker.UpdateTransferCurrentSizes(transfer.ULSize, transfer.DLSize, transfer.ID, connID)
				}(t, c.GetID())
			}
		}
	}

	conns.RUnlock()
	logger.Debug(logSender, "", "waiting for the update of the transfers current size")
	wg.Wait()

	logger.Debug(logSender, "", "getting overquota transfers")
	overquotaTransfers := transfersChecker.GetOverquotaTransfers()
	logger.Debug(logSender, "", "number of overquota transfers: %v", len(overquotaTransfers))
	if len(overquotaTransfers) == 0 {
		return
	}

	// re-acquire the lock to signal the overquota transfers
	conns.RLock()
	defer conns.RUnlock()

	for _, c := range conns.connections {
		for _, overquotaTransfer := range overquotaTransfers {
			if c.GetID() == overquotaTransfer.ConnID {
				logger.Info(logSender, c.GetID(), "user %q is overquota, try to close transfer id %v",
					c.GetUsername(), overquotaTransfer.TransferID)
				var err error
				if overquotaTransfer.TransferType == TransferDownload {
					err = getReadQuotaExceededError(c.GetProtocol())
				} else {
					err = getQuotaExceededError(c.GetProtocol())
				}
				c.SignalTransferClose(overquotaTransfer.TransferID, err)
			}
		}
	}
	logger.Debug(logSender, "", "transfers check completed")
}
|
|
|
|
|
2021-05-08 17:45:21 +00:00
|
|
|
// AddClientConnection stores a new client connection,
// tracked by its IP address before authentication happens
func (conns *ActiveConnections) AddClientConnection(ipAddr string) {
	conns.clients.add(ipAddr)
}
|
|
|
|
|
2021-05-08 17:45:21 +00:00
|
|
|
// RemoveClientConnection removes a disconnected client from the tracked ones,
// decrementing the counter for its IP address
func (conns *ActiveConnections) RemoveClientConnection(ipAddr string) {
	conns.clients.remove(ipAddr)
}
|
|
|
|
|
2021-05-11 06:04:57 +00:00
|
|
|
// GetClientConnections returns the total number of client connections,
// including the ones not yet authenticated
func (conns *ActiveConnections) GetClientConnections() int32 {
	return conns.clients.getTotal()
}
|
|
|
|
|
2022-10-22 09:56:41 +00:00
|
|
|
// IsNewConnectionAllowed returns an error if the maximum number of concurrent allowed
// connections is exceeded or a whitelist is defined and the specified ipAddr is not listed
// or the service is shutting down.
// Checks are ordered cheapest-first: shutdown flag, allow list, per-host
// limit, total client limit and finally the established sessions count.
func (conns *ActiveConnections) IsNewConnectionAllowed(ipAddr, protocol string) error {
	if isShuttingDown.Load() {
		return ErrShuttingDown
	}
	if Config.allowList != nil {
		isListed, _, err := Config.allowList.IsListed(ipAddr, protocol)
		if err != nil {
			// deny on lookup errors: fail closed
			logger.Error(logSender, "", "unable to query allow list, connection denied, ip %q, protocol %s, err: %v",
				ipAddr, protocol, err)
			return ErrConnectionDenied
		}
		if !isListed {
			return ErrConnectionDenied
		}
	}
	// no connection limits configured
	if Config.MaxTotalConnections == 0 && Config.MaxPerHostConnections == 0 {
		return nil
	}

	if Config.MaxPerHostConnections > 0 {
		if total := conns.clients.getTotalFrom(ipAddr); total > Config.MaxPerHostConnections {
			logger.Info(logSender, "", "active connections from %s %d/%d", ipAddr, total, Config.MaxPerHostConnections)
			// exceeding the per-host limit is reported to the defender
			AddDefenderEvent(ipAddr, protocol, HostEventLimitExceeded)
			return ErrConnectionDenied
		}
	}

	if Config.MaxTotalConnections > 0 {
		if total := conns.clients.getTotal(); total > int32(Config.MaxTotalConnections) {
			logger.Info(logSender, "", "active client connections %d/%d", total, Config.MaxTotalConnections)
			return ErrConnectionDenied
		}

		// on a single SFTP connection we could have multiple SFTP channels or commands
		// so we check the established connections too

		conns.RLock()
		defer conns.RUnlock()

		if sess := len(conns.connections); sess >= Config.MaxTotalConnections {
			logger.Info(logSender, "", "active client sessions %d/%d", sess, Config.MaxTotalConnections)
			return ErrConnectionDenied
		}
	}

	return nil
}
|
|
|
|
|
2020-07-24 21:39:38 +00:00
|
|
|
// GetStats returns stats for active connections.
// An empty role matches all connections, otherwise only connections
// whose role equals the given one are included.
func (conns *ActiveConnections) GetStats(role string) []ConnectionStatus {
	conns.RLock()
	defer conns.RUnlock()

	stats := make([]ConnectionStatus, 0, len(conns.connections))
	// node name is the same for every connection on this instance
	node := dataprovider.GetNodeName()
	for _, c := range conns.connections {
		if role == "" || c.GetRole() == role {
			stat := ConnectionStatus{
				Username:       c.GetUsername(),
				ConnectionID:   c.GetID(),
				ClientVersion:  c.GetClientVersion(),
				RemoteAddress:  c.GetRemoteAddress(),
				ConnectionTime: util.GetTimeAsMsSinceEpoch(c.GetConnectionTime()),
				LastActivity:   util.GetTimeAsMsSinceEpoch(c.GetLastActivity()),
				CurrentTime:    util.GetTimeAsMsSinceEpoch(time.Now()),
				Protocol:       c.GetProtocol(),
				Command:        c.GetCommand(),
				Transfers:      c.GetTransfers(),
				Node:           node,
			}
			stats = append(stats, stat)
		}
	}
	return stats
}
|
|
|
|
|
|
|
|
// ConnectionStatus returns the status for an active connection
type ConnectionStatus struct {
	// Logged in username
	Username string `json:"username"`
	// Unique identifier for the connection
	ConnectionID string `json:"connection_id"`
	// client's version string
	ClientVersion string `json:"client_version,omitempty"`
	// Remote address for this connection
	RemoteAddress string `json:"remote_address"`
	// Connection time as unix timestamp in milliseconds
	ConnectionTime int64 `json:"connection_time"`
	// Last activity as unix timestamp in milliseconds
	LastActivity int64 `json:"last_activity"`
	// Current time as unix timestamp in milliseconds.
	// Useful to compute the connection duration/idle time client side
	CurrentTime int64 `json:"current_time"`
	// Protocol for this connection
	Protocol string `json:"protocol"`
	// active uploads/downloads
	Transfers []ConnectionTransfer `json:"active_transfers,omitempty"`
	// SSH command or WebDAV method
	Command string `json:"command,omitempty"`
	// Node identifier, omitted for single node installations
	Node string `json:"node,omitempty"`
}
|
|
|
|
|
2022-09-26 17:00:34 +00:00
|
|
|
// ActiveQuotaScan defines an active quota scan for a user
type ActiveQuotaScan struct {
	// Username to which the quota scan refers
	Username string `json:"username"`
	// quota scan start time as unix timestamp in milliseconds
	StartTime int64 `json:"start_time"`
	// Role of the user, used for filtering only, never serialized
	Role string `json:"-"`
}
|
|
|
|
|
|
|
|
// ActiveVirtualFolderQuotaScan defines an active quota scan for a virtual folder
type ActiveVirtualFolderQuotaScan struct {
	// folder name to which the quota scan refers
	Name string `json:"name"`
	// quota scan start time as unix timestamp in milliseconds
	StartTime int64 `json:"start_time"`
}
|
|
|
|
|
|
|
|
// ActiveScans holds the active quota scans.
// The embedded RWMutex protects both slices; callers must not copy this struct.
type ActiveScans struct {
	sync.RWMutex
	// active quota scans for users
	UserScans []ActiveQuotaScan
	// active quota scans for virtual folders
	FolderScans []ActiveVirtualFolderQuotaScan
}
|
|
|
|
|
2022-09-26 17:00:34 +00:00
|
|
|
// GetUsersQuotaScans returns the active users quota scans
|
2022-11-16 18:04:50 +00:00
|
|
|
func (s *ActiveScans) GetUsersQuotaScans(role string) []ActiveQuotaScan {
|
2022-08-01 16:48:54 +00:00
|
|
|
s.RLock()
|
|
|
|
defer s.RUnlock()
|
|
|
|
|
2022-11-16 18:04:50 +00:00
|
|
|
scans := make([]ActiveQuotaScan, 0, len(s.UserScans))
|
|
|
|
for _, scan := range s.UserScans {
|
|
|
|
if role == "" || role == scan.Role {
|
|
|
|
scans = append(scans, ActiveQuotaScan{
|
|
|
|
Username: scan.Username,
|
|
|
|
StartTime: scan.StartTime,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-08-01 16:48:54 +00:00
|
|
|
return scans
|
|
|
|
}
|
|
|
|
|
|
|
|
// AddUserQuotaScan adds a user to the ones with active quota scans.
|
|
|
|
// Returns false if the user has a quota scan already running
|
2022-11-16 18:04:50 +00:00
|
|
|
func (s *ActiveScans) AddUserQuotaScan(username, role string) bool {
|
2022-08-01 16:48:54 +00:00
|
|
|
s.Lock()
|
|
|
|
defer s.Unlock()
|
|
|
|
|
|
|
|
for _, scan := range s.UserScans {
|
|
|
|
if scan.Username == username {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
s.UserScans = append(s.UserScans, ActiveQuotaScan{
|
|
|
|
Username: username,
|
|
|
|
StartTime: util.GetTimeAsMsSinceEpoch(time.Now()),
|
2022-11-16 18:04:50 +00:00
|
|
|
Role: role,
|
2022-08-01 16:48:54 +00:00
|
|
|
})
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// RemoveUserQuotaScan removes a user from the ones with active quota scans.
|
|
|
|
// Returns false if the user has no active quota scans
|
|
|
|
func (s *ActiveScans) RemoveUserQuotaScan(username string) bool {
|
|
|
|
s.Lock()
|
|
|
|
defer s.Unlock()
|
|
|
|
|
|
|
|
for idx, scan := range s.UserScans {
|
|
|
|
if scan.Username == username {
|
|
|
|
lastIdx := len(s.UserScans) - 1
|
|
|
|
s.UserScans[idx] = s.UserScans[lastIdx]
|
|
|
|
s.UserScans = s.UserScans[:lastIdx]
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetVFoldersQuotaScans returns the active quota scans for virtual folders
|
|
|
|
func (s *ActiveScans) GetVFoldersQuotaScans() []ActiveVirtualFolderQuotaScan {
|
|
|
|
s.RLock()
|
|
|
|
defer s.RUnlock()
|
|
|
|
scans := make([]ActiveVirtualFolderQuotaScan, len(s.FolderScans))
|
|
|
|
copy(scans, s.FolderScans)
|
|
|
|
return scans
|
|
|
|
}
|
|
|
|
|
|
|
|
// AddVFolderQuotaScan adds a virtual folder to the ones with active quota scans.
|
|
|
|
// Returns false if the folder has a quota scan already running
|
|
|
|
func (s *ActiveScans) AddVFolderQuotaScan(folderName string) bool {
|
|
|
|
s.Lock()
|
|
|
|
defer s.Unlock()
|
|
|
|
|
|
|
|
for _, scan := range s.FolderScans {
|
|
|
|
if scan.Name == folderName {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
s.FolderScans = append(s.FolderScans, ActiveVirtualFolderQuotaScan{
|
|
|
|
Name: folderName,
|
|
|
|
StartTime: util.GetTimeAsMsSinceEpoch(time.Now()),
|
|
|
|
})
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// RemoveVFolderQuotaScan removes a folder from the ones with active quota scans.
|
|
|
|
// Returns false if the folder has no active quota scans
|
|
|
|
func (s *ActiveScans) RemoveVFolderQuotaScan(folderName string) bool {
|
|
|
|
s.Lock()
|
|
|
|
defer s.Unlock()
|
|
|
|
|
|
|
|
for idx, scan := range s.FolderScans {
|
|
|
|
if scan.Name == folderName {
|
|
|
|
lastIdx := len(s.FolderScans) - 1
|
|
|
|
s.FolderScans[idx] = s.FolderScans[lastIdx]
|
|
|
|
s.FolderScans = s.FolderScans[:lastIdx]
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
2022-09-26 17:00:34 +00:00
|
|
|
|
|
|
|
// MetadataCheck defines an active metadata check
type MetadataCheck struct {
	// Username to which the metadata check refers
	Username string `json:"username"`
	// check start time as unix timestamp in milliseconds
	StartTime int64 `json:"start_time"`
	// Role of the user, used for filtering only, never serialized
	Role string `json:"-"`
}
|
|
|
|
|
|
|
|
// MetadataChecks holds the active metadata checks.
// The embedded RWMutex protects the checks slice; callers must not copy this struct.
type MetadataChecks struct {
	sync.RWMutex
	// active metadata checks, at most one per user
	checks []MetadataCheck
}
|
|
|
|
|
|
|
|
// Get returns the active metadata checks
|
2022-11-16 18:04:50 +00:00
|
|
|
func (c *MetadataChecks) Get(role string) []MetadataCheck {
|
2022-09-26 17:00:34 +00:00
|
|
|
c.RLock()
|
|
|
|
defer c.RUnlock()
|
|
|
|
|
2022-11-16 18:04:50 +00:00
|
|
|
checks := make([]MetadataCheck, 0, len(c.checks))
|
|
|
|
for _, check := range c.checks {
|
|
|
|
if role == "" || role == check.Role {
|
|
|
|
checks = append(checks, MetadataCheck{
|
|
|
|
Username: check.Username,
|
|
|
|
StartTime: check.StartTime,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
2022-09-26 17:00:34 +00:00
|
|
|
|
|
|
|
return checks
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add adds a user to the ones with active metadata checks.
|
|
|
|
// Return false if a metadata check is already active for the specified user
|
2022-11-16 18:04:50 +00:00
|
|
|
func (c *MetadataChecks) Add(username, role string) bool {
|
2022-09-26 17:00:34 +00:00
|
|
|
c.Lock()
|
|
|
|
defer c.Unlock()
|
|
|
|
|
|
|
|
for idx := range c.checks {
|
|
|
|
if c.checks[idx].Username == username {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
c.checks = append(c.checks, MetadataCheck{
|
|
|
|
Username: username,
|
|
|
|
StartTime: util.GetTimeAsMsSinceEpoch(time.Now()),
|
2022-11-16 18:04:50 +00:00
|
|
|
Role: role,
|
2022-09-26 17:00:34 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove removes a user from the ones with active metadata checks
|
|
|
|
func (c *MetadataChecks) Remove(username string) bool {
|
|
|
|
c.Lock()
|
|
|
|
defer c.Unlock()
|
|
|
|
|
|
|
|
for idx := range c.checks {
|
|
|
|
if c.checks[idx].Username == username {
|
|
|
|
lastIdx := len(c.checks) - 1
|
|
|
|
c.checks[idx] = c.checks[lastIdx]
|
|
|
|
c.checks = c.checks[:lastIdx]
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|