2020-01-19 12:58:55 +00:00
|
|
|
// Package vfs provides local and remote filesystems support
|
2020-01-19 06:41:05 +00:00
|
|
|
package vfs
|
|
|
|
|
|
|
|
import (
|
|
|
|
"errors"
|
2020-01-31 18:04:00 +00:00
|
|
|
"fmt"
|
2020-10-25 07:18:48 +00:00
|
|
|
"net/url"
|
2020-01-19 06:41:05 +00:00
|
|
|
"os"
|
2020-01-19 22:23:09 +00:00
|
|
|
"path"
|
2020-06-26 21:38:29 +00:00
|
|
|
"path/filepath"
|
2020-01-19 06:41:05 +00:00
|
|
|
"runtime"
|
2020-01-19 22:23:09 +00:00
|
|
|
"strings"
|
2020-01-19 06:41:05 +00:00
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/eikenb/pipeat"
|
2020-05-06 17:36:34 +00:00
|
|
|
|
|
|
|
"github.com/drakkan/sftpgo/logger"
|
2020-01-19 06:41:05 +00:00
|
|
|
)
|
|
|
|
|
2020-10-25 07:18:48 +00:00
|
|
|
// dirMimeType is the mime type reported for directories
const dirMimeType = "inode/directory"
|
|
|
|
|
2020-01-31 18:04:00 +00:00
|
|
|
// Fs defines the interface for filesystem backends
type Fs interface {
	// Name returns the name for the filesystem backend
	Name() string
	// ConnectionID returns the ID for the current connection
	ConnectionID() string
	// Stat returns a FileInfo describing the named file
	Stat(name string) (os.FileInfo, error)
	// Lstat is like Stat but, for local filesystems, it does not follow
	// symbolic links
	Lstat(name string) (os.FileInfo, error)
	// Open opens the named file for reading at the specified offset.
	// A backend returns either a local *os.File or a pipe reader,
	// plus an optional cancel function
	Open(name string, offset int64) (*os.File, *pipeat.PipeReaderAt, func(), error)
	// Create opens the named file for writing.
	// A backend returns either a local *os.File or a *PipeWriter,
	// plus an optional cancel function
	Create(name string, flag int) (*os.File, *PipeWriter, func(), error)
	// Rename renames (moves) source to target
	Rename(source, target string) error
	// Remove removes the named file or (empty) directory
	Remove(name string, isDir bool) error
	// Mkdir creates a new directory with the specified name
	Mkdir(name string) error
	// Symlink creates target as a symbolic link to source
	Symlink(source, target string) error
	// Chown changes the numeric uid and gid of the named file
	Chown(name string, uid int, gid int) error
	// Chmod changes the mode of the named file
	Chmod(name string, mode os.FileMode) error
	// Chtimes changes the access and modification times of the named file
	Chtimes(name string, atime, mtime time.Time) error
	// Truncate changes the size of the named file
	Truncate(name string, size int64) error
	// ReadDir reads the directory named by dirname and returns
	// a list of directory entries
	ReadDir(dirname string) ([]os.FileInfo, error)
	// Readlink returns the destination of the named symbolic link
	Readlink(name string) (string, error)
	// IsUploadResumeSupported returns true if resuming uploads is supported
	IsUploadResumeSupported() bool
	// IsAtomicUploadSupported returns true if atomic uploads are supported
	IsAtomicUploadSupported() bool
	// CheckRootPath checks the root directory for the given user,
	// presumably creating it if missing — confirm with the implementations
	CheckRootPath(username string, uid int, gid int) bool
	// ResolvePath returns the matching filesystem path for the specified
	// sftp path
	ResolvePath(sftpPath string) (string, error)
	// IsNotExist returns true if the error is related to a missing
	// file or directory
	IsNotExist(err error) bool
	// IsPermission returns true if the error is related to a permission issue
	IsPermission(err error) bool
	// ScanRootDirContents scans the root directory and returns,
	// presumably, the number of files and their total size
	ScanRootDirContents() (int, int64, error)
	// GetDirSize returns, presumably, the number of files and their total
	// size inside the specified directory
	GetDirSize(dirname string) (int, int64, error)
	// GetAtomicUploadPath returns the temporary path for the given name
	// to be used for atomic uploads
	GetAtomicUploadPath(name string) string
	// GetRelativePath returns the path for a file relative to the
	// filesystem root
	GetRelativePath(name string) string
	// Walk walks the file tree rooted at root, calling walkFn for each
	// file or directory
	Walk(root string, walkFn filepath.WalkFunc) error
	// Join joins any number of path elements into a single path
	Join(elem ...string) string
	// HasVirtualFolders returns true if folders are emulated by the backend
	HasVirtualFolders() bool
}
|
|
|
|
|
2020-08-11 21:56:10 +00:00
|
|
|
// MimeTyper defines an optional interface to get the content type
type MimeTyper interface {
	// GetMimeType returns the content type for the named file
	GetMimeType(name string) (string, error)
}
|
|
|
|
|
2020-06-26 21:38:29 +00:00
|
|
|
// errUnsupported is the sentinel error returned for operations that a
// filesystem backend does not implement.
// NOTE: error strings are lowercase per Go convention (staticcheck ST1005)
var errUnsupported = errors.New("not supported")
|
|
|
|
|
2020-06-16 20:49:18 +00:00
|
|
|
// QuotaCheckResult defines the result for a quota check
type QuotaCheckResult struct {
	HasSpace bool // whether the quota check succeeded
	// allowed values, presumably precomputed by the quota checker — confirm with callers
	AllowedSize  int64
	AllowedFiles int
	// current usage
	UsedSize  int64
	UsedFiles int
	// configured limits, 0 or negative means unlimited
	QuotaSize  int64
	QuotaFiles int
}

// GetRemainingSize returns the remaining allowed size
func (q *QuotaCheckResult) GetRemainingSize() int64 {
	if q.QuotaSize <= 0 {
		// no size quota configured
		return 0
	}
	return q.QuotaSize - q.UsedSize
}

// GetRemainingFiles returns the remaining allowed files
func (q *QuotaCheckResult) GetRemainingFiles() int {
	if q.QuotaFiles <= 0 {
		// no files quota configured
		return 0
	}
	return q.QuotaFiles - q.UsedFiles
}
|
|
|
|
|
2020-05-23 09:58:05 +00:00
|
|
|
// S3FsConfig defines the configuration for S3 based filesystem
type S3FsConfig struct {
	// Bucket is the name of the S3 bucket. It cannot be empty,
	// see ValidateS3FsConfig
	Bucket string `json:"bucket,omitempty"`
	// KeyPrefix is similar to a chroot directory for local filesystem.
	// If specified then the SFTP user will only see objects that starts
	// with this prefix and so you can restrict access to a specific
	// folder. The prefix, if not empty, must not start with "/" and must
	// end with "/".
	// If empty the whole bucket contents will be available
	KeyPrefix string `json:"key_prefix,omitempty"`
	// Region is the bucket region. It cannot be empty,
	// see ValidateS3FsConfig
	Region string `json:"region,omitempty"`
	// AccessKey and AccessSecret are the static credentials: they must
	// be set together or both left empty, see ValidateS3FsConfig
	AccessKey    string `json:"access_key,omitempty"`
	AccessSecret string `json:"access_secret,omitempty"`
	// Endpoint is an optional endpoint, presumably for S3 compatible
	// servers — confirm with the S3 backend implementation
	Endpoint string `json:"endpoint,omitempty"`
	// StorageClass is the optional storage class for uploaded objects
	StorageClass string `json:"storage_class,omitempty"`
	// The buffer size (in MB) to use for multipart uploads. The minimum allowed part size is 5MB,
	// and if this value is set to zero, the default value (5MB) for the AWS SDK will be used.
	// The minimum allowed value is 5.
	// Please note that if the upload bandwidth between the SFTP client and SFTPGo is greater than
	// the upload bandwidth between SFTPGo and S3 then the SFTP client have to wait for the upload
	// of the last parts to S3 after it ends the file upload to SFTPGo, and it may time out.
	// Keep this in mind if you customize these parameters.
	UploadPartSize int64 `json:"upload_part_size,omitempty"`
	// How many parts are uploaded in parallel
	UploadConcurrency int `json:"upload_concurrency,omitempty"`
}
|
|
|
|
|
|
|
|
// GCSFsConfig defines the configuration for Google Cloud Storage based filesystem
type GCSFsConfig struct {
	// Bucket is the name of the GCS bucket. It cannot be empty,
	// see ValidateGCSFsConfig
	Bucket string `json:"bucket,omitempty"`
	// KeyPrefix is similar to a chroot directory for local filesystem.
	// If specified then the SFTP user will only see objects that starts
	// with this prefix and so you can restrict access to a specific
	// folder. The prefix, if not empty, must not start with "/" and must
	// end with "/".
	// If empty the whole bucket contents will be available
	KeyPrefix string `json:"key_prefix,omitempty"`
	// CredentialFile is the path to the credentials file on disk.
	// It is never serialized to JSON
	CredentialFile string `json:"-"`
	// Credentials are the in-memory credentials, presumably the service
	// account JSON — confirm with the GCS backend implementation
	Credentials []byte `json:"credentials,omitempty"`
	// AutomaticCredentials, if different from 0, disables the requirement
	// for explicit credentials: when it is 0 and Credentials is empty a
	// non-empty credentials file must exist, see ValidateGCSFsConfig
	AutomaticCredentials int `json:"automatic_credentials,omitempty"`
	// StorageClass is the optional storage class for uploaded objects
	StorageClass string `json:"storage_class,omitempty"`
}
|
|
|
|
|
2020-10-25 07:18:48 +00:00
|
|
|
// AzBlobFsConfig defines the configuration for Azure Blob Storage based filesystem
type AzBlobFsConfig struct {
	// Container is the blob container name. It cannot be empty unless a
	// SAS URL is provided, see ValidateAzBlobFsConfig
	Container string `json:"container,omitempty"`
	// Storage Account Name, leave blank to use SAS URL
	AccountName string `json:"account_name,omitempty"`
	// Storage Account Key leave blank to use SAS URL.
	// The access key is stored encrypted (AES-256-GCM)
	AccountKey string `json:"account_key,omitempty"`
	// Optional endpoint. Default is "blob.core.windows.net".
	// If you use the emulator the endpoint must include the protocol,
	// for example "http://127.0.0.1:10000"
	Endpoint string `json:"endpoint,omitempty"`
	// Shared access signature URL, leave blank if using account/key
	SASURL string `json:"sas_url,omitempty"`
	// KeyPrefix is similar to a chroot directory for local filesystem.
	// If specified then the SFTPGo users will only see objects that starts
	// with this prefix and so you can restrict access to a specific
	// folder. The prefix, if not empty, must not start with "/" and must
	// end with "/".
	// If empty the whole bucket contents will be available
	KeyPrefix string `json:"key_prefix,omitempty"`
	// The buffer size (in MB) to use for multipart uploads.
	// If this value is set to zero, the default value (1MB) for the Azure SDK will be used.
	// Please note that if the upload bandwidth between the SFTPGo client and SFTPGo server is
	// greater than the upload bandwidth between SFTPGo and Azure then the SFTP client have
	// to wait for the upload of the last parts to Azure after it ends the file upload to SFTPGo,
	// and it may time out.
	// Keep this in mind if you customize these parameters.
	UploadPartSize int64 `json:"upload_part_size,omitempty"`
	// How many parts are uploaded in parallel
	UploadConcurrency int `json:"upload_concurrency,omitempty"`
	// Set to true if you use an Azure emulator such as Azurite
	UseEmulator bool `json:"use_emulator,omitempty"`
}
|
|
|
|
|
2020-05-19 17:17:43 +00:00
|
|
|
// PipeWriter defines a wrapper for pipeat.PipeWriterAt.
type PipeWriter struct {
	writer *pipeat.PipeWriterAt
	err    error     // set by Done(), returned to the caller of Close()
	done   chan bool // signaled by Done() when the upload ends
}
|
|
|
|
|
|
|
|
// NewPipeWriter initializes a new PipeWriter
|
|
|
|
func NewPipeWriter(w *pipeat.PipeWriterAt) *PipeWriter {
|
|
|
|
return &PipeWriter{
|
|
|
|
writer: w,
|
|
|
|
err: nil,
|
|
|
|
done: make(chan bool),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Close waits for the upload to end, closes the pipeat.PipeWriterAt and returns an error if any.
func (p *PipeWriter) Close() error {
	// close the writer side first so the reading end can finish
	p.writer.Close() //nolint:errcheck // the returned error is always null
	// block until Done() is called: p.err is only valid after that
	<-p.done
	return p.err
}
|
|
|
|
|
|
|
|
// Done unlocks other goroutines waiting on Close().
// It must be called when the upload ends
func (p *PipeWriter) Done(err error) {
	// set the error before signaling: Close() reads p.err only after
	// receiving from the done channel
	p.err = err
	p.done <- true
}
|
|
|
|
|
|
|
|
// WriteAt is a wrapper for pipeat WriteAt
|
|
|
|
func (p *PipeWriter) WriteAt(data []byte, off int64) (int, error) {
|
|
|
|
return p.writer.WriteAt(data, off)
|
|
|
|
}
|
|
|
|
|
2020-07-24 21:39:38 +00:00
|
|
|
// Write is a wrapper for pipeat Write
|
|
|
|
func (p *PipeWriter) Write(data []byte) (int, error) {
|
|
|
|
return p.writer.Write(data)
|
|
|
|
}
|
|
|
|
|
2020-01-19 06:41:05 +00:00
|
|
|
// IsDirectory checks if a path exists and is a directory
|
|
|
|
func IsDirectory(fs Fs, path string) (bool, error) {
|
|
|
|
fileInfo, err := fs.Stat(path)
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
return fileInfo.IsDir(), err
|
|
|
|
}
|
|
|
|
|
|
|
|
// IsLocalOsFs returns true if fs is the local filesystem implementation
func IsLocalOsFs(fs Fs) bool {
	// osFsName is the name constant declared by the local filesystem backend
	return fs.Name() == osFsName
}
|
|
|
|
|
|
|
|
// ValidateS3FsConfig returns nil if the specified s3 config is valid, otherwise an error
|
|
|
|
func ValidateS3FsConfig(config *S3FsConfig) error {
|
|
|
|
if len(config.Bucket) == 0 {
|
|
|
|
return errors.New("bucket cannot be empty")
|
|
|
|
}
|
|
|
|
if len(config.Region) == 0 {
|
|
|
|
return errors.New("region cannot be empty")
|
|
|
|
}
|
2020-02-16 09:14:44 +00:00
|
|
|
if len(config.AccessKey) == 0 && len(config.AccessSecret) > 0 {
|
|
|
|
return errors.New("access_key cannot be empty with access_secret not empty")
|
|
|
|
}
|
|
|
|
if len(config.AccessSecret) == 0 && len(config.AccessKey) > 0 {
|
|
|
|
return errors.New("access_secret cannot be empty with access_key not empty")
|
|
|
|
}
|
2020-10-25 07:18:48 +00:00
|
|
|
if config.KeyPrefix != "" {
|
2020-01-19 22:23:09 +00:00
|
|
|
if strings.HasPrefix(config.KeyPrefix, "/") {
|
|
|
|
return errors.New("key_prefix cannot start with /")
|
|
|
|
}
|
|
|
|
config.KeyPrefix = path.Clean(config.KeyPrefix)
|
|
|
|
if !strings.HasSuffix(config.KeyPrefix, "/") {
|
|
|
|
config.KeyPrefix += "/"
|
|
|
|
}
|
|
|
|
}
|
2020-10-25 11:10:11 +00:00
|
|
|
if config.UploadPartSize != 0 && (config.UploadPartSize < 5 || config.UploadPartSize > 5000) {
|
|
|
|
return errors.New("upload_part_size cannot be != 0, lower than 5 (MB) or greater than 5000 (MB)")
|
2020-03-13 07:54:36 +00:00
|
|
|
}
|
2020-10-25 16:41:04 +00:00
|
|
|
if config.UploadConcurrency < 0 || config.UploadConcurrency > 64 {
|
2020-03-13 18:13:58 +00:00
|
|
|
return fmt.Errorf("invalid upload concurrency: %v", config.UploadConcurrency)
|
|
|
|
}
|
2020-01-19 06:41:05 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-01-31 18:04:00 +00:00
|
|
|
// ValidateGCSFsConfig returns nil if the specified GCS config is valid, otherwise an error
|
|
|
|
func ValidateGCSFsConfig(config *GCSFsConfig, credentialsFilePath string) error {
|
2020-10-25 07:18:48 +00:00
|
|
|
if config.Bucket == "" {
|
2020-01-31 18:04:00 +00:00
|
|
|
return errors.New("bucket cannot be empty")
|
|
|
|
}
|
2020-10-25 07:18:48 +00:00
|
|
|
if config.KeyPrefix != "" {
|
2020-01-31 18:04:00 +00:00
|
|
|
if strings.HasPrefix(config.KeyPrefix, "/") {
|
|
|
|
return errors.New("key_prefix cannot start with /")
|
|
|
|
}
|
|
|
|
config.KeyPrefix = path.Clean(config.KeyPrefix)
|
|
|
|
if !strings.HasSuffix(config.KeyPrefix, "/") {
|
|
|
|
config.KeyPrefix += "/"
|
|
|
|
}
|
|
|
|
}
|
2020-02-19 08:41:15 +00:00
|
|
|
if len(config.Credentials) == 0 && config.AutomaticCredentials == 0 {
|
2020-01-31 18:04:00 +00:00
|
|
|
fi, err := os.Stat(credentialsFilePath)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("invalid credentials %v", err)
|
|
|
|
}
|
|
|
|
if fi.Size() == 0 {
|
|
|
|
return errors.New("credentials cannot be empty")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-10-25 07:18:48 +00:00
|
|
|
// ValidateAzBlobFsConfig returns nil if the specified Azure Blob config is valid, otherwise an error
func ValidateAzBlobFsConfig(config *AzBlobFsConfig) error {
	// a SAS URL is self-contained: if set we only check that it parses
	// and return early. NOTE(review): container, credentials and
	// key_prefix validation/normalization are skipped in this case —
	// confirm this is intended
	if config.SASURL != "" {
		_, err := url.Parse(config.SASURL)
		return err
	}
	if config.Container == "" {
		return errors.New("container cannot be empty")
	}
	// without a SAS URL both account name and key are required
	if config.AccountName == "" || config.AccountKey == "" {
		return errors.New("credentials cannot be empty")
	}
	if config.KeyPrefix != "" {
		if strings.HasPrefix(config.KeyPrefix, "/") {
			return errors.New("key_prefix cannot start with /")
		}
		// normalize: clean the prefix and ensure the trailing "/"
		config.KeyPrefix = path.Clean(config.KeyPrefix)
		if !strings.HasSuffix(config.KeyPrefix, "/") {
			config.KeyPrefix += "/"
		}
	}
	// part size is expressed in MB, see AzBlobFsConfig.UploadPartSize
	if config.UploadPartSize < 0 || config.UploadPartSize > 100 {
		return fmt.Errorf("invalid upload part size: %v", config.UploadPartSize)
	}
	if config.UploadConcurrency < 0 || config.UploadConcurrency > 64 {
		return fmt.Errorf("invalid upload concurrency: %v", config.UploadConcurrency)
	}
	return nil
}
|
|
|
|
|
2020-01-19 06:41:05 +00:00
|
|
|
// SetPathPermissions calls fs.Chown.
|
|
|
|
// It does nothing for local filesystem on windows
|
|
|
|
func SetPathPermissions(fs Fs, path string, uid int, gid int) {
|
|
|
|
if IsLocalOsFs(fs) {
|
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if err := fs.Chown(path, uid, gid); err != nil {
|
|
|
|
fsLog(fs, logger.LevelWarn, "error chowning path %v: %v", path, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// fsLog logs a formatted message at the given level, using the
// filesystem's name and connection ID as the logger sender fields
func fsLog(fs Fs, level logger.LogLevel, format string, v ...interface{}) {
	logger.Log(level, fs.Name(), fs.ConnectionID(), format, v...)
}
|