  1. // Package vfs provides local and remote filesystems support
  2. package vfs
  3. import (
  4. "errors"
  5. "fmt"
  6. "io"
  7. "net/url"
  8. "os"
  9. "path"
  10. "path/filepath"
  11. "runtime"
  12. "strings"
  13. "time"
  14. "github.com/eikenb/pipeat"
  15. "github.com/drakkan/sftpgo/kms"
  16. "github.com/drakkan/sftpgo/logger"
  17. "github.com/drakkan/sftpgo/utils"
  18. )
// dirMimeType is the mime type reported for directories.
const dirMimeType = "inode/directory"

// validAzAccessTier lists the accepted values for the Azure Blob access tier.
// The empty string leaves the tier unset.
var validAzAccessTier = []string{"", "Archive", "Hot", "Cool"}
// Fs defines the interface for filesystem backends
type Fs interface {
	// Name returns the name of the Fs implementation
	Name() string
	// ConnectionID returns the ID of the connection associated with this Fs
	ConnectionID() string
	// Stat returns a FileInfo describing the named file
	Stat(name string) (os.FileInfo, error)
	// Lstat is like Stat but, for symbolic links, describes the link itself
	Lstat(name string) (os.FileInfo, error)
	// Open opens the named file for reading at the given offset.
	// Backends may return either a File or a pipe reader; the returned
	// func, if not nil, is a cancel/cleanup function
	Open(name string, offset int64) (File, *pipeat.PipeReaderAt, func(), error)
	// Create opens the named file for writing with the given flag.
	// Backends may return either a File or a PipeWriter; the returned
	// func, if not nil, is a cancel/cleanup function
	Create(name string, flag int) (File, *PipeWriter, func(), error)
	// Rename renames (moves) source to target
	Rename(source, target string) error
	// Remove removes the named file or directory
	Remove(name string, isDir bool) error
	// Mkdir creates a new directory with the specified name
	Mkdir(name string) error
	// Symlink creates source as a symbolic link to target
	Symlink(source, target string) error
	// Chown changes the numeric uid and gid of the named file
	Chown(name string, uid int, gid int) error
	// Chmod changes the mode of the named file
	Chmod(name string, mode os.FileMode) error
	// Chtimes changes the access and modification times of the named file
	Chtimes(name string, atime, mtime time.Time) error
	// Truncate changes the size of the named file
	Truncate(name string, size int64) error
	// ReadDir reads the directory named by dirname and returns its entries
	ReadDir(dirname string) ([]os.FileInfo, error)
	// Readlink returns the destination of the named symbolic link
	Readlink(name string) (string, error)
	// IsUploadResumeSupported returns true if resuming uploads is supported
	IsUploadResumeSupported() bool
	// IsAtomicUploadSupported returns true if atomic uploads are supported
	IsAtomicUploadSupported() bool
	// CheckRootPath checks the root path for the given user.
	// NOTE(review): exact semantics of the bool result are backend defined — confirm
	CheckRootPath(username string, uid int, gid int) bool
	// ResolvePath returns the backend path matching the given SFTP path
	ResolvePath(sftpPath string) (string, error)
	// IsNotExist returns true if err reports that a file or directory does not exist
	IsNotExist(err error) bool
	// IsPermission returns true if err reports a permission problem
	IsPermission(err error) bool
	// IsNotSupported returns true if err reports an unsupported operation
	IsNotSupported(err error) bool
	// ScanRootDirContents returns the number of files and their total size below the root
	ScanRootDirContents() (int, int64, error)
	// GetDirSize returns the number of files and the total size for the given directory
	GetDirSize(dirname string) (int, int64, error)
	// GetAtomicUploadPath returns the temporary path to use for an atomic upload
	GetAtomicUploadPath(name string) string
	// GetRelativePath returns the path of name relative to the filesystem root
	GetRelativePath(name string) string
	// Walk walks the file tree rooted at root, calling walkFn for each file or directory
	Walk(root string, walkFn filepath.WalkFunc) error
	// Join joins any number of path elements into a single path
	Join(elem ...string) string
	// HasVirtualFolders returns true if the backend emulates folders (object storages)
	HasVirtualFolders() bool
	// GetMimeType returns the content type for the named file
	GetMimeType(name string) (string, error)
}
// File defines an interface representing a SFTPGo file
type File interface {
	io.Reader
	io.Writer
	io.Closer
	io.ReaderAt
	io.WriterAt
	io.Seeker
	// Stat returns a FileInfo describing the file
	Stat() (os.FileInfo, error)
	// Name returns the name of the file
	Name() string
	// Truncate changes the size of the file
	Truncate(size int64) error
}
// ErrVfsUnsupported defines the error for an unsupported VFS operation.
// NOTE(review): the capitalized message is contrary to Go error-string
// convention, but it is kept as-is since callers may compare the text.
var ErrVfsUnsupported = errors.New("Not supported")
  69. // QuotaCheckResult defines the result for a quota check
  70. type QuotaCheckResult struct {
  71. HasSpace bool
  72. AllowedSize int64
  73. AllowedFiles int
  74. UsedSize int64
  75. UsedFiles int
  76. QuotaSize int64
  77. QuotaFiles int
  78. }
  79. // GetRemainingSize returns the remaining allowed size
  80. func (q *QuotaCheckResult) GetRemainingSize() int64 {
  81. if q.QuotaSize > 0 {
  82. return q.QuotaSize - q.UsedSize
  83. }
  84. return 0
  85. }
  86. // GetRemainingFiles returns the remaining allowed files
  87. func (q *QuotaCheckResult) GetRemainingFiles() int {
  88. if q.QuotaFiles > 0 {
  89. return q.QuotaFiles - q.UsedFiles
  90. }
  91. return 0
  92. }
// S3FsConfig defines the configuration for S3 based filesystem
type S3FsConfig struct {
	// Bucket is the name of the S3 bucket, it cannot be empty
	Bucket string `json:"bucket,omitempty"`
	// KeyPrefix is similar to a chroot directory for local filesystem.
	// If specified then the SFTP user will only see objects that starts
	// with this prefix and so you can restrict access to a specific
	// folder. The prefix, if not empty, must not start with "/" and must
	// end with "/".
	// If empty the whole bucket contents will be available
	KeyPrefix string `json:"key_prefix,omitempty"`
	// Region is the AWS region, it cannot be empty
	Region string `json:"region,omitempty"`
	// AccessKey is the S3 access key, required if AccessSecret is set
	AccessKey string `json:"access_key,omitempty"`
	// AccessSecret is the S3 secret, stored as kms.Secret,
	// required if AccessKey is set
	AccessSecret *kms.Secret `json:"access_secret,omitempty"`
	// Endpoint is an optional S3-compatible endpoint
	Endpoint string `json:"endpoint,omitempty"`
	// StorageClass is the optional S3 storage class
	StorageClass string `json:"storage_class,omitempty"`
	// The buffer size (in MB) to use for multipart uploads. The minimum allowed part size is 5MB,
	// and if this value is set to zero, the default value (5MB) for the AWS SDK will be used.
	// The minimum allowed value is 5.
	// Please note that if the upload bandwidth between the SFTP client and SFTPGo is greater than
	// the upload bandwidth between SFTPGo and S3 then the SFTP client have to wait for the upload
	// of the last parts to S3 after it ends the file upload to SFTPGo, and it may time out.
	// Keep this in mind if you customize these parameters.
	UploadPartSize int64 `json:"upload_part_size,omitempty"`
	// How many parts are uploaded in parallel
	UploadConcurrency int `json:"upload_concurrency,omitempty"`
}
// GCSFsConfig defines the configuration for Google Cloud Storage based filesystem
type GCSFsConfig struct {
	// Bucket is the name of the GCS bucket, it cannot be empty
	Bucket string `json:"bucket,omitempty"`
	// KeyPrefix is similar to a chroot directory for local filesystem.
	// If specified then the SFTP user will only see objects that starts
	// with this prefix and so you can restrict access to a specific
	// folder. The prefix, if not empty, must not start with "/" and must
	// end with "/".
	// If empty the whole bucket contents will be available
	KeyPrefix string `json:"key_prefix,omitempty"`
	// CredentialFile is the path to the credentials file on disk,
	// it is not exposed over JSON
	CredentialFile string `json:"-"`
	// Credentials is the inline service account credentials secret
	Credentials *kms.Secret `json:"credentials,omitempty"`
	// 0 explicit, 1 automatic
	AutomaticCredentials int `json:"automatic_credentials,omitempty"`
	// StorageClass is the optional GCS storage class
	StorageClass string `json:"storage_class,omitempty"`
}
// AzBlobFsConfig defines the configuration for Azure Blob Storage based filesystem
type AzBlobFsConfig struct {
	// Container is the name of the blob container, required unless SASURL is set
	Container string `json:"container,omitempty"`
	// Storage Account Name, leave blank to use SAS URL
	AccountName string `json:"account_name,omitempty"`
	// Storage Account Key leave blank to use SAS URL.
	// The access key is stored encrypted based on the kms configuration
	AccountKey *kms.Secret `json:"account_key,omitempty"`
	// Optional endpoint. Default is "blob.core.windows.net".
	// If you use the emulator the endpoint must include the protocol,
	// for example "http://127.0.0.1:10000"
	Endpoint string `json:"endpoint,omitempty"`
	// Shared access signature URL, leave blank if using account/key
	SASURL string `json:"sas_url,omitempty"`
	// KeyPrefix is similar to a chroot directory for local filesystem.
	// If specified then the SFTPGo users will only see objects that starts
	// with this prefix and so you can restrict access to a specific
	// folder. The prefix, if not empty, must not start with "/" and must
	// end with "/".
	// If empty the whole bucket contents will be available
	KeyPrefix string `json:"key_prefix,omitempty"`
	// The buffer size (in MB) to use for multipart uploads.
	// If this value is set to zero, the default value (1MB) for the Azure SDK will be used.
	// Please note that if the upload bandwidth between the SFTP client and SFTPGo server is
	// greater than the upload bandwidth between SFTPGo and Azure then the SFTP client have
	// to wait for the upload of the last parts to Azure after it ends the file upload to SFTPGo,
	// and it may time out.
	// Keep this in mind if you customize these parameters.
	UploadPartSize int64 `json:"upload_part_size,omitempty"`
	// How many parts are uploaded in parallel
	UploadConcurrency int `json:"upload_concurrency,omitempty"`
	// Set to true if you use an Azure emulator such as Azurite
	UseEmulator bool `json:"use_emulator,omitempty"`
	// Blob Access Tier, must be one of validAzAccessTier
	AccessTier string `json:"access_tier,omitempty"`
}
// PipeWriter defines a wrapper for pipeat.PipeWriterAt.
type PipeWriter struct {
	writer *pipeat.PipeWriterAt
	err    error     // final upload error, set by Done before signaling done
	done   chan bool // signaled by Done; Close blocks on it before reading err
}
  177. // NewPipeWriter initializes a new PipeWriter
  178. func NewPipeWriter(w *pipeat.PipeWriterAt) *PipeWriter {
  179. return &PipeWriter{
  180. writer: w,
  181. err: nil,
  182. done: make(chan bool),
  183. }
  184. }
// Close waits for the upload to end, closes the pipeat.PipeWriterAt and returns an error if any.
func (p *PipeWriter) Close() error {
	p.writer.Close() //nolint:errcheck // the returned error is always null
	// the receive synchronizes with the send in Done, so p.err is
	// safely published before we read it
	<-p.done
	return p.err
}
// Done unlocks other goroutines waiting on Close().
// It must be called when the upload ends
func (p *PipeWriter) Done(err error) {
	// set the result before signaling: the channel send happens-before
	// the receive in Close(), which then reads p.err
	p.err = err
	p.done <- true
}
// WriteAt is a wrapper for pipeat WriteAt.
// It returns the number of bytes written and any error from the underlying pipe.
func (p *PipeWriter) WriteAt(data []byte, off int64) (int, error) {
	return p.writer.WriteAt(data, off)
}
// Write is a wrapper for pipeat Write.
// It returns the number of bytes written and any error from the underlying pipe.
func (p *PipeWriter) Write(data []byte) (int, error) {
	return p.writer.Write(data)
}
  205. // IsDirectory checks if a path exists and is a directory
  206. func IsDirectory(fs Fs, path string) (bool, error) {
  207. fileInfo, err := fs.Stat(path)
  208. if err != nil {
  209. return false, err
  210. }
  211. return fileInfo.IsDir(), err
  212. }
// IsLocalOsFs returns true if fs is the local filesystem implementation
func IsLocalOsFs(fs Fs) bool {
	// osFsName is declared by the local filesystem implementation elsewhere in this package
	return fs.Name() == osFsName
}
  217. func checkS3Credentials(config *S3FsConfig) error {
  218. if config.AccessKey == "" && !config.AccessSecret.IsEmpty() {
  219. return errors.New("access_key cannot be empty with access_secret not empty")
  220. }
  221. if config.AccessSecret.IsEmpty() && config.AccessKey != "" {
  222. return errors.New("access_secret cannot be empty with access_key not empty")
  223. }
  224. if config.AccessSecret.IsEncrypted() && !config.AccessSecret.IsValid() {
  225. return errors.New("invalid encrypted access_secret")
  226. }
  227. if !config.AccessSecret.IsEmpty() && !config.AccessSecret.IsValidInput() {
  228. return errors.New("invalid access_secret")
  229. }
  230. return nil
  231. }
  232. // ValidateS3FsConfig returns nil if the specified s3 config is valid, otherwise an error
  233. func ValidateS3FsConfig(config *S3FsConfig) error {
  234. if config.AccessSecret == nil {
  235. config.AccessSecret = kms.NewEmptySecret()
  236. }
  237. if config.Bucket == "" {
  238. return errors.New("bucket cannot be empty")
  239. }
  240. if config.Region == "" {
  241. return errors.New("region cannot be empty")
  242. }
  243. if err := checkS3Credentials(config); err != nil {
  244. return err
  245. }
  246. if config.KeyPrefix != "" {
  247. if strings.HasPrefix(config.KeyPrefix, "/") {
  248. return errors.New("key_prefix cannot start with /")
  249. }
  250. config.KeyPrefix = path.Clean(config.KeyPrefix)
  251. if !strings.HasSuffix(config.KeyPrefix, "/") {
  252. config.KeyPrefix += "/"
  253. }
  254. }
  255. if config.UploadPartSize != 0 && (config.UploadPartSize < 5 || config.UploadPartSize > 5000) {
  256. return errors.New("upload_part_size cannot be != 0, lower than 5 (MB) or greater than 5000 (MB)")
  257. }
  258. if config.UploadConcurrency < 0 || config.UploadConcurrency > 64 {
  259. return fmt.Errorf("invalid upload concurrency: %v", config.UploadConcurrency)
  260. }
  261. return nil
  262. }
  263. // ValidateGCSFsConfig returns nil if the specified GCS config is valid, otherwise an error
  264. func ValidateGCSFsConfig(config *GCSFsConfig, credentialsFilePath string) error {
  265. if config.Credentials == nil {
  266. config.Credentials = kms.NewEmptySecret()
  267. }
  268. if config.Bucket == "" {
  269. return errors.New("bucket cannot be empty")
  270. }
  271. if config.KeyPrefix != "" {
  272. if strings.HasPrefix(config.KeyPrefix, "/") {
  273. return errors.New("key_prefix cannot start with /")
  274. }
  275. config.KeyPrefix = path.Clean(config.KeyPrefix)
  276. if !strings.HasSuffix(config.KeyPrefix, "/") {
  277. config.KeyPrefix += "/"
  278. }
  279. }
  280. if config.Credentials.IsEncrypted() && !config.Credentials.IsValid() {
  281. return errors.New("invalid encrypted credentials")
  282. }
  283. if !config.Credentials.IsValidInput() && config.AutomaticCredentials == 0 {
  284. fi, err := os.Stat(credentialsFilePath)
  285. if err != nil {
  286. return fmt.Errorf("invalid credentials %v", err)
  287. }
  288. if fi.Size() == 0 {
  289. return errors.New("credentials cannot be empty")
  290. }
  291. }
  292. return nil
  293. }
  294. func checkAzCredentials(config *AzBlobFsConfig) error {
  295. if config.AccountName == "" || !config.AccountKey.IsValidInput() {
  296. return errors.New("credentials cannot be empty or invalid")
  297. }
  298. if config.AccountKey.IsEncrypted() && !config.AccountKey.IsValid() {
  299. return errors.New("invalid encrypted account_key")
  300. }
  301. return nil
  302. }
  303. // ValidateAzBlobFsConfig returns nil if the specified Azure Blob config is valid, otherwise an error
  304. func ValidateAzBlobFsConfig(config *AzBlobFsConfig) error {
  305. if config.AccountKey == nil {
  306. config.AccountKey = kms.NewEmptySecret()
  307. }
  308. if config.SASURL != "" {
  309. _, err := url.Parse(config.SASURL)
  310. return err
  311. }
  312. if config.Container == "" {
  313. return errors.New("container cannot be empty")
  314. }
  315. if err := checkAzCredentials(config); err != nil {
  316. return err
  317. }
  318. if config.KeyPrefix != "" {
  319. if strings.HasPrefix(config.KeyPrefix, "/") {
  320. return errors.New("key_prefix cannot start with /")
  321. }
  322. config.KeyPrefix = path.Clean(config.KeyPrefix)
  323. if !strings.HasSuffix(config.KeyPrefix, "/") {
  324. config.KeyPrefix += "/"
  325. }
  326. }
  327. if config.UploadPartSize < 0 || config.UploadPartSize > 100 {
  328. return fmt.Errorf("invalid upload part size: %v", config.UploadPartSize)
  329. }
  330. if config.UploadConcurrency < 0 || config.UploadConcurrency > 64 {
  331. return fmt.Errorf("invalid upload concurrency: %v", config.UploadConcurrency)
  332. }
  333. if !utils.IsStringInSlice(config.AccessTier, validAzAccessTier) {
  334. return fmt.Errorf("invalid access tier %#v, valid values: \"''%v\"", config.AccessTier, strings.Join(validAzAccessTier, ", "))
  335. }
  336. return nil
  337. }
  338. // SetPathPermissions calls fs.Chown.
  339. // It does nothing for local filesystem on windows
  340. func SetPathPermissions(fs Fs, path string, uid int, gid int) {
  341. if IsLocalOsFs(fs) {
  342. if runtime.GOOS == "windows" {
  343. return
  344. }
  345. }
  346. if err := fs.Chown(path, uid, gid); err != nil {
  347. fsLog(fs, logger.LevelWarn, "error chowning path %v: %v", path, err)
  348. }
  349. }
// fsLog logs a message at the given level, using the filesystem name and
// its connection ID as the logger sender and connection fields.
func fsLog(fs Fs, level logger.LogLevel, format string, v ...interface{}) {
	logger.Log(level, fs.Name(), fs.ConnectionID(), format, v...)
}