// Copyright (C) 2019 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//go:build !nogcs
// +build !nogcs

package vfs

import (
	"context"
	"errors"
	"fmt"
	"io"
	"mime"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"cloud.google.com/go/storage"
	"github.com/eikenb/pipeat"
	"github.com/pkg/sftp"
	"github.com/rs/xid"
	"google.golang.org/api/googleapi"
	"google.golang.org/api/iterator"
	"google.golang.org/api/option"

	"github.com/drakkan/sftpgo/v2/internal/logger"
	"github.com/drakkan/sftpgo/v2/internal/metric"
	"github.com/drakkan/sftpgo/v2/internal/util"
	"github.com/drakkan/sftpgo/v2/internal/version"
)

const (
	defaultGCSPageSize = 5000
)

var (
	gcsDefaultFieldsSelection = []string{"Name", "Size", "Deleted", "Updated", "ContentType", "Metadata"}
)

// GCSFs is a Fs implementation for Google Cloud Storage.
type GCSFs struct {
	connectionID string
	localTempDir string
	// if not empty, this fs is mounted as a virtual folder in the specified path
	mountPath      string
	config         *GCSFsConfig
	svc            *storage.Client
	ctxTimeout     time.Duration
	ctxLongTimeout time.Duration
}

func init() {
	version.AddFeature("+gcs")
}

// NewGCSFs returns a GCSFs object that allows interacting with Google Cloud Storage
func NewGCSFs(connectionID, localTempDir, mountPath string, config GCSFsConfig) (Fs, error) {
	if localTempDir == "" {
		localTempDir = getLocalTempDir()
	}
	var err error
	fs := &GCSFs{
		connectionID:   connectionID,
		localTempDir:   localTempDir,
		mountPath:      getMountPath(mountPath),
		config:         &config,
		ctxTimeout:     30 * time.Second,
		ctxLongTimeout: 300 * time.Second,
	}
	if err = fs.config.validate(); err != nil {
		return fs, err
	}
	ctx := context.Background()
	if fs.config.AutomaticCredentials > 0 {
		fs.svc, err = storage.NewClient(ctx)
	} else {
		err = fs.config.Credentials.TryDecrypt()
		if err != nil {
			return fs, err
		}
		fs.svc, err = storage.NewClient(ctx, option.WithCredentialsJSON([]byte(fs.config.Credentials.GetPayload())))
	}
	return fs, err
}
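
// Construction sketch (illustrative only, the variable names are placeholders):
//
//	gcsFs, err := NewGCSFs(connectionID, "", "", cfg)
//	if err != nil {
//		return err
//	}
//	defer gcsFs.Close()
//
// With AutomaticCredentials > 0 the storage client relies on Application
// Default Credentials; otherwise the encrypted JSON credentials are decrypted
// and passed via option.WithCredentialsJSON.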

// Name returns the name for the Fs implementation
func (fs *GCSFs) Name() string {
	return fmt.Sprintf("%s bucket %q", gcsfsName, fs.config.Bucket)
}

// ConnectionID returns the connection ID associated to this Fs implementation
func (fs *GCSFs) ConnectionID() string {
	return fs.connectionID
}

// Stat returns a FileInfo describing the named file
func (fs *GCSFs) Stat(name string) (os.FileInfo, error) {
	if name == "" || name == "/" || name == "." {
		return NewFileInfo(name, true, 0, time.Unix(0, 0), false), nil
	}
	if fs.config.KeyPrefix == name+"/" {
		return NewFileInfo(name, true, 0, time.Unix(0, 0), false), nil
	}
	return fs.getObjectStat(name)
}

// Lstat returns a FileInfo describing the named file
func (fs *GCSFs) Lstat(name string) (os.FileInfo, error) {
	return fs.Stat(name)
}

// Open opens the named file for reading
func (fs *GCSFs) Open(name string, offset int64) (File, PipeReader, func(), error) {
	r, w, err := pipeat.PipeInDir(fs.localTempDir)
	if err != nil {
		return nil, nil, nil, err
	}
	p := NewPipeReader(r)
	if readMetadata > 0 {
		attrs, err := fs.headObject(name)
		if err != nil {
			r.Close()
			w.Close()
			return nil, nil, nil, err
		}
		p.setMetadata(attrs.Metadata)
	}
	bkt := fs.svc.Bucket(fs.config.Bucket)
	obj := bkt.Object(name)
	ctx, cancelFn := context.WithCancel(context.Background())

	objectReader, err := obj.NewRangeReader(ctx, offset, -1)
	if err == nil && offset > 0 && objectReader.Attrs.ContentEncoding == "gzip" {
		err = fmt.Errorf("range request is not possible for gzip content encoding, requested offset %d", offset)
		objectReader.Close()
	}
	if err != nil {
		r.Close()
		w.Close()
		cancelFn()
		return nil, nil, nil, err
	}
	go func() {
		defer cancelFn()
		defer objectReader.Close()

		n, err := io.Copy(w, objectReader)
		w.CloseWithError(err) //nolint:errcheck
		fsLog(fs, logger.LevelDebug, "download completed, path: %q size: %v, err: %+v", name, n, err)
		metric.GCSTransferCompleted(n, 1, err)
	}()
	return nil, p, cancelFn, nil
}
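
// Note on Open: downloads are streamed through a pipe created in localTempDir.
// A goroutine copies the object, read via NewRangeReader starting at the
// requested offset, into the pipe writer while the caller reads from the
// returned PipeReader. Non-zero offsets are rejected for objects stored with
// gzip content encoding, since a range request is not possible in that case.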

// Create creates or opens the named file for writing
func (fs *GCSFs) Create(name string, flag, checks int) (File, PipeWriter, func(), error) {
	if checks&CheckParentDir != 0 {
		_, err := fs.Stat(path.Dir(name))
		if err != nil {
			return nil, nil, nil, err
		}
	}
	r, w, err := pipeat.PipeInDir(fs.localTempDir)
	if err != nil {
		return nil, nil, nil, err
	}
	var partialFileName string
	var attrs *storage.ObjectAttrs
	var statErr error

	bkt := fs.svc.Bucket(fs.config.Bucket)
	obj := bkt.Object(name)
	if flag == -1 {
		obj = obj.If(storage.Conditions{DoesNotExist: true})
	} else {
		attrs, statErr = fs.headObject(name)
		if statErr == nil {
			obj = obj.If(storage.Conditions{GenerationMatch: attrs.Generation})
		} else if fs.IsNotExist(statErr) {
			obj = obj.If(storage.Conditions{DoesNotExist: true})
		} else {
			fsLog(fs, logger.LevelWarn, "unable to set precondition for %q, stat err: %v", name, statErr)
		}
	}
	ctx, cancelFn := context.WithCancel(context.Background())

	var p PipeWriter
	var objectWriter *storage.Writer
	if checks&CheckResume != 0 {
		if statErr != nil {
			cancelFn()
			r.Close()
			w.Close()
			return nil, nil, nil, fmt.Errorf("unable to resume %q stat error: %w", name, statErr)
		}
		p = newPipeWriterAtOffset(w, attrs.Size)
		partialFileName = fs.getTempObject(name)
		partialObj := bkt.Object(partialFileName)
		partialObj = partialObj.If(storage.Conditions{DoesNotExist: true})
		objectWriter = partialObj.NewWriter(ctx)
	} else {
		p = NewPipeWriter(w)
		objectWriter = obj.NewWriter(ctx)
	}
	if fs.config.UploadPartSize > 0 {
		objectWriter.ChunkSize = int(fs.config.UploadPartSize) * 1024 * 1024
	}
	if fs.config.UploadPartMaxTime > 0 {
		objectWriter.ChunkRetryDeadline = time.Duration(fs.config.UploadPartMaxTime) * time.Second
	}
	fs.setWriterAttrs(objectWriter, flag, name)

	go func() {
		defer cancelFn()

		n, err := io.Copy(objectWriter, r)
		closeErr := objectWriter.Close()
		if err == nil {
			err = closeErr
		}
		if err == nil && partialFileName != "" {
			partialObject := bkt.Object(partialFileName)
			partialObject = partialObject.If(storage.Conditions{GenerationMatch: objectWriter.Attrs().Generation})
			err = fs.composeObjects(ctx, obj, partialObject)
		}
		r.CloseWithError(err) //nolint:errcheck
		p.Done(err)
		fsLog(fs, logger.LevelDebug, "upload completed, path: %q, acl: %q, read bytes: %v, err: %+v",
			name, fs.config.ACL, n, err)
		metric.GCSTransferCompleted(n, 0, err)
	}()
	if uploadMode&8 != 0 {
		return nil, p, nil, nil
	}
	return nil, p, cancelFn, nil
}
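
// Note on Create: uploads are streamed from the returned PipeWriter to a
// storage.Writer in a goroutine. A DoesNotExist or GenerationMatch
// precondition guards against concurrent writers. When CheckResume is set,
// the new data is written to a temporary object (see getTempObject) and then
// appended to the existing one via composeObjects, emulating a resumed upload
// on top of GCS object compose.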

// Rename renames (moves) source to target.
func (fs *GCSFs) Rename(source, target string) (int, int64, error) {
	if source == target {
		return -1, -1, nil
	}
	_, err := fs.Stat(path.Dir(target))
	if err != nil {
		return -1, -1, err
	}
	fi, err := fs.getObjectStat(source)
	if err != nil {
		return -1, -1, err
	}
	return fs.renameInternal(source, target, fi, 0)
}

// Remove removes the named file or (empty) directory.
func (fs *GCSFs) Remove(name string, isDir bool) error {
	if isDir {
		hasContents, err := fs.hasContents(name)
		if err != nil {
			return err
		}
		if hasContents {
			return fmt.Errorf("cannot remove non empty directory: %q", name)
		}
		if !strings.HasSuffix(name, "/") {
			name += "/"
		}
	}
	obj := fs.svc.Bucket(fs.config.Bucket).Object(name)
	attrs, statErr := fs.headObject(name)
	if statErr == nil {
		obj = obj.If(storage.Conditions{GenerationMatch: attrs.Generation})
	} else {
		fsLog(fs, logger.LevelWarn, "unable to set precondition for deleting %q, stat err: %v",
			name, statErr)
	}
	ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
	defer cancelFn()

	err := obj.Delete(ctx)
	if isDir && fs.IsNotExist(err) {
		// we can have directories without a trailing "/" (created using v2.1.0 and before)
		ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
		defer cancelFn()
		err = fs.svc.Bucket(fs.config.Bucket).Object(strings.TrimSuffix(name, "/")).Delete(ctx)
	}
	metric.GCSDeleteObjectCompleted(err)
	return err
}
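
// Note on Remove: directories are emulated as marker objects whose name ends
// with "/", so the trailing slash is added before deleting a directory. The
// delete uses a GenerationMatch precondition when the object can be stat-ed;
// a second attempt without the trailing "/" handles markers created by
// SFTPGo 2.1.0 and earlier.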

// Mkdir creates a new directory with the specified name and default permissions
func (fs *GCSFs) Mkdir(name string) error {
	_, err := fs.Stat(name)
	if !fs.IsNotExist(err) {
		return err
	}
	return fs.mkdirInternal(name)
}

// Symlink creates source as a symbolic link to target.
func (*GCSFs) Symlink(_, _ string) error {
	return ErrVfsUnsupported
}

// Readlink returns the destination of the named symbolic link
func (*GCSFs) Readlink(_ string) (string, error) {
	return "", ErrVfsUnsupported
}

// Chown changes the numeric uid and gid of the named file.
func (*GCSFs) Chown(_ string, _ int, _ int) error {
	return ErrVfsUnsupported
}

// Chmod changes the mode of the named file to mode.
func (*GCSFs) Chmod(_ string, _ os.FileMode) error {
	return ErrVfsUnsupported
}

// Chtimes changes the access and modification times of the named file.
func (fs *GCSFs) Chtimes(name string, _, mtime time.Time, isUploading bool) error {
	if isUploading {
		return nil
	}
	obj := fs.svc.Bucket(fs.config.Bucket).Object(name)
	attrs, err := fs.headObject(name)
	if err != nil {
		return err
	}
	obj = obj.If(storage.Conditions{MetagenerationMatch: attrs.Metageneration})
	ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
	defer cancelFn()

	metadata := attrs.Metadata
	if metadata == nil {
		metadata = make(map[string]string)
	}
	metadata[lastModifiedField] = strconv.FormatInt(mtime.UnixMilli(), 10)
	objectAttrsToUpdate := storage.ObjectAttrsToUpdate{
		Metadata: metadata,
	}
	_, err = obj.Update(ctx, objectAttrsToUpdate)
	return err
}
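
// Note on Chtimes: GCS does not allow changing an object's update time, so
// the requested modification time is stored, as milliseconds since the epoch,
// in the object metadata under lastModifiedField. getLastModified reads it
// back when building FileInfo results (see getObjectStat and the listers).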

// Truncate changes the size of the named file.
// Truncate by path is not supported, while truncating an opened
// file is handled inside base transfer
func (*GCSFs) Truncate(_ string, _ int64) error {
	return ErrVfsUnsupported
}

// ReadDir reads the directory named by dirname and returns
// a list of directory entries.
func (fs *GCSFs) ReadDir(dirname string) (DirLister, error) {
	// dirname must be already cleaned
	prefix := fs.getPrefix(dirname)
	query := &storage.Query{Prefix: prefix, Delimiter: "/"}
	err := query.SetAttrSelection(gcsDefaultFieldsSelection)
	if err != nil {
		return nil, err
	}
	bkt := fs.svc.Bucket(fs.config.Bucket)

	return &gcsDirLister{
		bucket:   bkt,
		query:    query,
		timeout:  fs.ctxTimeout,
		prefix:   prefix,
		prefixes: make(map[string]bool),
	}, nil
}
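
// Note on ReadDir: the query uses "/" as delimiter so that GCS returns only
// the immediate children of the prefix, with subdirectories reported as
// prefixes. The returned gcsDirLister pages through the results lazily,
// defaultGCSPageSize entries at a time.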

// IsUploadResumeSupported returns true if resuming uploads is supported.
// Resuming uploads is not supported on GCS
func (*GCSFs) IsUploadResumeSupported() bool {
	return false
}

// IsConditionalUploadResumeSupported returns if resuming uploads is supported
// for the specified size
func (*GCSFs) IsConditionalUploadResumeSupported(_ int64) bool {
	return true
}

// IsAtomicUploadSupported returns true if atomic upload is supported.
// GCS uploads are already atomic, we don't need to upload to a temporary
// file
func (*GCSFs) IsAtomicUploadSupported() bool {
	return false
}

// IsNotExist returns a boolean indicating whether the error is known to
// report that a file or directory does not exist
func (*GCSFs) IsNotExist(err error) bool {
	if err == nil {
		return false
	}
	if err == storage.ErrObjectNotExist || err == storage.ErrBucketNotExist {
		return true
	}
	if e, ok := err.(*googleapi.Error); ok {
		if e.Code == http.StatusNotFound {
			return true
		}
	}
	return false
}

// IsPermission returns a boolean indicating whether the error is known to
// report that permission is denied.
func (*GCSFs) IsPermission(err error) bool {
	if err == nil {
		return false
	}
	if e, ok := err.(*googleapi.Error); ok {
		if e.Code == http.StatusForbidden || e.Code == http.StatusUnauthorized {
			return true
		}
	}
	return false
}

// IsNotSupported returns true if the error indicates an unsupported operation
func (*GCSFs) IsNotSupported(err error) bool {
	if err == nil {
		return false
	}
	return errors.Is(err, ErrVfsUnsupported)
}

// CheckRootPath creates the specified local root directory if it does not exist
func (fs *GCSFs) CheckRootPath(username string, uid int, gid int) bool {
	// we need a local directory for temporary files
	osFs := NewOsFs(fs.ConnectionID(), fs.localTempDir, "", nil)
	return osFs.CheckRootPath(username, uid, gid)
}

// ScanRootDirContents returns the number of files contained in the bucket,
// and their size
func (fs *GCSFs) ScanRootDirContents() (int, int64, error) {
	return fs.GetDirSize(fs.config.KeyPrefix)
}

// GetDirSize returns the number of files and the size for a folder
// including any subfolders
func (fs *GCSFs) GetDirSize(dirname string) (int, int64, error) {
	prefix := fs.getPrefix(dirname)
	numFiles := 0
	size := int64(0)
	query := &storage.Query{Prefix: prefix}
	err := query.SetAttrSelection(gcsDefaultFieldsSelection)
	if err != nil {
		return numFiles, size, err
	}
	iteratePage := func(nextPageToken string) (string, error) {
		ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
		defer cancelFn()

		bkt := fs.svc.Bucket(fs.config.Bucket)
		it := bkt.Objects(ctx, query)
		pager := iterator.NewPager(it, defaultGCSPageSize, nextPageToken)

		var objects []*storage.ObjectAttrs
		pageToken, err := pager.NextPage(&objects)
		if err != nil {
			return pageToken, err
		}
		for _, attrs := range objects {
			if !attrs.Deleted.IsZero() {
				continue
			}
			isDir := strings.HasSuffix(attrs.Name, "/") || attrs.ContentType == dirMimeType
			if isDir && attrs.Size == 0 {
				continue
			}
			numFiles++
			size += attrs.Size
		}
		return pageToken, nil
	}

	pageToken := ""
	for {
		pageToken, err = iteratePage(pageToken)
		if err != nil {
			metric.GCSListObjectsCompleted(err)
			return numFiles, size, err
		}
		fsLog(fs, logger.LevelDebug, "scan in progress for %q, files: %d, size: %d", dirname, numFiles, size)
		if pageToken == "" {
			break
		}
	}
	metric.GCSListObjectsCompleted(nil)
	return numFiles, size, err
}
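
// Note on GetDirSize: the whole prefix is scanned page by page
// (defaultGCSPageSize objects per page); zero-byte directory markers are
// skipped, so the returned count and size cover regular files only.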

// GetAtomicUploadPath returns the path to use for an atomic upload.
// GCS uploads are already atomic, we never call this method for GCS
func (*GCSFs) GetAtomicUploadPath(_ string) string {
	return ""
}

// GetRelativePath returns the path for a file relative to the user's home dir.
// This is the path as seen by SFTPGo users
func (fs *GCSFs) GetRelativePath(name string) string {
	rel := path.Clean(name)
	if rel == "." {
		rel = ""
	}
	if !path.IsAbs(rel) {
		rel = "/" + rel
	}
	if fs.config.KeyPrefix != "" {
		if !strings.HasPrefix(rel, "/"+fs.config.KeyPrefix) {
			rel = "/"
		}
		rel = path.Clean("/" + strings.TrimPrefix(rel, "/"+fs.config.KeyPrefix))
	}
	if fs.mountPath != "" {
		rel = path.Join(fs.mountPath, rel)
	}
	return rel
}
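
// Note on GetRelativePath: the configured KeyPrefix is stripped from the
// object name and the virtual folder mount path, if any, is prepended, so the
// result is always an absolute slash-separated path as seen by SFTPGo users.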

// Walk walks the file tree rooted at root, calling walkFn for each file or
// directory in the tree, including root
func (fs *GCSFs) Walk(root string, walkFn filepath.WalkFunc) error {
	prefix := fs.getPrefix(root)
	query := &storage.Query{Prefix: prefix}
	err := query.SetAttrSelection(gcsDefaultFieldsSelection)
	if err != nil {
		walkFn(root, nil, err) //nolint:errcheck
		return err
	}
	iteratePage := func(nextPageToken string) (string, error) {
		ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
		defer cancelFn()

		bkt := fs.svc.Bucket(fs.config.Bucket)
		it := bkt.Objects(ctx, query)
		pager := iterator.NewPager(it, defaultGCSPageSize, nextPageToken)

		var objects []*storage.ObjectAttrs
		pageToken, err := pager.NextPage(&objects)
		if err != nil {
			walkFn(root, nil, err) //nolint:errcheck
			return pageToken, err
		}
		for _, attrs := range objects {
			if !attrs.Deleted.IsZero() {
				continue
			}
			name, isDir := fs.resolve(attrs.Name, prefix, attrs.ContentType)
			if name == "" {
				continue
			}
			objectModTime := attrs.Updated
			if val := getLastModified(attrs.Metadata); val > 0 {
				objectModTime = util.GetTimeFromMsecSinceEpoch(val)
			}
			err = walkFn(attrs.Name, NewFileInfo(name, isDir, attrs.Size, objectModTime, false), nil)
			if err != nil {
				return pageToken, err
			}
		}
		return pageToken, nil
	}

	pageToken := ""
	for {
		pageToken, err = iteratePage(pageToken)
		if err != nil {
			metric.GCSListObjectsCompleted(err)
			return err
		}
		if pageToken == "" {
			break
		}
	}
	walkFn(root, NewFileInfo(root, true, 0, time.Unix(0, 0), false), err) //nolint:errcheck
	metric.GCSListObjectsCompleted(err)
	return err
}

// Join joins any number of path elements into a single path
func (*GCSFs) Join(elem ...string) string {
	return strings.TrimPrefix(path.Join(elem...), "/")
}

// HasVirtualFolders returns true if folders are emulated
func (GCSFs) HasVirtualFolders() bool {
	return true
}

// ResolvePath returns the matching filesystem path for the specified virtual path
func (fs *GCSFs) ResolvePath(virtualPath string) (string, error) {
	if fs.mountPath != "" {
		virtualPath = strings.TrimPrefix(virtualPath, fs.mountPath)
	}
	if !path.IsAbs(virtualPath) {
		virtualPath = path.Clean("/" + virtualPath)
	}
	return fs.Join(fs.config.KeyPrefix, strings.TrimPrefix(virtualPath, "/")), nil
}

// CopyFile implements the FsFileCopier interface
func (fs *GCSFs) CopyFile(source, target string, srcSize int64) (int, int64, error) {
	numFiles := 1
	sizeDiff := srcSize
	var conditions *storage.Conditions
	attrs, err := fs.headObject(target)
	if err == nil {
		sizeDiff -= attrs.Size
		numFiles = 0
		conditions = &storage.Conditions{GenerationMatch: attrs.Generation}
	} else {
		if !fs.IsNotExist(err) {
			return 0, 0, err
		}
		conditions = &storage.Conditions{DoesNotExist: true}
	}
	if err := fs.copyFileInternal(source, target, conditions); err != nil {
		return 0, 0, err
	}
	return numFiles, sizeDiff, nil
}

func (fs *GCSFs) resolve(name, prefix, contentType string) (string, bool) {
	result := strings.TrimPrefix(name, prefix)
	isDir := strings.HasSuffix(result, "/")
	if isDir {
		result = strings.TrimSuffix(result, "/")
	}
	if contentType == dirMimeType {
		isDir = true
	}
	return result, isDir
}

// getObjectStat returns the stat result
func (fs *GCSFs) getObjectStat(name string) (os.FileInfo, error) {
	attrs, err := fs.headObject(name)
	if err == nil {
		objSize := attrs.Size
		objectModTime := attrs.Updated
		if val := getLastModified(attrs.Metadata); val > 0 {
			objectModTime = util.GetTimeFromMsecSinceEpoch(val)
		}
		isDir := attrs.ContentType == dirMimeType || strings.HasSuffix(attrs.Name, "/")
		return NewFileInfo(name, isDir, objSize, objectModTime, false), nil
	}
	if !fs.IsNotExist(err) {
		return nil, err
	}
	// now check if this is a prefix (virtual directory)
	hasContents, err := fs.hasContents(name)
	if err != nil {
		return nil, err
	}
	if hasContents {
		return NewFileInfo(name, true, 0, time.Unix(0, 0), false), nil
	}
	// finally check if this is an object with a trailing /
	attrs, err = fs.headObject(name + "/")
	if err != nil {
		return nil, err
	}
	objectModTime := attrs.Updated
	if val := getLastModified(attrs.Metadata); val > 0 {
		objectModTime = util.GetTimeFromMsecSinceEpoch(val)
	}
	return NewFileInfo(name, true, attrs.Size, objectModTime, false), nil
}
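
// Note on getObjectStat: a name can resolve in three ways, checked in order:
// a regular object, a virtual directory (a prefix with at least one object
// below it), or a directory marker object stored with a trailing "/".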

func (fs *GCSFs) setWriterAttrs(objectWriter *storage.Writer, flag int, name string) {
	var contentType string
	if flag == -1 {
		contentType = dirMimeType
	} else {
		contentType = mime.TypeByExtension(path.Ext(name))
	}
	if contentType != "" {
		objectWriter.ObjectAttrs.ContentType = contentType
	}
	if fs.config.StorageClass != "" {
		objectWriter.ObjectAttrs.StorageClass = fs.config.StorageClass
	}
	if fs.config.ACL != "" {
		objectWriter.PredefinedACL = fs.config.ACL
	}
}

func (fs *GCSFs) composeObjects(ctx context.Context, dst, partialObject *storage.ObjectHandle) error {
	fsLog(fs, logger.LevelDebug, "start object compose for partial file %q, destination %q",
		partialObject.ObjectName(), dst.ObjectName())
	composer := dst.ComposerFrom(dst, partialObject)
	if fs.config.StorageClass != "" {
		composer.StorageClass = fs.config.StorageClass
	}
	if fs.config.ACL != "" {
		composer.PredefinedACL = fs.config.ACL
	}
	contentType := mime.TypeByExtension(path.Ext(dst.ObjectName()))
	if contentType != "" {
		composer.ContentType = contentType
	}
	_, err := composer.Run(ctx)
	fsLog(fs, logger.LevelDebug, "object compose for %q finished, err: %v", dst.ObjectName(), err)

	delCtx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
	defer cancelFn()

	errDelete := partialObject.Delete(delCtx)
	metric.GCSDeleteObjectCompleted(errDelete)
	fsLog(fs, logger.LevelDebug, "deleted partial file %q after composing with %q, err: %v",
		partialObject.ObjectName(), dst.ObjectName(), errDelete)
	return err
}

func (fs *GCSFs) copyFileInternal(source, target string, conditions *storage.Conditions) error {
	src := fs.svc.Bucket(fs.config.Bucket).Object(source)
	dst := fs.svc.Bucket(fs.config.Bucket).Object(target)
	if conditions != nil {
		dst = dst.If(*conditions)
	} else {
		attrs, err := fs.headObject(target)
		if err == nil {
			dst = dst.If(storage.Conditions{GenerationMatch: attrs.Generation})
		} else if fs.IsNotExist(err) {
			dst = dst.If(storage.Conditions{DoesNotExist: true})
		} else {
			fsLog(fs, logger.LevelWarn, "unable to set precondition for copy, target %q, stat err: %v",
				target, err)
		}
	}
	ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxLongTimeout))
	defer cancelFn()

	copier := dst.CopierFrom(src)
	if fs.config.StorageClass != "" {
		copier.StorageClass = fs.config.StorageClass
	}
	if fs.config.ACL != "" {
		copier.PredefinedACL = fs.config.ACL
	}
	contentType := mime.TypeByExtension(path.Ext(source))
	if contentType != "" {
		copier.ContentType = contentType
	}
	_, err := copier.Run(ctx)
	metric.GCSCopyObjectCompleted(err)
	return err
}

func (fs *GCSFs) renameInternal(source, target string, fi os.FileInfo, recursion int) (int, int64, error) {
	var numFiles int
	var filesSize int64

	if fi.IsDir() {
		if renameMode == 0 {
			hasContents, err := fs.hasContents(source)
			if err != nil {
				return numFiles, filesSize, err
			}
			if hasContents {
				return numFiles, filesSize, fmt.Errorf("%w: cannot rename non empty directory: %q", ErrVfsUnsupported, source)
			}
		}
		if err := fs.mkdirInternal(target); err != nil {
			return numFiles, filesSize, err
		}
		if renameMode == 1 {
			files, size, err := doRecursiveRename(fs, source, target, fs.renameInternal, recursion)
			numFiles += files
			filesSize += size
			if err != nil {
				return numFiles, filesSize, err
			}
		}
	} else {
		if err := fs.copyFileInternal(source, target, nil); err != nil {
			return numFiles, filesSize, err
		}
		numFiles++
		filesSize += fi.Size()
	}
	err := fs.Remove(source, fi.IsDir())
	if fs.IsNotExist(err) {
		err = nil
	}
	return numFiles, filesSize, err
}

func (fs *GCSFs) mkdirInternal(name string) error {
	if !strings.HasSuffix(name, "/") {
		name += "/"
	}
	_, w, _, err := fs.Create(name, -1, 0)
	if err != nil {
		return err
	}
	return w.Close()
}
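
// Note on mkdirInternal: a directory is emulated by uploading a zero-byte
// object whose name ends with "/". Passing flag == -1 to Create marks it with
// the directory mime type and a DoesNotExist precondition.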

func (fs *GCSFs) hasContents(name string) (bool, error) {
	result := false
	prefix := fs.getPrefix(name)
	query := &storage.Query{Prefix: prefix}
	err := query.SetAttrSelection(gcsDefaultFieldsSelection)
	if err != nil {
		return result, err
	}
	ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
	defer cancelFn()

	bkt := fs.svc.Bucket(fs.config.Bucket)
	it := bkt.Objects(ctx, query)
	// if we have a dir object with a trailing slash it will be returned, so we set the page size to 2
	pager := iterator.NewPager(it, 2, "")

	var objects []*storage.ObjectAttrs
	_, err = pager.NextPage(&objects)
	if err != nil {
		metric.GCSListObjectsCompleted(err)
		return result, err
	}
	for _, attrs := range objects {
		name, _ := fs.resolve(attrs.Name, prefix, attrs.ContentType)
		// a dir object with a trailing slash will result in an empty name
		if name == "/" || name == "" {
			continue
		}
		result = true
		break
	}
	metric.GCSListObjectsCompleted(nil)
	return result, nil
}

func (fs *GCSFs) getPrefix(name string) string {
	prefix := ""
	if name != "" && name != "." && name != "/" {
		prefix = strings.TrimPrefix(name, "/")
		if !strings.HasSuffix(prefix, "/") {
			prefix += "/"
		}
	}
	return prefix
}

func (fs *GCSFs) headObject(name string) (*storage.ObjectAttrs, error) {
	ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
	defer cancelFn()

	bkt := fs.svc.Bucket(fs.config.Bucket)
	obj := bkt.Object(name)
	attrs, err := obj.Attrs(ctx)
	metric.GCSHeadObjectCompleted(err)
	return attrs, err
}

// GetMimeType returns the content type
func (fs *GCSFs) GetMimeType(name string) (string, error) {
	attrs, err := fs.headObject(name)
	if err != nil {
		return "", err
	}
	return attrs.ContentType, nil
}

// Close closes the fs
func (fs *GCSFs) Close() error {
	return nil
}

// GetAvailableDiskSize returns the available size for the specified path
func (*GCSFs) GetAvailableDiskSize(_ string) (*sftp.StatVFS, error) {
	return nil, ErrStorageSizeUnavailable
}

func (*GCSFs) getTempObject(name string) string {
	dir := filepath.Dir(name)
	guid := xid.New().String()
	return filepath.Join(dir, ".sftpgo-partial."+guid+"."+filepath.Base(name))
}
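
// Note on getTempObject: partial uploads are staged next to the target using
// a name of the form ".sftpgo-partial.<id>.<basename>", so the partial object
// can be composed with, and then deleted separately from, the final object.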

type gcsDirLister struct {
	baseDirLister
	bucket        *storage.BucketHandle
	query         *storage.Query
	timeout       time.Duration
	nextPageToken string
	noMorePages   bool
	prefix        string
	prefixes      map[string]bool
	metricUpdated bool
}

func (l *gcsDirLister) resolve(name, contentType string) (string, bool) {
	result := strings.TrimPrefix(name, l.prefix)
	isDir := strings.HasSuffix(result, "/")
	if isDir {
		result = strings.TrimSuffix(result, "/")
	}
	if contentType == dirMimeType {
		isDir = true
	}
	return result, isDir
}

func (l *gcsDirLister) Next(limit int) ([]os.FileInfo, error) {
	if limit <= 0 {
		return nil, errInvalidDirListerLimit
	}
	if len(l.cache) >= limit {
		return l.returnFromCache(limit), nil
	}
	if l.noMorePages {
		if !l.metricUpdated {
			l.metricUpdated = true
			metric.GCSListObjectsCompleted(nil)
		}
		return l.returnFromCache(limit), io.EOF
	}

	ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(l.timeout))
	defer cancelFn()

	it := l.bucket.Objects(ctx, l.query)
	paginator := iterator.NewPager(it, defaultGCSPageSize, l.nextPageToken)

	var objects []*storage.ObjectAttrs
	pageToken, err := paginator.NextPage(&objects)
	if err != nil {
		metric.GCSListObjectsCompleted(err)
		return l.cache, err
	}
	for _, attrs := range objects {
		if attrs.Prefix != "" {
			name, _ := l.resolve(attrs.Prefix, attrs.ContentType)
			if name == "" {
				continue
			}
			if _, ok := l.prefixes[name]; ok {
				continue
			}
			l.cache = append(l.cache, NewFileInfo(name, true, 0, time.Unix(0, 0), false))
			l.prefixes[name] = true
		} else {
			name, isDir := l.resolve(attrs.Name, attrs.ContentType)
			if name == "" {
				continue
			}
			if !attrs.Deleted.IsZero() {
				continue
			}
			if isDir {
				// check if the dir is already included, it will be sent as blob prefix if it contains at least one item
				if _, ok := l.prefixes[name]; ok {
					continue
				}
				l.prefixes[name] = true
			}
			modTime := attrs.Updated
			if val := getLastModified(attrs.Metadata); val > 0 {
				modTime = util.GetTimeFromMsecSinceEpoch(val)
			}
			l.cache = append(l.cache, NewFileInfo(name, isDir, attrs.Size, modTime, false))
		}
	}

	l.nextPageToken = pageToken
	l.noMorePages = (l.nextPageToken == "")
	return l.returnFromCache(limit), nil
}
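
// Note on Next: listed entries are accumulated in the embedded baseDirLister
// cache and returned at most limit at a time. The prefixes map deduplicates
// directories, since a directory marker object and its blob prefix can both
// appear in the same listing.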

func (l *gcsDirLister) Close() error {
	clear(l.prefixes)
	return l.baseDirLister.Close()
}