
Merge branch 'add_file_drive'

link committed 2 years ago
commit 32980f4033
66 changed files with 5326 additions and 129 deletions
  1. build/sysroot/usr/lib/systemd/system/rclone.service (+11 -0)
  2. build/sysroot/usr/share/casaos/shell/helper.sh (+1 -5)
  3. drivers/all.go (+12 -0)
  4. drivers/base/client.go (+30 -0)
  5. drivers/base/types.go (+12 -0)
  6. drivers/dropbox/drive.go (+100 -0)
  7. drivers/dropbox/meta.go (+31 -0)
  8. drivers/dropbox/types.go (+88 -0)
  9. drivers/dropbox/util.go (+102 -0)
  10. drivers/google_drive/drive.go (+183 -0)
  11. drivers/google_drive/meta.go (+33 -0)
  12. drivers/google_drive/types.go (+77 -0)
  13. drivers/google_drive/util.go (+152 -0)
  14. go.mod (+5 -2)
  15. go.sum (+9 -4)
  16. internal/conf/config.go (+43 -0)
  17. internal/conf/const.go (+72 -0)
  18. internal/conf/var.go (+30 -0)
  19. internal/driver/config.go (+25 -0)
  20. internal/driver/driver.go (+131 -0)
  21. internal/driver/item.go (+56 -0)
  22. internal/op/const.go (+6 -0)
  23. internal/op/driver.go (+173 -0)
  24. internal/op/fs.go (+545 -0)
  25. internal/op/hook.go (+109 -0)
  26. internal/sign/sign.go (+36 -0)
  27. main.go (+22 -2)
  28. model/args.go (+39 -0)
  29. model/common.go (+6 -0)
  30. model/obj.go (+186 -0)
  31. model/object.go (+90 -0)
  32. model/req.go (+20 -0)
  33. model/setting.go (+33 -0)
  34. model/smartctl_model.go (+0 -69)
  35. model/storage.go (+54 -0)
  36. model/stream.go (+33 -0)
  37. pkg/fs/fs.go (+12 -0)
  38. pkg/generic_sync/generic_sync.go (+412 -0)
  39. pkg/sign/hmac.go (+52 -0)
  40. pkg/sign/sign.go (+15 -0)
  41. pkg/singleflight/singleflight.go (+212 -0)
  42. pkg/sqlite/db.go (+2 -1)
  43. pkg/utils/balance.go (+18 -0)
  44. pkg/utils/bool.go (+5 -0)
  45. pkg/utils/command/command_helper.go (+0 -19)
  46. pkg/utils/ctx.go (+14 -0)
  47. pkg/utils/httper/drive.go (+151 -0)
  48. pkg/utils/path.go (+81 -0)
  49. pkg/utils/slice.go (+46 -0)
  50. pkg/utils/time.go (+37 -0)
  51. route/init.go (+5 -0)
  52. route/v1.go (+13 -0)
  53. route/v1/driver.go (+12 -0)
  54. route/v1/file.go (+250 -9)
  55. route/v1/file_read.go (+93 -0)
  56. route/v1/recover.go (+163 -0)
  57. route/v1/samba.go (+5 -1)
  58. route/v1/storage.go (+131 -0)
  59. service/fs.go (+154 -0)
  60. service/fs_link.go (+27 -0)
  61. service/fs_list.go (+198 -0)
  62. service/service.go (+58 -17)
  63. service/storage.go (+100 -0)
  64. service/storage_old.go (+73 -0)
  65. service/storage_path.go (+34 -0)
  66. service/storage_service.go (+398 -0)

+ 11 - 0
build/sysroot/usr/lib/systemd/system/rclone.service

@@ -0,0 +1,11 @@
+[Unit]
+Description=rclone
+
+[Service]
+ExecStartPre=/usr/bin/rm -f /tmp/rclone.sock
+ExecStart=/usr/bin/rclone rcd --rc-addr unix:///tmp/rclone.sock --rc-no-auth
+Restart=always
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
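
The unit above runs rclone's remote-control daemon on a unix socket instead of a TCP port, with authentication disabled. As a rough sketch of how the backend could talk to it from Go, assuming the service is active and using rclone's documented core/version rc method (this helper is illustrative, not part of the commit):

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
	"strings"
)

func main() {
	// Route every request over the unix socket; the host in the URL is ignored.
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", "/tmp/rclone.sock")
			},
		},
	}
	// rclone rc methods are invoked via POST with a JSON body.
	resp, err := client.Post("http://rclone/core/version", "application/json", strings.NewReader("{}"))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out))
}
```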

+ 1 - 5
build/sysroot/usr/share/casaos/shell/helper.sh

@@ -133,11 +133,7 @@ GetPlugInDisk() {
  fdisk -l | grep 'Disk' | grep 'sd' | awk -F , '{print substr($1,11,3)}'
 }
 
-#Get disk status
-#param disk path
-GetDiskHealthState() {
-  smartctl -H $1 | grep "SMART Health Status" | awk -F ":" '{print$2}'
-}
+
 
 #Get disk byte count and sector count
 #param disk path  /dev/sda

+ 12 - 0
drivers/all.go

@@ -0,0 +1,12 @@
+package drivers
+
+import (
+	_ "github.com/IceWhaleTech/CasaOS/drivers/dropbox"
+	_ "github.com/IceWhaleTech/CasaOS/drivers/google_drive"
+)
+
+// All does nothing; it exists only so callers can import this package for its
+// side effects (driver registration), the same as a blank import.
+func All() {
+
+}

+ 30 - 0
drivers/base/client.go

@@ -0,0 +1,30 @@
+package base
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/go-resty/resty/v2"
+)
+
+var NoRedirectClient *resty.Client
+var RestyClient = NewRestyClient()
+var HttpClient = &http.Client{}
+var UserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36"
+var DefaultTimeout = time.Second * 30
+
+func init() {
+	NoRedirectClient = resty.New().SetRedirectPolicy(
+		resty.RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+			return http.ErrUseLastResponse
+		}),
+	)
+	NoRedirectClient.SetHeader("user-agent", UserAgent)
+}
+
+func NewRestyClient() *resty.Client {
+	return resty.New().
+		SetHeader("user-agent", UserAgent).
+		SetRetryCount(3).
+		SetTimeout(DefaultTimeout)
+}

+ 12 - 0
drivers/base/types.go

@@ -0,0 +1,12 @@
+package base
+
+import "github.com/go-resty/resty/v2"
+
+type Json map[string]interface{}
+
+type TokenResp struct {
+	AccessToken  string `json:"access_token"`
+	RefreshToken string `json:"refresh_token"`
+}
+
+type ReqCallback func(req *resty.Request)

+ 100 - 0
drivers/dropbox/drive.go

@@ -0,0 +1,100 @@
+package dropbox
+
+import (
+	"context"
+	"errors"
+	"net/http"
+
+	"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
+	"github.com/IceWhaleTech/CasaOS/internal/driver"
+	"github.com/IceWhaleTech/CasaOS/model"
+	"github.com/IceWhaleTech/CasaOS/pkg/utils"
+	"github.com/go-resty/resty/v2"
+	"go.uber.org/zap"
+)
+
+type Dropbox struct {
+	model.Storage
+	Addition
+	AccessToken string
+}
+
+func (d *Dropbox) Config() driver.Config {
+	return config
+}
+
+func (d *Dropbox) GetAddition() driver.Additional {
+	return &d.Addition
+}
+
+func (d *Dropbox) Init(ctx context.Context) error {
+	if len(d.RefreshToken) == 0 {
+		d.getRefreshToken()
+	}
+	return d.refreshToken()
+}
+
+func (d *Dropbox) Drop(ctx context.Context) error {
+
+	return nil
+}
+
+func (d *Dropbox) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+	files, err := d.getFiles(dir.GetID())
+	if err != nil {
+		return nil, err
+	}
+	return utils.SliceConvert(files, func(src File) (model.Obj, error) {
+		return fileToObj(src), nil
+	})
+}
+
+func (d *Dropbox) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+	url := "https://content.dropboxapi.com/2/files/download"
+	link := model.Link{
+		URL:    url,
+		Method: http.MethodPost,
+		Header: http.Header{
+			"Authorization":   []string{"Bearer " + d.AccessToken},
+			"Dropbox-API-Arg": []string{`{"path": "` + file.GetPath() + `"}`},
+		},
+	}
+	return &link, nil
+}
+func (d *Dropbox) GetUserInfo(ctx context.Context) (string, error) {
+	url := "https://api.dropboxapi.com/2/users/get_current_account"
+	user := UserInfo{}
+	resp, err := d.request(url, http.MethodPost, func(req *resty.Request) {
+		req.SetHeader("Content-Type", "")
+	}, &user)
+	if err != nil {
+		return "", err
+	}
+	logger.Info("resp", zap.Any("resp", string(resp)))
+	return user.Email, nil
+}
+func (d *Dropbox) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+	return nil
+}
+
+func (d *Dropbox) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+	return nil
+}
+
+func (d *Dropbox) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+	return nil
+}
+
+func (d *Dropbox) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+	return errors.New("not support")
+}
+
+func (d *Dropbox) Remove(ctx context.Context, obj model.Obj) error {
+	return nil
+}
+
+func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+	return nil
+}
+
+var _ driver.Driver = (*Dropbox)(nil)

+ 31 - 0
drivers/dropbox/meta.go

@@ -0,0 +1,31 @@
+package dropbox
+
+import (
+	"github.com/IceWhaleTech/CasaOS/internal/driver"
+	"github.com/IceWhaleTech/CasaOS/internal/op"
+)
+
+const ICONURL = "https://i.pcmag.com/imagery/reviews/02PHW91bUvLOs36qNbBzOiR-12.fit_scale.size_760x427.v1569471162.png"
+
+type Addition struct {
+	driver.RootID
+	RefreshToken   string `json:"refresh_token" required:"true" omit:"true"`
+	AppKey         string `json:"app_key" type:"string" default:"onr2ic0c0m97mxr" omit:"true"`
+	AppSecret      string `json:"app_secret" type:"string" default:"nd3cjtikbxyj3pz" omit:"true"`
+	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" omit:"true"`
+	AuthUrl        string `json:"auth_url" type:"string" default:"https://www.dropbox.com/oauth2/authorize?client_id=onr2ic0c0m97mxr&redirect_uri=https://test-get.casaos.io&response_type=code&token_access_type=offline&state=${HOST}%2Fv1%2Frecover%2FDropbox"`
+	Icon           string `json:"icon" type:"string" default:"https://i.pcmag.com/imagery/reviews/02PHW91bUvLOs36qNbBzOiR-12.fit_scale.size_760x427.v1569471162.png"`
+	Code           string `json:"code" type:"string" help:"code from auth_url" omit:"true"`
+}
+
+var config = driver.Config{
+	Name:        "Dropbox",
+	OnlyProxy:   true,
+	DefaultRoot: "root",
+}
+
+func init() {
+	op.RegisterDriver(func() driver.Driver {
+		return &Dropbox{}
+	})
+}

+ 88 - 0
drivers/dropbox/types.go

@@ -0,0 +1,88 @@
+package dropbox
+
+import (
+	"time"
+
+	"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
+	"github.com/IceWhaleTech/CasaOS/model"
+	"go.uber.org/zap"
+)
+
+type UserInfo struct {
+	AccountID string `json:"account_id"`
+	Name      struct {
+		GivenName       string `json:"given_name"`
+		Surname         string `json:"surname"`
+		FamiliarName    string `json:"familiar_name"`
+		DisplayName     string `json:"display_name"`
+		AbbreviatedName string `json:"abbreviated_name"`
+	} `json:"name"`
+	Email         string `json:"email"`
+	EmailVerified bool   `json:"email_verified"`
+	Disabled      bool   `json:"disabled"`
+	Country       string `json:"country"`
+	Locale        string `json:"locale"`
+	ReferralLink  string `json:"referral_link"`
+	IsPaired      bool   `json:"is_paired"`
+	AccountType   struct {
+		Tag string `json:".tag"`
+	} `json:"account_type"`
+	RootInfo struct {
+		Tag             string `json:".tag"`
+		RootNamespaceID string `json:"root_namespace_id"`
+		HomeNamespaceID string `json:"home_namespace_id"`
+	} `json:"root_info"`
+}
+type TokenError struct {
+	Error            string `json:"error"`
+	ErrorDescription string `json:"error_description"`
+}
+type File struct {
+	Tag            string    `json:".tag"`
+	Name           string    `json:"name"`
+	PathLower      string    `json:"path_lower"`
+	PathDisplay    string    `json:"path_display"`
+	ID             string    `json:"id"`
+	ClientModified time.Time `json:"client_modified,omitempty"`
+	ServerModified time.Time `json:"server_modified,omitempty"`
+	Rev            string    `json:"rev,omitempty"`
+	Size           int       `json:"size,omitempty"`
+	IsDownloadable bool      `json:"is_downloadable,omitempty"`
+	ContentHash    string    `json:"content_hash,omitempty"`
+}
+
+type Files struct {
+	Files   []File `json:"entries"`
+	Cursor  string `json:"cursor"`
+	HasMore bool   `json:"has_more"`
+}
+
+type Error struct {
+	Error struct {
+		Errors []struct {
+			Domain       string `json:"domain"`
+			Reason       string `json:"reason"`
+			Message      string `json:"message"`
+			LocationType string `json:"location_type"`
+			Location     string `json:"location"`
+		}
+		Code    int    `json:"code"`
+		Message string `json:"message"`
+	} `json:"error"`
+}
+
+func fileToObj(f File) *model.ObjThumb {
+	logger.Info("dropbox file", zap.Any("file", f))
+	obj := &model.ObjThumb{
+		Object: model.Object{
+			ID:       f.ID,
+			Name:     f.Name,
+			Size:     int64(f.Size),
+			Modified: f.ClientModified,
+			IsFolder: f.Tag == "folder",
+			Path:     f.PathDisplay,
+		},
+		Thumbnail: model.Thumbnail{},
+	}
+	return obj
+}

+ 102 - 0
drivers/dropbox/util.go

@@ -0,0 +1,102 @@
+package dropbox
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
+	"github.com/IceWhaleTech/CasaOS/drivers/base"
+	"github.com/go-resty/resty/v2"
+	"go.uber.org/zap"
+)
+
+func (d *Dropbox) getRefreshToken() error {
+	url := "https://api.dropbox.com/oauth2/token"
+	var resp base.TokenResp
+	var e TokenError
+
+	res, err := base.RestyClient.R().SetResult(&resp).SetError(&e).
+		SetFormData(map[string]string{
+			"code":         d.Code,
+			"grant_type":   "authorization_code",
+			"redirect_uri": "https://test-get.casaos.io",
+		}).SetBasicAuth(d.Addition.AppKey, d.Addition.AppSecret).SetHeader("Content-Type", "application/x-www-form-urlencoded").Post(url)
+	if err != nil {
+		return err
+	}
+	logger.Info("get refresh token", zap.String("res", res.String()))
+	if e.Error != "" {
+		return fmt.Errorf(e.Error)
+	}
+	d.RefreshToken = resp.RefreshToken
+	return nil
+
+}
+func (d *Dropbox) refreshToken() error {
+	url := "https://api.dropbox.com/oauth2/token"
+	var resp base.TokenResp
+	var e TokenError
+
+	res, err := base.RestyClient.R().SetResult(&resp).SetError(&e).
+		SetFormData(map[string]string{
+			"refresh_token": d.RefreshToken,
+			"grant_type":    "refresh_token",
+		}).SetBasicAuth(d.Addition.AppKey, d.Addition.AppSecret).SetHeader("Content-Type", "application/x-www-form-urlencoded").Post(url)
+	if err != nil {
+		return err
+	}
+	logger.Info("get refresh token", zap.String("res", res.String()))
+	if e.Error != "" {
+		return fmt.Errorf(e.Error)
+	}
+	d.AccessToken = resp.AccessToken
+	return nil
+
+}
+func (d *Dropbox) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
+	req := base.RestyClient.R()
+	req.SetHeader("Authorization", "Bearer "+d.AccessToken)
+	req.SetHeader("Content-Type", "application/json")
+	if callback != nil {
+		callback(req)
+	}
+	if resp != nil {
+		req.SetResult(resp)
+	}
+	var e Error
+	req.SetError(&e)
+	res, err := req.Execute(method, url)
+	if err != nil {
+		return nil, err
+	}
+	if e.Error.Code != 0 {
+		if e.Error.Code == 401 {
+			err = d.refreshToken()
+			if err != nil {
+				return nil, err
+			}
+			return d.request(url, method, callback, resp)
+		}
+		return nil, fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors)
+	}
+	return res.Body(), nil
+}
+func (d *Dropbox) getFiles(path string) ([]File, error) {
+
+	res := make([]File, 0)
+	var resp Files
+	body := base.Json{
+		"limit": 2000,
+		"path":  path,
+	}
+
+	_, err := d.request("https://api.dropboxapi.com/2/files/list_folder", http.MethodPost, func(req *resty.Request) {
+		req.SetBody(body)
+	}, &resp)
+	if err != nil {
+		return nil, err
+	}
+	res = append(res, resp.Files...)
+
+	return res, nil
+}
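
Note that getFiles stops after the first page even though Files exposes has_more and a cursor. A hedged sketch of how pagination could continue, using Dropbox's documented /2/files/list_folder/continue endpoint (listAll is a hypothetical helper in the same package, not part of this commit):

```go
// listAll follows the cursor returned by list_folder until has_more is false.
func (d *Dropbox) listAll(path string) ([]File, error) {
	var all []File
	var page Files
	url := "https://api.dropboxapi.com/2/files/list_folder"
	body := base.Json{"path": path, "limit": 2000}
	for {
		if _, err := d.request(url, http.MethodPost, func(req *resty.Request) {
			req.SetBody(body)
		}, &page); err != nil {
			return nil, err
		}
		all = append(all, page.Files...)
		if !page.HasMore {
			return all, nil
		}
		// Later pages go through the continue endpoint, keyed by the cursor.
		url = "https://api.dropboxapi.com/2/files/list_folder/continue"
		body = base.Json{"cursor": page.Cursor}
		page = Files{} // clear before decoding the next page
	}
}
```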

+ 183 - 0
drivers/google_drive/drive.go

@@ -0,0 +1,183 @@
+package google_drive
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"strconv"
+
+	"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
+	"github.com/IceWhaleTech/CasaOS/drivers/base"
+	"github.com/IceWhaleTech/CasaOS/internal/driver"
+	"github.com/IceWhaleTech/CasaOS/model"
+	"github.com/IceWhaleTech/CasaOS/pkg/utils"
+	"github.com/go-resty/resty/v2"
+	"go.uber.org/zap"
+)
+
+type GoogleDrive struct {
+	model.Storage
+	Addition
+	AccessToken string
+}
+
+func (d *GoogleDrive) Config() driver.Config {
+	return config
+}
+
+func (d *GoogleDrive) GetAddition() driver.Additional {
+	return &d.Addition
+}
+
+func (d *GoogleDrive) Init(ctx context.Context) error {
+	if d.ChunkSize == 0 {
+		d.ChunkSize = 5
+	}
+	if len(d.RefreshToken) == 0 {
+		d.getRefreshToken()
+	}
+	return d.refreshToken()
+}
+
+func (d *GoogleDrive) Drop(ctx context.Context) error {
+	return nil
+}
+
+func (d *GoogleDrive) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+	files, err := d.getFiles(dir.GetID())
+	if err != nil {
+		return nil, err
+	}
+	return utils.SliceConvert(files, func(src File) (model.Obj, error) {
+		return fileToObj(src), nil
+	})
+}
+
+func (d *GoogleDrive) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+	url := fmt.Sprintf("https://www.googleapis.com/drive/v3/files/%s?includeItemsFromAllDrives=true&supportsAllDrives=true", file.GetID())
+	_, err := d.request(url, http.MethodGet, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	link := model.Link{
+		Method: http.MethodGet,
+		URL:    url + "&alt=media",
+		Header: http.Header{
+			"Authorization": []string{"Bearer " + d.AccessToken},
+		},
+	}
+	return &link, nil
+}
+func (d *GoogleDrive) GetUserInfo(ctx context.Context) (string, error) {
+	url := "https://content.googleapis.com/drive/v3/about?fields=user"
+	user := UserInfo{}
+	resp, err := d.request(url, http.MethodGet, nil, &user)
+	if err != nil {
+		return "", err
+	}
+	logger.Info("resp", zap.Any("resp", resp))
+	return user.User.EmailAddress, nil
+}
+
+func (d *GoogleDrive) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+	data := base.Json{
+		"name":     dirName,
+		"parents":  []string{parentDir.GetID()},
+		"mimeType": "application/vnd.google-apps.folder",
+	}
+	_, err := d.request("https://www.googleapis.com/drive/v3/files", http.MethodPost, func(req *resty.Request) {
+		req.SetBody(data)
+	}, nil)
+	return err
+}
+
+func (d *GoogleDrive) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+	query := map[string]string{
+		"addParents":    dstDir.GetID(),
+		"removeParents": "root",
+	}
+	url := "https://www.googleapis.com/drive/v3/files/" + srcObj.GetID()
+	_, err := d.request(url, http.MethodPatch, func(req *resty.Request) {
+		req.SetQueryParams(query)
+	}, nil)
+	return err
+}
+
+func (d *GoogleDrive) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+	data := base.Json{
+		"name": newName,
+	}
+	url := "https://www.googleapis.com/drive/v3/files/" + srcObj.GetID()
+	_, err := d.request(url, http.MethodPatch, func(req *resty.Request) {
+		req.SetBody(data)
+	}, nil)
+	return err
+}
+
+func (d *GoogleDrive) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+	return errors.New("not support")
+}
+
+func (d *GoogleDrive) Remove(ctx context.Context, obj model.Obj) error {
+	url := "https://www.googleapis.com/drive/v3/files/" + obj.GetID()
+	_, err := d.request(url, http.MethodDelete, nil, nil)
+	return err
+}
+
+func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+	obj := stream.GetOld()
+	var (
+		e    Error
+		url  string
+		data base.Json
+		res  *resty.Response
+		err  error
+	)
+	if obj != nil {
+		url = fmt.Sprintf("https://www.googleapis.com/upload/drive/v3/files/%s?uploadType=resumable&supportsAllDrives=true", obj.GetID())
+		data = base.Json{}
+	} else {
+		data = base.Json{
+			"name":    stream.GetName(),
+			"parents": []string{dstDir.GetID()},
+		}
+		url = "https://www.googleapis.com/upload/drive/v3/files?uploadType=resumable&supportsAllDrives=true"
+	}
+	req := base.NoRedirectClient.R().
+		SetHeaders(map[string]string{
+			"Authorization":           "Bearer " + d.AccessToken,
+			"X-Upload-Content-Type":   stream.GetMimetype(),
+			"X-Upload-Content-Length": strconv.FormatInt(stream.GetSize(), 10),
+		}).
+		SetError(&e).SetBody(data).SetContext(ctx)
+	if obj != nil {
+		res, err = req.Patch(url)
+	} else {
+		res, err = req.Post(url)
+	}
+	if err != nil {
+		return err
+	}
+	if e.Error.Code != 0 {
+		if e.Error.Code == 401 {
+			err = d.refreshToken()
+			if err != nil {
+				return err
+			}
+			return d.Put(ctx, dstDir, stream, up)
+		}
+		return fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors)
+	}
+	putUrl := res.Header().Get("location")
+	if stream.GetSize() < d.ChunkSize*1024*1024 {
+		_, err = d.request(putUrl, http.MethodPut, func(req *resty.Request) {
+			req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream.GetReadCloser())
+		}, nil)
+	} else {
+		err = d.chunkUpload(ctx, stream, putUrl)
+	}
+	return err
+}
+
+var _ driver.Driver = (*GoogleDrive)(nil)

+ 33 - 0
drivers/google_drive/meta.go

@@ -0,0 +1,33 @@
+package google_drive
+
+import (
+	"github.com/IceWhaleTech/CasaOS/internal/driver"
+	"github.com/IceWhaleTech/CasaOS/internal/op"
+)
+
+const ICONURL = "https://i.pcmag.com/imagery/reviews/02PHW91bUvLOs36qNbBzOiR-12.fit_scale.size_760x427.v1569471162.png"
+
+type Addition struct {
+	driver.RootID
+	RefreshToken   string `json:"refresh_token" required:"true" omit:"true"`
+	OrderBy        string `json:"order_by" type:"string" help:"such as: folder,name,modifiedTime" omit:"true"`
+	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" omit:"true"`
+	ClientID       string `json:"client_id" required:"true" default:"865173455964-4ce3gdl73ak5s15kn1vkn73htc8tant2.apps.googleusercontent.com" omit:"true"`
+	ClientSecret   string `json:"client_secret" required:"true" default:"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" omit:"true"`
+	ChunkSize      int64  `json:"chunk_size" type:"number" help:"chunk size while uploading (unit: MB)" omit:"true"`
+	AuthUrl        string `json:"auth_url" type:"string" default:"https://accounts.google.com/o/oauth2/auth/oauthchooseaccount?response_type=code&client_id=865173455964-4ce3gdl73ak5s15kn1vkn73htc8tant2.apps.googleusercontent.com&redirect_uri=http%3A%2F%2Ftest-get.casaos.io&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&access_type=offline&approval_prompt=force&state=${HOST}%2Fv1%2Frecover%2FGoogleDrive&service=lso&o2v=1&flowName=GeneralOAuthFlow"`
+	Icon           string `json:"icon" type:"string" default:"https://i.pcmag.com/imagery/reviews/02PHW91bUvLOs36qNbBzOiR-12.fit_scale.size_760x427.v1569471162.png"`
+	Code           string `json:"code" type:"string" help:"code from auth_url" omit:"true"`
+}
+
+var config = driver.Config{
+	Name:        "GoogleDrive",
+	OnlyProxy:   true,
+	DefaultRoot: "root",
+}
+
+func init() {
+	op.RegisterDriver(func() driver.Driver {
+		return &GoogleDrive{}
+	})
+}

+ 77 - 0
drivers/google_drive/types.go

@@ -0,0 +1,77 @@
+package google_drive
+
+import (
+	"strconv"
+	"time"
+
+	"github.com/IceWhaleTech/CasaOS/model"
+	log "github.com/sirupsen/logrus"
+)
+
+type UserInfo struct {
+	User struct {
+		Kind         string `json:"kind"`
+		DisplayName  string `json:"displayName"`
+		PhotoLink    string `json:"photoLink"`
+		Me           bool   `json:"me"`
+		PermissionID string `json:"permissionId"`
+		EmailAddress string `json:"emailAddress"`
+	} `json:"user"`
+}
+
+type TokenError struct {
+	Error            string `json:"error"`
+	ErrorDescription string `json:"error_description"`
+}
+
+type Files struct {
+	NextPageToken string `json:"nextPageToken"`
+	Files         []File `json:"files"`
+}
+
+type File struct {
+	Id              string    `json:"id"`
+	Name            string    `json:"name"`
+	MimeType        string    `json:"mimeType"`
+	ModifiedTime    time.Time `json:"modifiedTime"`
+	Size            string    `json:"size"`
+	ThumbnailLink   string    `json:"thumbnailLink"`
+	ShortcutDetails struct {
+		TargetId       string `json:"targetId"`
+		TargetMimeType string `json:"targetMimeType"`
+	} `json:"shortcutDetails"`
+}
+
+func fileToObj(f File) *model.ObjThumb {
+	log.Debugf("google file: %+v", f)
+	size, _ := strconv.ParseInt(f.Size, 10, 64)
+	obj := &model.ObjThumb{
+		Object: model.Object{
+			ID:       f.Id,
+			Name:     f.Name,
+			Size:     size,
+			Modified: f.ModifiedTime,
+			IsFolder: f.MimeType == "application/vnd.google-apps.folder",
+		},
+		Thumbnail: model.Thumbnail{},
+	}
+	if f.MimeType == "application/vnd.google-apps.shortcut" {
+		obj.ID = f.ShortcutDetails.TargetId
+		obj.IsFolder = f.ShortcutDetails.TargetMimeType == "application/vnd.google-apps.folder"
+	}
+	return obj
+}
+
+type Error struct {
+	Error struct {
+		Errors []struct {
+			Domain       string `json:"domain"`
+			Reason       string `json:"reason"`
+			Message      string `json:"message"`
+			LocationType string `json:"location_type"`
+			Location     string `json:"location"`
+		}
+		Code    int    `json:"code"`
+		Message string `json:"message"`
+	} `json:"error"`
+}

+ 152 - 0
drivers/google_drive/util.go

@@ -0,0 +1,152 @@
+package google_drive
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+
+	"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
+	"github.com/IceWhaleTech/CasaOS/drivers/base"
+	"github.com/IceWhaleTech/CasaOS/model"
+	"github.com/IceWhaleTech/CasaOS/pkg/utils"
+	"github.com/go-resty/resty/v2"
+	log "github.com/sirupsen/logrus"
+	"go.uber.org/zap"
+)
+
+// helpers for operations that are not defined in the Driver interface
+
+func (d *GoogleDrive) getRefreshToken() error {
+	url := "https://www.googleapis.com/oauth2/v4/token"
+	var resp base.TokenResp
+	var e TokenError
+	res, err := base.RestyClient.R().SetResult(&resp).SetError(&e).
+		SetFormData(map[string]string{
+			"client_id":     d.ClientID,
+			"client_secret": d.ClientSecret,
+			"code":          d.Code,
+			"grant_type":    "authorization_code",
+			"redirect_uri":  "http://test-get.casaos.io",
+		}).Post(url)
+	if err != nil {
+		return err
+	}
+	logger.Info("get refresh token", zap.String("res", res.String()))
+	if e.Error != "" {
+		return fmt.Errorf(e.Error)
+	}
+	d.RefreshToken = resp.RefreshToken
+	return nil
+}
+
+func (d *GoogleDrive) refreshToken() error {
+	url := "https://www.googleapis.com/oauth2/v4/token"
+	var resp base.TokenResp
+	var e TokenError
+	res, err := base.RestyClient.R().SetResult(&resp).SetError(&e).
+		SetFormData(map[string]string{
+			"client_id":     d.ClientID,
+			"client_secret": d.ClientSecret,
+			"refresh_token": d.RefreshToken,
+			"grant_type":    "refresh_token",
+		}).Post(url)
+	if err != nil {
+		return err
+	}
+	log.Debug(res.String())
+	if e.Error != "" {
+		return fmt.Errorf(e.Error)
+	}
+	d.AccessToken = resp.AccessToken
+	return nil
+}
+
+func (d *GoogleDrive) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
+	req := base.RestyClient.R()
+	req.SetHeader("Authorization", "Bearer "+d.AccessToken)
+	req.SetQueryParam("includeItemsFromAllDrives", "true")
+	req.SetQueryParam("supportsAllDrives", "true")
+	if callback != nil {
+		callback(req)
+	}
+	if resp != nil {
+		req.SetResult(resp)
+	}
+	var e Error
+	req.SetError(&e)
+	res, err := req.Execute(method, url)
+	if err != nil {
+		return nil, err
+	}
+	if e.Error.Code != 0 {
+		if e.Error.Code == 401 {
+			err = d.refreshToken()
+			if err != nil {
+				return nil, err
+			}
+			return d.request(url, method, callback, resp)
+		}
+		return nil, fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors)
+	}
+	return res.Body(), nil
+}
+
+func (d *GoogleDrive) getFiles(id string) ([]File, error) {
+	pageToken := "first"
+	res := make([]File, 0)
+	for pageToken != "" {
+		if pageToken == "first" {
+			pageToken = ""
+		}
+		var resp Files
+		orderBy := "folder,name,modifiedTime desc"
+		if d.OrderBy != "" {
+			orderBy = d.OrderBy + " " + d.OrderDirection
+		}
+		query := map[string]string{
+			"orderBy":  orderBy,
+			"fields":   "files(id,name,mimeType,size,modifiedTime,thumbnailLink,shortcutDetails),nextPageToken",
+			"pageSize": "1000",
+			"q":        fmt.Sprintf("'%s' in parents and trashed = false", id),
+			//"includeItemsFromAllDrives": "true",
+			//"supportsAllDrives":         "true",
+			"pageToken": pageToken,
+		}
+		_, err := d.request("https://www.googleapis.com/drive/v3/files", http.MethodGet, func(req *resty.Request) {
+			req.SetQueryParams(query)
+		}, &resp)
+		if err != nil {
+			return nil, err
+		}
+		pageToken = resp.NextPageToken
+		res = append(res, resp.Files...)
+	}
+	return res, nil
+}
+
+func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer, url string) error {
+	var defaultChunkSize = d.ChunkSize * 1024 * 1024
+	var finish int64 = 0
+	for finish < stream.GetSize() {
+		if utils.IsCanceled(ctx) {
+			return ctx.Err()
+		}
+		chunkSize := stream.GetSize() - finish
+		if chunkSize > defaultChunkSize {
+			chunkSize = defaultChunkSize
+		}
+		_, err := d.request(url, http.MethodPut, func(req *resty.Request) {
+			req.SetHeaders(map[string]string{
+				"Content-Length": strconv.FormatInt(chunkSize, 10),
+				"Content-Range":  fmt.Sprintf("bytes %d-%d/%d", finish, finish+chunkSize-1, stream.GetSize()),
+			}).SetBody(io.LimitReader(stream.GetReadCloser(), chunkSize)).SetContext(ctx)
+		}, nil)
+		if err != nil {
+			return err
+		}
+		finish += chunkSize
+	}
+	return nil
+}
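
For concreteness: with the default ChunkSize of 5 (MB) and a 12 MB stream (12,582,912 bytes), chunkUpload issues three PUTs with Content-Range headers bytes 0-5242879/12582912, bytes 5242880-10485759/12582912, and bytes 10485760-12582911/12582912; the resumable session completes once the final range reaches the declared total.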

+ 5 - 2
go.mod

@@ -14,6 +14,7 @@ require (
 	github.com/gin-gonic/gin v1.8.2
 	github.com/glebarez/sqlite v1.6.0
 	github.com/go-ini/ini v1.67.0
+	github.com/go-resty/resty/v2 v2.7.0
 	github.com/golang/mock v1.6.0
 	github.com/gomodule/redigo v1.8.9
 	github.com/google/go-github/v36 v36.0.0
@@ -23,15 +24,17 @@ require (
 	github.com/labstack/echo/v4 v4.10.0
 	github.com/mholt/archiver/v3 v3.5.1
 	github.com/patrickmn/go-cache v2.1.0+incompatible
+	github.com/pkg/errors v0.9.1
 	github.com/robfig/cron v1.2.0
 	github.com/satori/go.uuid v1.2.0
 	github.com/shirou/gopsutil/v3 v3.22.11
+	github.com/sirupsen/logrus v1.9.0
 	github.com/stretchr/testify v1.8.1
 	github.com/tidwall/gjson v1.14.4
 	go.uber.org/zap v1.24.0
-	golang.org/x/crypto v0.4.0
+	golang.org/x/crypto v0.5.0
 	golang.org/x/oauth2 v0.3.0
-	gorm.io/gorm v1.24.2
+	gorm.io/gorm v1.24.3
 	gotest.tools v2.2.0+incompatible
 )

+ 9 - 4
go.sum

@@ -147,6 +147,8 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
 github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
@@ -205,6 +207,8 @@ github.com/perimeterx/marshmallow v1.1.4 h1:pZLDH9RjlLGGorbXhcaQLhfuV0pFMNfPO55F
 github.com/perimeterx/marshmallow v1.1.4/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw=
 github.com/pierrec/lz4/v4 v4.1.2 h1:qvY3YFXRQE/XB8MlLzJH7mSzBs74eA2gg52YTk6jUPM=
 github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
+github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
 github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -293,6 +297,7 @@ golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20221002022538-bcab6841153b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
 golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
@@ -325,8 +330,8 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
-golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
@@ -374,11 +379,11 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gorm.io/gorm v1.24.2 h1:9wR6CFD+G8nOusLdvkZelOEhpJVwwHzpQOUM+REd6U0=
 gorm.io/gorm v1.24.2/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA=
+gorm.io/gorm v1.24.3 h1:WL2ifUmzR/SLp85CSURAfybcHnGZ+yLSGSxgYXlFBHg=
+gorm.io/gorm v1.24.3/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA=
 gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
 gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
 gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo=

+ 43 - 0
internal/conf/config.go

@@ -0,0 +1,43 @@
+package conf
+
+type Database struct {
+	Type        string `json:"type" env:"DB_TYPE"`
+	Host        string `json:"host" env:"DB_HOST"`
+	Port        int    `json:"port" env:"DB_PORT"`
+	User        string `json:"user" env:"DB_USER"`
+	Password    string `json:"password" env:"DB_PASS"`
+	Name        string `json:"name" env:"DB_NAME"`
+	DBFile      string `json:"db_file" env:"DB_FILE"`
+	TablePrefix string `json:"table_prefix" env:"DB_TABLE_PREFIX"`
+	SSLMode     string `json:"ssl_mode" env:"DB_SSL_MODE"`
+}
+
+type Scheme struct {
+	Https    bool   `json:"https" env:"HTTPS"`
+	CertFile string `json:"cert_file" env:"CERT_FILE"`
+	KeyFile  string `json:"key_file" env:"KEY_FILE"`
+}
+
+type LogConfig struct {
+	Enable     bool   `json:"enable" env:"LOG_ENABLE"`
+	Name       string `json:"name" env:"LOG_NAME"`
+	MaxSize    int    `json:"max_size" env:"MAX_SIZE"`
+	MaxBackups int    `json:"max_backups" env:"MAX_BACKUPS"`
+	MaxAge     int    `json:"max_age" env:"MAX_AGE"`
+	Compress   bool   `json:"compress" env:"COMPRESS"`
+}
+
+type Config struct {
+	Force          bool      `json:"force" env:"FORCE"`
+	Address        string    `json:"address" env:"ADDR"`
+	Port           int       `json:"port" env:"PORT"`
+	SiteURL        string    `json:"site_url" env:"SITE_URL"`
+	Cdn            string    `json:"cdn" env:"CDN"`
+	JwtSecret      string    `json:"jwt_secret" env:"JWT_SECRET"`
+	TokenExpiresIn int       `json:"token_expires_in" env:"TOKEN_EXPIRES_IN"`
+	Database       Database  `json:"database"`
+	Scheme         Scheme    `json:"scheme"`
+	TempDir        string    `json:"temp_dir" env:"TEMP_DIR"`
+	BleveDir       string    `json:"bleve_dir" env:"BLEVE_DIR"`
+	Log            LogConfig `json:"log"`
+}

+ 72 - 0
internal/conf/const.go

@@ -0,0 +1,72 @@
+package conf
+
+const (
+	TypeString = "string"
+	TypeSelect = "select"
+	TypeBool   = "bool"
+	TypeText   = "text"
+	TypeNumber = "number"
+)
+
+const (
+	// site
+	VERSION      = "version"
+	ApiUrl       = "api_url"
+	BasePath     = "base_path"
+	SiteTitle    = "site_title"
+	Announcement = "announcement"
+	AllowIndexed = "allow_indexed"
+
+	Logo      = "logo"
+	Favicon   = "favicon"
+	MainColor = "main_color"
+
+	// preview
+	TextTypes          = "text_types"
+	AudioTypes         = "audio_types"
+	VideoTypes         = "video_types"
+	ImageTypes         = "image_types"
+	ProxyTypes         = "proxy_types"
+	ProxyIgnoreHeaders = "proxy_ignore_headers"
+	AudioAutoplay      = "audio_autoplay"
+	VideoAutoplay      = "video_autoplay"
+
+	// global
+	HideFiles           = "hide_files"
+	CustomizeHead       = "customize_head"
+	CustomizeBody       = "customize_body"
+	LinkExpiration      = "link_expiration"
+	SignAll             = "sign_all"
+	PrivacyRegs         = "privacy_regs"
+	OcrApi              = "ocr_api"
+	FilenameCharMapping = "filename_char_mapping"
+
+	// index
+	SearchIndex     = "search_index"
+	AutoUpdateIndex = "auto_update_index"
+	IndexPaths      = "index_paths"
+	IgnorePaths     = "ignore_paths"
+
+	// aria2
+	Aria2Uri    = "aria2_uri"
+	Aria2Secret = "aria2_secret"
+
+	// single
+	Token         = "token"
+	IndexProgress = "index_progress"
+
+	//Github
+	GithubClientId      = "github_client_id"
+	GithubClientSecrets = "github_client_secrets"
+	GithubLoginEnabled  = "github_login_enabled"
+)
+
+const (
+	UNKNOWN = iota
+	FOLDER
+	//OFFICE
+	VIDEO
+	AUDIO
+	TEXT
+	IMAGE
+)

+ 30 - 0
internal/conf/var.go

@@ -0,0 +1,30 @@
+package conf
+
+import "regexp"
+
+var (
+	BuiltAt    string
+	GoVersion  string
+	GitAuthor  string
+	GitCommit  string
+	Version    string = "dev"
+	WebVersion string
+)
+
+var (
+	Conf *Config
+)
+
+var SlicesMap = make(map[string][]string)
+var FilenameCharMap = make(map[string]string)
+var PrivacyReg []*regexp.Regexp
+
+var (
+	// StoragesLoaded reports whether the storages have finished loading
+	StoragesLoaded = false
+)
+var (
+	RawIndexHtml string
+	ManageHtml   string
+	IndexHtml    string
+)

+ 25 - 0
internal/driver/config.go

@@ -0,0 +1,25 @@
+/*
+ * @Author: a624669980@163.com a624669980@163.com
+ * @Date: 2022-12-13 11:05:05
+ * @LastEditors: a624669980@163.com a624669980@163.com
+ * @LastEditTime: 2022-12-13 11:05:13
+ * @FilePath: /drive/internal/driver/config.go
+ * @Description: This is the default header. To customize it, set `customMade`; open koroFileHeader to view and adjust the configuration: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
+ */
+package driver
+
+type Config struct {
+	Name        string `json:"name"`
+	LocalSort   bool   `json:"local_sort"`
+	OnlyLocal   bool   `json:"only_local"`
+	OnlyProxy   bool   `json:"only_proxy"`
+	NoCache     bool   `json:"no_cache"`
+	NoUpload    bool   `json:"no_upload"`
+	NeedMs      bool   `json:"need_ms"` // whether extra input is needed from the user, such as a validation code
+	DefaultRoot string `json:"default_root"`
+	CheckStatus bool
+}
+
+func (c Config) MustProxy() bool {
+	return c.OnlyProxy || c.OnlyLocal
+}

+ 131 - 0
internal/driver/driver.go

@@ -0,0 +1,131 @@
+package driver
+
+import (
+	"context"
+
+	"github.com/IceWhaleTech/CasaOS/model"
+)
+
+type Driver interface {
+	Meta
+	Reader
+	User
+	//Writer
+	//Other
+}
+
+type Meta interface {
+	Config() Config
+	// GetStorage returns the raw storage; drivers don't need to implement it because the embedded model.Storage already does
+	GetStorage() *model.Storage
+	SetStorage(model.Storage)
+	// GetAddition returns the driver-specific addition; it must return a pointer so JSON can be unmarshaled into it
+	GetAddition() Additional
+	// Init initializes the driver. If it is already initialized, drop it first
+	Init(ctx context.Context) error
+	Drop(ctx context.Context) error
+}
+
+type Other interface {
+	Other(ctx context.Context, args model.OtherArgs) (interface{}, error)
+}
+
+type Reader interface {
+	// List lists the files in the given directory.
+	// If files are identified by path, set the ID to the path, e.g. path.Join(dir.GetID(), obj.GetName()).
+	// If files are identified by id, set the ID to the corresponding id.
+	List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error)
+	// Link returns the URL/filepath/reader for a file
+	Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error)
+}
+type User interface {
+	// GetUserInfo returns information identifying the authenticated user (e.g. the account email)
+	GetUserInfo(ctx context.Context) (string, error)
+}
+type Getter interface {
+	GetRoot(ctx context.Context) (model.Obj, error)
+}
+
+//type Writer interface {
+//	Mkdir
+//	Move
+//	Rename
+//	Copy
+//	Remove
+//	Put
+//}
+
+type Mkdir interface {
+	MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error
+}
+
+type Move interface {
+	Move(ctx context.Context, srcObj, dstDir model.Obj) error
+}
+
+type Rename interface {
+	Rename(ctx context.Context, srcObj model.Obj, newName string) error
+}
+
+type Copy interface {
+	Copy(ctx context.Context, srcObj, dstDir model.Obj) error
+}
+
+type Remove interface {
+	Remove(ctx context.Context, obj model.Obj) error
+}
+
+type Put interface {
+	Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up UpdateProgress) error
+}
+
+//type WriteResult interface {
+//	MkdirResult
+//	MoveResult
+//	RenameResult
+//	CopyResult
+//	PutResult
+//	Remove
+//}
+
+type MkdirResult interface {
+	MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error)
+}
+
+type MoveResult interface {
+	Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error)
+}
+
+type RenameResult interface {
+	Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error)
+}
+
+type CopyResult interface {
+	Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error)
+}
+
+type PutResult interface {
+	Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up UpdateProgress) (model.Obj, error)
+}
+
+type UpdateProgress func(percentage int)
+
+type Progress struct {
+	Total int64
+	Done  int64
+	up    UpdateProgress
+}
+
+func (p *Progress) Write(b []byte) (n int, err error) {
+	n = len(b)
+	p.Done += int64(n)
+	p.up(int(float64(p.Done) / float64(p.Total) * 100))
+	return
+}
+
+func NewProgress(total int64, up UpdateProgress) *Progress {
+	return &Progress{
+		Total: total,
+		up:    up,
+	}
+}
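
Since Progress implements io.Writer, an upload path can report percentages by teeing the request body through it. A minimal usage sketch under that assumption (uploadWithProgress and its send callback are hypothetical, not part of this commit):

```go
package example

import (
	"bytes"
	"io"
	"log"

	"github.com/IceWhaleTech/CasaOS/internal/driver"
)

// uploadWithProgress wraps the payload so every read also advances
// driver.Progress, which invokes the percentage callback.
func uploadWithProgress(data []byte, send func(io.Reader) error) error {
	progress := driver.NewProgress(int64(len(data)), func(p int) {
		log.Printf("upload %d%%", p)
	})
	body := io.TeeReader(bytes.NewReader(data), progress)
	return send(body)
}
```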

+ 56 - 0
internal/driver/item.go

@@ -0,0 +1,56 @@
+/*
+ * @Author: a624669980@163.com a624669980@163.com
+ * @Date: 2022-12-13 11:05:47
+ * @LastEditors: a624669980@163.com a624669980@163.com
+ * @LastEditTime: 2022-12-13 11:05:54
+ * @FilePath: /drive/internal/driver/item.go
+ * @Description: This is the default header. To customize it, set `customMade`; open koroFileHeader to view and adjust the configuration: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
+ */
+package driver
+
+type Additional interface{}
+
+type Select string
+
+type Item struct {
+	Name     string `json:"name"`
+	Type     string `json:"type"`
+	Default  string `json:"default"`
+	Options  string `json:"options"`
+	Required bool   `json:"required"`
+	Help     string `json:"help"`
+}
+
+type Info struct {
+	Common     []Item `json:"common"`
+	Additional []Item `json:"additional"`
+	Config     Config `json:"config"`
+}
+
+type IRootPath interface {
+	GetRootPath() string
+}
+
+type IRootId interface {
+	GetRootId() string
+}
+
+type RootPath struct {
+	RootFolderPath string `json:"root_folder_path"`
+}
+
+type RootID struct {
+	RootFolderID string `json:"root_folder_id" omit:"true"`
+}
+
+func (r RootPath) GetRootPath() string {
+	return r.RootFolderPath
+}
+
+func (r *RootPath) SetRootPath(path string) {
+	r.RootFolderPath = path
+}
+
+func (r RootID) GetRootId() string {
+	return r.RootFolderID
+}

+ 6 - 0
internal/op/const.go

@@ -0,0 +1,6 @@
+package op
+
+const (
+	WORK     = "work"
+	RootName = "root"
+)

+ 173 - 0
internal/op/driver.go

@@ -0,0 +1,173 @@
+package op
+
+import (
+	"reflect"
+	"strings"
+
+	"github.com/IceWhaleTech/CasaOS/internal/conf"
+
+	"github.com/IceWhaleTech/CasaOS/internal/driver"
+	"github.com/pkg/errors"
+)
+
+type New func() driver.Driver
+
+var driverNewMap = map[string]New{}
+var driverInfoMap = map[string][]driver.Item{} //driver.Info{}
+
+func RegisterDriver(driver New) {
+	// log.Infof("register driver: [%s]", config.Name)
+	tempDriver := driver()
+	tempConfig := tempDriver.Config()
+	registerDriverItems(tempConfig, tempDriver.GetAddition())
+	driverNewMap[tempConfig.Name] = driver
+}
+
+func GetDriverNew(name string) (New, error) {
+	n, ok := driverNewMap[name]
+	if !ok {
+		return nil, errors.Errorf("no driver named: %s", name)
+	}
+	return n, nil
+}
+
+func GetDriverNames() []string {
+	var driverNames []string
+	for k := range driverInfoMap {
+		driverNames = append(driverNames, k)
+	}
+	return driverNames
+}
+
+//	func GetDriverInfoMap() map[string]driver.Info {
+//		return driverInfoMap
+//	}
+func GetDriverInfoMap() map[string][]driver.Item {
+	return driverInfoMap
+}
+func registerDriverItems(config driver.Config, addition driver.Additional) {
+	// log.Debugf("addition of %s: %+v", config.Name, addition)
+	tAddition := reflect.TypeOf(addition)
+	for tAddition.Kind() == reflect.Pointer {
+		tAddition = tAddition.Elem()
+	}
+	//mainItems := getMainItems(config)
+	additionalItems := getAdditionalItems(tAddition, config.DefaultRoot)
+	driverInfoMap[config.Name] = additionalItems
+	// driver.Info{
+	// 	Common:     mainItems,
+	// 	Additional: additionalItems,
+	// 	Config:     config,
+	// }
+}
+
+func getMainItems(config driver.Config) []driver.Item {
+	items := []driver.Item{{
+		Name:     "mount_path",
+		Type:     conf.TypeString,
+		Required: true,
+		Help:     "",
+	}, {
+		Name: "order",
+		Type: conf.TypeNumber,
+		Help: "use to sort",
+	}, {
+		Name: "remark",
+		Type: conf.TypeText,
+	}}
+	if !config.NoCache {
+		items = append(items, driver.Item{
+			Name:     "cache_expiration",
+			Type:     conf.TypeNumber,
+			Default:  "30",
+			Required: true,
+			Help:     "The cache expiration time for this storage",
+		})
+	}
+	if !config.OnlyProxy && !config.OnlyLocal {
+		items = append(items, []driver.Item{{
+			Name: "web_proxy",
+			Type: conf.TypeBool,
+		}, {
+			Name:     "webdav_policy",
+			Type:     conf.TypeSelect,
+			Options:  "302_redirect,use_proxy_url,native_proxy",
+			Default:  "302_redirect",
+			Required: true,
+		},
+		}...)
+	} else {
+		items = append(items, driver.Item{
+			Name:     "webdav_policy",
+			Type:     conf.TypeSelect,
+			Default:  "native_proxy",
+			Options:  "use_proxy_url,native_proxy",
+			Required: true,
+		})
+	}
+	items = append(items, driver.Item{
+		Name: "down_proxy_url",
+		Type: conf.TypeText,
+	})
+	if config.LocalSort {
+		items = append(items, []driver.Item{{
+			Name:    "order_by",
+			Type:    conf.TypeSelect,
+			Options: "name,size,modified",
+		}, {
+			Name:    "order_direction",
+			Type:    conf.TypeSelect,
+			Options: "asc,desc",
+		}}...)
+	}
+	items = append(items, driver.Item{
+		Name:    "extract_folder",
+		Type:    conf.TypeSelect,
+		Options: "front,back",
+	})
+	return items
+}
+
+func getAdditionalItems(t reflect.Type, defaultRoot string) []driver.Item {
+	var items []driver.Item
+	for i := 0; i < t.NumField(); i++ {
+
+		field := t.Field(i)
+		if field.Type.Kind() == reflect.Struct {
+			items = append(items, getAdditionalItems(field.Type, defaultRoot)...)
+			continue
+		}
+		tag := field.Tag
+		ignore, ok1 := tag.Lookup("ignore")
+		name, ok2 := tag.Lookup("json")
+		if (ok1 && ignore == "true") || !ok2 {
+			continue
+		}
+		if tag.Get("omit") == "true" {
+			continue
+		}
+		item := driver.Item{
+			Name:     name,
+			Type:     strings.ToLower(field.Type.Name()),
+			Default:  tag.Get("default"),
+			Options:  tag.Get("options"),
+			Required: tag.Get("required") == "true",
+			Help:     tag.Get("help"),
+		}
+		if tag.Get("type") != "" {
+			item.Type = tag.Get("type")
+		}
+		if item.Name == "root_folder_id" || item.Name == "root_folder_path" {
+			if item.Default == "" {
+				item.Default = defaultRoot
+			}
+			item.Required = item.Default != ""
+		}
+		// set default type to string
+		if item.Type == "" {
+			item.Type = "string"
+		}
+		items = append(items, item)
+	}
+	return items
+}
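
Together with drivers/all.go above, everything is wired up by init-time side effects: importing the drivers package registers each driver, and the per-driver item metadata is derived from the struct tags parsed in registerDriverItems. A consumer-side sketch under those assumptions (listDrivers is hypothetical, not part of this commit):

```go
package example

import (
	"fmt"

	_ "github.com/IceWhaleTech/CasaOS/drivers" // blank import triggers driver registration via init()
	"github.com/IceWhaleTech/CasaOS/internal/op"
)

func listDrivers() error {
	// Names and per-driver items come from the registered Addition struct tags.
	for _, name := range op.GetDriverNames() {
		fmt.Printf("driver %s: %d configurable items\n", name, len(op.GetDriverInfoMap()[name]))
	}
	// Instantiate a registered driver by its Config().Name.
	newFn, err := op.GetDriverNew("GoogleDrive")
	if err != nil {
		return err
	}
	d := newFn()
	fmt.Println("instantiated:", d.Config().Name)
	return nil
}
```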

+ 545 - 0
internal/op/fs.go

@@ -0,0 +1,545 @@
+package op
+
+import (
+	"context"
+	"os"
+	stdpath "path"
+	"time"
+
+	"github.com/IceWhaleTech/CasaOS/internal/driver"
+	"github.com/IceWhaleTech/CasaOS/model"
+	"github.com/IceWhaleTech/CasaOS/pkg/generic_sync"
+	"github.com/IceWhaleTech/CasaOS/pkg/singleflight"
+	"github.com/IceWhaleTech/CasaOS/pkg/utils"
+	"github.com/Xhofe/go-cache"
+	"github.com/pkg/errors"
+	pkgerr "github.com/pkg/errors"
+	log "github.com/sirupsen/logrus"
+)
+
+// This layer makes it easy to hook extra work in before and after each file operation
+
+var listCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64))
+var listG singleflight.Group[[]model.Obj]
+
+func updateCacheObj(storage driver.Driver, path string, oldObj model.Obj, newObj model.Obj) {
+	key := Key(storage, path)
+	objs, ok := listCache.Get(key)
+	if ok {
+		for i, obj := range objs {
+			if obj.GetName() == oldObj.GetName() {
+				objs[i] = newObj
+				break
+			}
+		}
+		listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
+	}
+}
+
+func delCacheObj(storage driver.Driver, path string, obj model.Obj) {
+	key := Key(storage, path)
+	objs, ok := listCache.Get(key)
+	if ok {
+		for i, oldObj := range objs {
+			if oldObj.GetName() == obj.GetName() {
+				objs = append(objs[:i], objs[i+1:]...)
+				break
+			}
+		}
+		listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
+	}
+}
+
+var addSortDebounceMap generic_sync.MapOf[string, func(func())]
+
+func addCacheObj(storage driver.Driver, path string, newObj model.Obj) {
+	key := Key(storage, path)
+	objs, ok := listCache.Get(key)
+	if ok {
+		for i, obj := range objs {
+			if obj.GetName() == newObj.GetName() {
+				objs[i] = newObj
+				return
+			}
+		}
+
+		// Simple separation of files and folders
+		if len(objs) > 0 && objs[len(objs)-1].IsDir() == newObj.IsDir() {
+			objs = append(objs, newObj)
+		} else {
+			objs = append([]model.Obj{newObj}, objs...)
+		}
+
+		if storage.Config().LocalSort {
+			debounce, _ := addSortDebounceMap.LoadOrStore(key, utils.NewDebounce(time.Minute))
+			log.Debug("addCacheObj: wait start sort")
+			debounce(func() {
+				log.Debug("addCacheObj: start sort")
+				model.SortFiles(objs, storage.GetStorage().OrderBy, storage.GetStorage().OrderDirection)
+				addSortDebounceMap.Delete(key)
+			})
+		}
+
+		listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
+	}
+}
+
+func ClearCache(storage driver.Driver, path string) {
+	listCache.Del(Key(storage, path))
+}
+
+func Key(storage driver.Driver, path string) string {
+	return stdpath.Join(storage.GetStorage().MountPath, utils.FixAndCleanPath(path))
+}
+
+// List lists files in the storage; it does not include virtual files
+func List(ctx context.Context, storage driver.Driver, path string, args model.ListArgs, refresh ...bool) ([]model.Obj, error) {
+	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
+		return nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+	}
+	path = utils.FixAndCleanPath(path)
+	log.Debugf("op.List %s", path)
+	key := Key(storage, path)
+	if !utils.IsBool(refresh...) {
+		if files, ok := listCache.Get(key); ok {
+			log.Debugf("use cache when list %s", path)
+			return files, nil
+		}
+	}
+	dir, err := GetUnwrap(ctx, storage, path)
+	if err != nil {
+		return nil, errors.WithMessage(err, "failed get dir")
+	}
+	log.Debugf("list dir: %+v", dir)
+	if !dir.IsDir() {
+		return nil, errors.WithStack(errors.New("not a folder"))
+	}
+	objs, err, _ := listG.Do(key, func() ([]model.Obj, error) {
+		files, err := storage.List(ctx, dir, args)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to list objs")
+		}
+		// set path
+		for _, f := range files {
+			if s, ok := f.(model.SetPath); ok && f.GetPath() == "" && dir.GetPath() != "" {
+				s.SetPath(stdpath.Join(dir.GetPath(), f.GetName()))
+			}
+		}
+		// wrap obj name
+		model.WrapObjsName(files)
+		// call hooks
+		go func(reqPath string, files []model.Obj) {
+			for _, hook := range ObjsUpdateHooks {
+				hook(args.ReqPath, files)
+			}
+		}(args.ReqPath, files)
+
+		// sort objs
+		if storage.Config().LocalSort {
+			model.SortFiles(files, storage.GetStorage().OrderBy, storage.GetStorage().OrderDirection)
+		}
+		model.ExtractFolder(files, storage.GetStorage().ExtractFolder)
+
+		if !storage.Config().NoCache {
+			if len(files) > 0 {
+				log.Debugf("set cache: %s => %+v", key, files)
+				listCache.Set(key, files, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
+			} else {
+				log.Debugf("del cache: %s", key)
+				listCache.Del(key)
+			}
+		}
+		return files, nil
+	})
+	return objs, err
+}
+
+// Get object from list of files
+func Get(ctx context.Context, storage driver.Driver, path string) (model.Obj, error) {
+	path = utils.FixAndCleanPath(path)
+	log.Debugf("op.Get %s", path)
+
+	// is root folder
+	if utils.PathEqual(path, "/") {
+		var rootObj model.Obj
+		switch r := storage.GetAddition().(type) {
+		case driver.IRootId:
+			rootObj = &model.Object{
+				ID:       r.GetRootId(),
+				Name:     RootName,
+				Size:     0,
+				Modified: storage.GetStorage().Modified,
+				IsFolder: true,
+				Path:     path,
+			}
+		case driver.IRootPath:
+			rootObj = &model.Object{
+				Path:     r.GetRootPath(),
+				Name:     RootName,
+				Size:     0,
+				Modified: storage.GetStorage().Modified,
+				IsFolder: true,
+			}
+		default:
+			if storage, ok := storage.(driver.Getter); ok {
+				obj, err := storage.GetRoot(ctx)
+				if err != nil {
+					return nil, errors.WithMessage(err, "failed get root obj")
+				}
+				rootObj = obj
+			}
+		}
+		if rootObj == nil {
+			return nil, errors.Errorf("please implement IRootPath or IRootId or Getter method")
+		}
+		return &model.ObjWrapName{
+			Name: RootName,
+			Obj:  rootObj,
+		}, nil
+	}
+
+	// not root folder
+	dir, name := stdpath.Split(path)
+	files, err := List(ctx, storage, dir, model.ListArgs{})
+	if err != nil {
+		return nil, errors.WithMessage(err, "failed get parent list")
+	}
+	for _, f := range files {
+		// TODO maybe copy obj here
+		if f.GetName() == name {
+			return f, nil
+		}
+	}
+	log.Debugf("cant find obj with name: %s", name)
+	return nil, errors.WithStack(errors.New("object not found"))
+}
+
+func GetUnwrap(ctx context.Context, storage driver.Driver, path string) (model.Obj, error) {
+	obj, err := Get(ctx, storage, path)
+	if err != nil {
+		return nil, err
+	}
+	return model.UnwrapObjs(obj), err
+}
+
+// ErrObjectNotFound is the sentinel returned when an object cannot be found in its parent listing.
+var ErrObjectNotFound = errors.New("object not found")
+
+var linkCache = cache.NewMemCache(cache.WithShards[*model.Link](16))
+var linkG singleflight.Group[*model.Link]
+
+// Link gets a link for the object at path; if the link is a URL, it should carry an expiry time.
+func Link(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (*model.Link, model.Obj, error) {
+	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
+		return nil, nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+	}
+	file, err := GetUnwrap(ctx, storage, path)
+	if err != nil {
+		return nil, nil, errors.WithMessage(err, "failed to get file")
+	}
+	if file.IsDir() {
+		return nil, nil, errors.WithStack(errors.New("not a file"))
+	}
+	key := Key(storage, path) + ":" + args.IP
+	if link, ok := linkCache.Get(key); ok {
+		return link, file, nil
+	}
+	fn := func() (*model.Link, error) {
+		link, err := storage.Link(ctx, file, args)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed get link")
+		}
+		if link.Expiration != nil {
+			linkCache.Set(key, link, cache.WithEx[*model.Link](*link.Expiration))
+		}
+		return link, nil
+	}
+	link, err, _ := linkG.Do(key, fn)
+	return link, file, err
+}
+
+// Other forwards a driver-specific API call to the storage driver.
+func Other(ctx context.Context, storage driver.Driver, args model.FsOtherArgs) (interface{}, error) {
+	obj, err := GetUnwrap(ctx, storage, args.Path)
+	if err != nil {
+		return nil, errors.WithMessagef(err, "failed to get obj")
+	}
+	if o, ok := storage.(driver.Other); ok {
+		return o.Other(ctx, model.OtherArgs{
+			Obj:    obj,
+			Method: args.Method,
+			Data:   args.Data,
+		})
+	} else {
+		return nil, errors.New("not implemented")
+	}
+}
+
+var mkdirG singleflight.Group[interface{}]
+
+func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache ...bool) error {
+	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
+		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+	}
+	path = utils.FixAndCleanPath(path)
+	key := Key(storage, path)
+	_, err, _ := mkdirG.Do(key, func() (interface{}, error) {
+		// check if dir exists
+		f, err := GetUnwrap(ctx, storage, path)
+		if err != nil {
+			if errors.Is(pkgerr.Cause(err), ErrObjectNotFound) {
+				parentPath, dirName := stdpath.Split(path)
+				err = MakeDir(ctx, storage, parentPath)
+				if err != nil {
+					return nil, errors.WithMessagef(err, "failed to make parent dir [%s]", parentPath)
+				}
+				parentDir, err := GetUnwrap(ctx, storage, parentPath)
+				// this should not happen
+				if err != nil {
+					return nil, errors.WithMessagef(err, "failed to get parent dir [%s]", parentPath)
+				}
+
+				switch s := storage.(type) {
+				case driver.MkdirResult:
+					var newObj model.Obj
+					newObj, err = s.MakeDir(ctx, parentDir, dirName)
+					if err == nil {
+						if newObj != nil {
+							addCacheObj(storage, parentPath, model.WrapObjName(newObj))
+						} else if !utils.IsBool(lazyCache...) {
+							ClearCache(storage, parentPath)
+						}
+					}
+				case driver.Mkdir:
+					err = s.MakeDir(ctx, parentDir, dirName)
+					if err == nil && !utils.IsBool(lazyCache...) {
+						ClearCache(storage, parentPath)
+					}
+				default:
+					return nil, errors.New("not implemented")
+				}
+				return nil, errors.WithStack(err)
+			}
+			return nil, errors.WithMessage(err, "failed to check if dir exists")
+		}
+		// dir exists
+		if f.IsDir() {
+			return nil, nil
+		}
+		// dir to make is a file
+		return nil, errors.New("file exists")
+	})
+	return err
+}
+
+func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string, lazyCache ...bool) error {
+	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
+		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+	}
+	srcPath = utils.FixAndCleanPath(srcPath)
+	dstDirPath = utils.FixAndCleanPath(dstDirPath)
+	srcRawObj, err := Get(ctx, storage, srcPath)
+	if err != nil {
+		return errors.WithMessage(err, "failed to get src object")
+	}
+	srcObj := model.UnwrapObjs(srcRawObj)
+	dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
+	if err != nil {
+		return errors.WithMessage(err, "failed to get dst dir")
+	}
+	srcDirPath := stdpath.Dir(srcPath)
+
+	switch s := storage.(type) {
+	case driver.MoveResult:
+		var newObj model.Obj
+		newObj, err = s.Move(ctx, srcObj, dstDir)
+		if err == nil {
+			delCacheObj(storage, srcDirPath, srcRawObj)
+			if newObj != nil {
+				addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
+			} else if !utils.IsBool(lazyCache...) {
+				ClearCache(storage, dstDirPath)
+			}
+		}
+	case driver.Move:
+		err = s.Move(ctx, srcObj, dstDir)
+		if err == nil {
+			delCacheObj(storage, srcDirPath, srcRawObj)
+			if !utils.IsBool(lazyCache...) {
+				ClearCache(storage, dstDirPath)
+			}
+		}
+	default:
+		return errors.New("not implemented")
+	}
+	return errors.WithStack(err)
+}
+
+func Rename(ctx context.Context, storage driver.Driver, srcPath, dstName string, lazyCache ...bool) error {
+	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
+		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+	}
+	srcPath = utils.FixAndCleanPath(srcPath)
+	srcRawObj, err := Get(ctx, storage, srcPath)
+	if err != nil {
+		return errors.WithMessage(err, "failed to get src object")
+	}
+	srcObj := model.UnwrapObjs(srcRawObj)
+	srcDirPath := stdpath.Dir(srcPath)
+
+	switch s := storage.(type) {
+	case driver.RenameResult:
+		var newObj model.Obj
+		newObj, err = s.Rename(ctx, srcObj, dstName)
+		if err == nil {
+			if newObj != nil {
+				updateCacheObj(storage, srcDirPath, srcRawObj, model.WrapObjName(newObj))
+			} else if !utils.IsBool(lazyCache...) {
+				ClearCache(storage, srcDirPath)
+			}
+		}
+	case driver.Rename:
+		err = s.Rename(ctx, srcObj, dstName)
+		if err == nil && !utils.IsBool(lazyCache...) {
+			ClearCache(storage, srcDirPath)
+		}
+	default:
+		return errors.New("not implemented")
+	}
+	return errors.WithStack(err)
+}
+
+// Copy copies file[s] within a single storage.
+func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string, lazyCache ...bool) error {
+	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
+		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+	}
+	srcPath = utils.FixAndCleanPath(srcPath)
+	dstDirPath = utils.FixAndCleanPath(dstDirPath)
+	srcObj, err := GetUnwrap(ctx, storage, srcPath)
+	if err != nil {
+		return errors.WithMessage(err, "failed to get src object")
+	}
+	dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
+	if err != nil {
+		return errors.WithMessage(err, "failed to get dst dir")
+	}
+
+	switch s := storage.(type) {
+	case driver.CopyResult:
+		var newObj model.Obj
+		newObj, err = s.Copy(ctx, srcObj, dstDir)
+		if err == nil {
+			if newObj != nil {
+				addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
+			} else if !utils.IsBool(lazyCache...) {
+				ClearCache(storage, dstDirPath)
+			}
+		}
+	case driver.Copy:
+		err = s.Copy(ctx, srcObj, dstDir)
+		if err == nil && !utils.IsBool(lazyCache...) {
+			ClearCache(storage, dstDirPath)
+		}
+	default:
+		return errors.New("not implemented")
+	}
+	return errors.WithStack(err)
+}
+
+func Remove(ctx context.Context, storage driver.Driver, path string) error {
+	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
+		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+	}
+	path = utils.FixAndCleanPath(path)
+	rawObj, err := Get(ctx, storage, path)
+	if err != nil {
+		// if object not found, it's ok
+		if errors.Is(pkgerr.Cause(err), ErrObjectNotFound) {
+			return nil
+		}
+		return errors.WithMessage(err, "failed to get object")
+	}
+	dirPath := stdpath.Dir(path)
+
+	switch s := storage.(type) {
+	case driver.Remove:
+		err = s.Remove(ctx, model.UnwrapObjs(rawObj))
+		if err == nil {
+			delCacheObj(storage, dirPath, rawObj)
+		}
+	default:
+		return errors.New("not implemented")
+	}
+	return errors.WithStack(err)
+}
+
+func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file *model.FileStream, up driver.UpdateProgress, lazyCache ...bool) error {
+	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
+		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+	}
+	defer func() {
+		if f, ok := file.GetReadCloser().(*os.File); ok {
+			err := os.RemoveAll(f.Name())
+			if err != nil {
+				log.Errorf("failed to remove file [%s]", f.Name())
+			}
+		}
+	}()
+	defer func() {
+		if err := file.Close(); err != nil {
+			log.Errorf("failed to close file streamer, %v", err)
+		}
+	}()
+	// if the file already exists with size 0, delete it first
+	dstDirPath = utils.FixAndCleanPath(dstDirPath)
+	dstPath := stdpath.Join(dstDirPath, file.GetName())
+	fi, err := GetUnwrap(ctx, storage, dstPath)
+	if err == nil {
+		if fi.GetSize() == 0 {
+			err = Remove(ctx, storage, dstPath)
+			if err != nil {
+				return errors.WithMessagef(err, "failed remove file that exist and have size 0")
+			}
+		} else {
+			file.Old = fi
+		}
+	}
+	err = MakeDir(ctx, storage, dstDirPath)
+	if err != nil {
+		return errors.WithMessagef(err, "failed to make dir [%s]", dstDirPath)
+	}
+	parentDir, err := GetUnwrap(ctx, storage, dstDirPath)
+	// this should not happen
+	if err != nil {
+		return errors.WithMessagef(err, "failed to get dir [%s]", dstDirPath)
+	}
+	// if up is nil, set a default to prevent panic
+	if up == nil {
+		up = func(p int) {}
+	}
+
+	switch s := storage.(type) {
+	case driver.PutResult:
+		var newObj model.Obj
+		newObj, err = s.Put(ctx, parentDir, file, up)
+		if err == nil {
+			if newObj != nil {
+				addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
+			} else if !utils.IsBool(lazyCache...) {
+				ClearCache(storage, dstDirPath)
+			}
+		}
+	case driver.Put:
+		err = s.Put(ctx, parentDir, file, up)
+		if err == nil && !utils.IsBool(lazyCache...) {
+			ClearCache(storage, dstDirPath)
+		}
+	default:
+		return errors.New("not implemented")
+	}
+	log.Debugf("put file [%s] done", file.GetName())
+	//if err == nil {
+	//	//clear cache
+	//	key := stdpath.Join(storage.GetStorage().MountPath, dstDirPath)
+	//	listCache.Del(key)
+	//}
+	return errors.WithStack(err)
+}
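
Taken together, a minimal (hypothetical) consumer of the op helpers above might look like the sketch below; the storage value, the "/Photos" path and the IP are placeholders, and imports and error handling are trimmed:

// listAndLink is an illustrative caller; how storage is looked up is not shown here.
func listAndLink(ctx context.Context, storage driver.Driver) error {
	objs, err := op.List(ctx, storage, "/Photos", model.ListArgs{ReqPath: "/Photos"})
	if err != nil {
		return err
	}
	for _, obj := range objs {
		if obj.IsDir() {
			continue
		}
		// Link results are cached per path+IP until the driver-reported expiration.
		link, _, err := op.Link(ctx, storage, "/Photos/"+obj.GetName(), model.LinkArgs{IP: "127.0.0.1"})
		if err != nil {
			return err
		}
		_ = link.URL
	}
	return nil
}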

+ 109 - 0
internal/op/hook.go

@@ -0,0 +1,109 @@
+package op
+
+import (
+	"regexp"
+	"strings"
+
+	"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
+	"github.com/IceWhaleTech/CasaOS/internal/conf"
+	"github.com/IceWhaleTech/CasaOS/internal/driver"
+	"github.com/IceWhaleTech/CasaOS/model"
+	jsoniter "github.com/json-iterator/go"
+	"github.com/pkg/errors"
+	"go.uber.org/zap"
+)
+
+// Obj
+type ObjsUpdateHook = func(parent string, objs []model.Obj)
+
+var (
+	ObjsUpdateHooks = make([]ObjsUpdateHook, 0)
+)
+
+func RegisterObjsUpdateHook(hook ObjsUpdateHook) {
+	ObjsUpdateHooks = append(ObjsUpdateHooks, hook)
+}
+
+func HandleObjsUpdateHook(parent string, objs []model.Obj) {
+	for _, hook := range ObjsUpdateHooks {
+		hook(parent, objs)
+	}
+}
+
+// Setting
+type SettingItemHook func(item *model.SettingItem) error
+
+var settingItemHooks = map[string]SettingItemHook{
+	conf.VideoTypes: func(item *model.SettingItem) error {
+		conf.SlicesMap[conf.VideoTypes] = strings.Split(item.Value, ",")
+		return nil
+	},
+	conf.AudioTypes: func(item *model.SettingItem) error {
+		conf.SlicesMap[conf.AudioTypes] = strings.Split(item.Value, ",")
+		return nil
+	},
+	conf.ImageTypes: func(item *model.SettingItem) error {
+		conf.SlicesMap[conf.ImageTypes] = strings.Split(item.Value, ",")
+		return nil
+	},
+	conf.TextTypes: func(item *model.SettingItem) error {
+		conf.SlicesMap[conf.TextTypes] = strings.Split(item.Value, ",")
+		return nil
+	},
+	conf.ProxyTypes: func(item *model.SettingItem) error {
+		conf.SlicesMap[conf.ProxyTypes] = strings.Split(item.Value, ",")
+		return nil
+	},
+	conf.ProxyIgnoreHeaders: func(item *model.SettingItem) error {
+		conf.SlicesMap[conf.ProxyIgnoreHeaders] = strings.Split(item.Value, ",")
+		return nil
+	},
+	conf.PrivacyRegs: func(item *model.SettingItem) error {
+		regStrs := strings.Split(item.Value, "\n")
+		regs := make([]*regexp.Regexp, 0, len(regStrs))
+		for _, regStr := range regStrs {
+			reg, err := regexp.Compile(regStr)
+			if err != nil {
+				return errors.WithStack(err)
+			}
+			regs = append(regs, reg)
+		}
+		conf.PrivacyReg = regs
+		return nil
+	},
+	conf.FilenameCharMapping: func(item *model.SettingItem) error {
+		var json = jsoniter.ConfigCompatibleWithStandardLibrary
+		err := json.UnmarshalFromString(item.Value, &conf.FilenameCharMap)
+		if err != nil {
+			return err
+		}
+		logger.Info("filename char mapping", zap.Any("FilenameCharMap", conf.FilenameCharMap))
+		return nil
+	},
+}
+
+func RegisterSettingItemHook(key string, hook SettingItemHook) {
+	settingItemHooks[key] = hook
+}
+
+func HandleSettingItemHook(item *model.SettingItem) (hasHook bool, err error) {
+	if hook, ok := settingItemHooks[item.Key]; ok {
+		return true, hook(item)
+	}
+	return false, nil
+}
+
+// Storage
+type StorageHook func(typ string, storage driver.Driver)
+
+var storageHooks = make([]StorageHook, 0)
+
+func CallStorageHooks(typ string, storage driver.Driver) {
+	for _, hook := range storageHooks {
+		hook(typ, storage)
+	}
+}
+
+func RegisterStorageHook(hook StorageHook) {
+	storageHooks = append(storageHooks, hook)
+}
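
As a rough illustration of the registration points above (the key name and handler body are made up, imports omitted), another module could hook into setting updates like this:

func init() {
	op.RegisterSettingItemHook("my_custom_key", func(item *model.SettingItem) error {
		// the returned error is surfaced to whoever calls HandleSettingItemHook
		fmt.Printf("setting %s changed to %s\n", item.Key, item.Value)
		return nil
	})
}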

+ 36 - 0
internal/sign/sign.go

@@ -0,0 +1,36 @@
+package sign
+
+import (
+	"sync"
+	"time"
+
+	"github.com/IceWhaleTech/CasaOS/pkg/sign"
+)
+
+var once sync.Once
+var instance sign.Sign
+
+func Sign(data string) string {
+	return NotExpired(data)
+}
+
+func WithDuration(data string, d time.Duration) string {
+	once.Do(Instance)
+	return instance.Sign(data, time.Now().Add(d).Unix())
+}
+
+func NotExpired(data string) string {
+	once.Do(Instance)
+	return instance.Sign(data, 0)
+}
+
+func Verify(data string, sign string) error {
+	once.Do(Instance)
+	return instance.Verify(data, sign)
+}
+
+func Instance() {
+	instance = sign.NewHMACSign([]byte("token"))
+}
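
A hedged sketch of how this wrapper might be used to issue a signed download path; the query layout and the 4-hour lifetime are assumptions, not taken from the routes, and imports are omitted:

func signedDownloadQuery(filePath string) string {
	s := sign.WithDuration(filePath, 4*time.Hour)
	return "?path=" + url.QueryEscape(filePath) + "&sign=" + url.QueryEscape(s)
}

// The serving side would call sign.Verify(filePath, s) and reject the request on any error.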

+ 22 - 2
main.go

@@ -19,6 +19,7 @@ import (
 
 	"github.com/IceWhaleTech/CasaOS/pkg/cache"
 	"github.com/IceWhaleTech/CasaOS/pkg/config"
+	"github.com/IceWhaleTech/CasaOS/pkg/config/configfile"
 	"github.com/IceWhaleTech/CasaOS/pkg/sqlite"
 	"github.com/IceWhaleTech/CasaOS/pkg/utils/command"
 	"github.com/IceWhaleTech/CasaOS/pkg/utils/file"
@@ -28,6 +29,7 @@ import (
 	"github.com/coreos/go-systemd/daemon"
 	"go.uber.org/zap"
 
+	_ "github.com/IceWhaleTech/CasaOS/drivers"
 	"github.com/robfig/cron"
 	"gorm.io/gorm"
 )
@@ -76,8 +78,23 @@ func init() {
 	service.Cache = cache.Init()
 
 	service.GetCPUThermalZone()
+	service.MyService.Storages().InitStorages()
 
 	route.InitFunction()
+	data := &configfile.Storage{}
+	e := data.Load()
+	fmt.Println(e)
+	fmt.Println(data.GetSectionList())
+	// fmt.Println(data.HasSection("google"))
+	// fmt.Println(data.GetKeyList("google"))
+	// fmt.Println(data.GetValue("google", "token"))
+	// data.SetValue("google", "type", "drive")
+	// data.SetValue("google", "client_id", "865173455964-4ce3gdl73ak5s15kn1vkn73htc8tant2.apps.googleusercontent.com")
+	// data.SetValue("google", "client_secret", "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")
+	// data.SetValue("google", "scope", "drive")
+	// data.SetValue("google", "token", `{"access_token":"ya29.a0AVvZVsqsy3vWjpjsl87mtxirrtkHpkyEXdvlORzZeIahObdEtDE47-Hzo1bIg8vJhfYKh-cdqgrUM305hiEJssFMcpkM-0IwPyxlpynMFWS0L5356AUvbv3DUd_RbV_MbKijyTThuDkfrXdLIiEOwxMOtYSXmDUaCgYKAbgSAQASFQGbdwaI6ae1NZbJARogHtpjitLGkg0166","token_type":"Bearer","refresh_token":"1//01CoIJ-aZDrUPCgYIARAAGAESNwF-L9IrNLyzp1Xzfa_sPPMouyrTgJrVchPX6uXqMizXjohTdycCpVgVcu402ND-Ikn2hArRGXA","expiry":"2023-01-28T19:26:50.198064816+08:00"}`)
+	//e = data.Save()
+	//fmt.Println(e)
 }
 
 // @title casaOS API
@@ -134,7 +151,7 @@ func main() {
 	if err != nil {
 		panic(err)
 	}
-	apiPaths := []string{
+	routers := []string{
 		"/v1/sys",
 		"/v1/sys",
 		"/v1/port",
 		"/v1/port",
 		"/v1/file",
 		"/v1/file",
@@ -144,10 +161,13 @@ func main() {
 		"/v1/samba",
 		"/v1/samba",
 		"/v1/notify",
 		"/v1/notify",
 		"/v1/socketio",
 		"/v1/socketio",
+		"/v1/driver",
+		"/v1/storage",
+		"/v1/recover",
 		route.V2APIPath,
 		route.V2DocPath,
 	}
-	for _, apiPath := range apiPaths {
+	for _, apiPath := range routers {
 		err = service.MyService.Gateway().CreateRoute(&model.Route{
 			Path:   apiPath,
 			Target: "http://" + listener.Addr().String(),

+ 39 - 0
model/args.go

@@ -0,0 +1,39 @@
+package model
+
+import (
+	"io"
+	"net/http"
+	"time"
+)
+
+type ListArgs struct {
+	ReqPath string
+}
+
+type LinkArgs struct {
+	IP     string
+	Header http.Header
+	Type   string
+}
+
+type Link struct {
+	URL        string         `json:"url"`
+	Header     http.Header    `json:"header"` // headers needed for the request
+	Data       io.ReadCloser  // return the file reader directly
+	Status     int            // status may be 200 or 206, etc.
+	FilePath   *string        // local file, return the filepath
+	Expiration *time.Duration // url expiration time
+	Method     string         `json:"method"` // http method
+}
+
+type OtherArgs struct {
+	Obj    Obj
+	Method string
+	Data   interface{}
+}
+
+type FsOtherArgs struct {
+	Path   string      `json:"path" form:"path"`
+	Method string      `json:"method" form:"method"`
+	Data   interface{} `json:"data" form:"data"`
+}

+ 6 - 0
model/common.go

@@ -0,0 +1,6 @@
+package model
+
+type PageResp struct {
+	Content interface{} `json:"content"`
+	Total   int64       `json:"total"`
+}

+ 186 - 0
model/obj.go

@@ -0,0 +1,186 @@
+package model
+
+import (
+	"io"
+	"regexp"
+	"sort"
+	"strings"
+	"time"
+
+	mapset "github.com/deckarep/golang-set/v2"
+
+	"github.com/maruel/natural"
+)
+
+type UnwrapObj interface {
+	Unwrap() Obj
+}
+
+type Obj interface {
+	GetSize() int64
+	GetName() string
+	ModTime() time.Time
+	IsDir() bool
+
+	// The internal information of the driver.
+	// If you want to use it, please understand what it means
+	GetID() string
+	GetPath() string
+}
+
+type FileStreamer interface {
+	io.ReadCloser
+	Obj
+	GetMimetype() string
+	SetReadCloser(io.ReadCloser)
+	NeedStore() bool
+	GetReadCloser() io.ReadCloser
+	GetOld() Obj
+}
+
+type URL interface {
+	URL() string
+}
+
+type Thumb interface {
+	Thumb() string
+}
+
+type SetPath interface {
+	SetPath(path string)
+}
+
+func SortFiles(objs []Obj, orderBy, orderDirection string) {
+	if orderBy == "" {
+		return
+	}
+	sort.Slice(objs, func(i, j int) bool {
+		switch orderBy {
+		case "name":
+			{
+				c := natural.Less(objs[i].GetName(), objs[j].GetName())
+				if orderDirection == "desc" {
+					return !c
+				}
+				return c
+			}
+		case "size":
+			{
+				if orderDirection == "desc" {
+					return objs[i].GetSize() >= objs[j].GetSize()
+				}
+				return objs[i].GetSize() <= objs[j].GetSize()
+			}
+		case "modified":
+			if orderDirection == "desc" {
+				return objs[i].ModTime().After(objs[j].ModTime())
+			}
+			return objs[i].ModTime().Before(objs[j].ModTime())
+		}
+		return false
+	})
+}
+
+func ExtractFolder(objs []Obj, extractFolder string) {
+	if extractFolder == "" {
+		return
+	}
+	front := extractFolder == "front"
+	sort.SliceStable(objs, func(i, j int) bool {
+		if objs[i].IsDir() || objs[j].IsDir() {
+			if !objs[i].IsDir() {
+				return !front
+			}
+			if !objs[j].IsDir() {
+				return front
+			}
+		}
+		return false
+	})
+}
+
+// Wrap
+func WrapObjName(objs Obj) Obj {
+	return &ObjWrapName{Obj: objs}
+}
+
+func WrapObjsName(objs []Obj) {
+	for i := 0; i < len(objs); i++ {
+		objs[i] = &ObjWrapName{Obj: objs[i]}
+	}
+}
+
+func UnwrapObjs(obj Obj) Obj {
+	if unwrap, ok := obj.(UnwrapObj); ok {
+		obj = unwrap.Unwrap()
+	}
+	return obj
+}
+
+func GetThumb(obj Obj) (thumb string, ok bool) {
+	if obj, ok := obj.(Thumb); ok {
+		return obj.Thumb(), true
+	}
+	if unwrap, ok := obj.(UnwrapObj); ok {
+		return GetThumb(unwrap.Unwrap())
+	}
+	return thumb, false
+}
+
+func GetUrl(obj Obj) (url string, ok bool) {
+	if obj, ok := obj.(URL); ok {
+		return obj.URL(), true
+	}
+	if unwrap, ok := obj.(UnwrapObj); ok {
+		return GetUrl(unwrap.Unwrap())
+	}
+	return url, false
+}
+
+// Merge
+func NewObjMerge() *ObjMerge {
+	return &ObjMerge{
+		set: mapset.NewSet[string](),
+	}
+}
+
+type ObjMerge struct {
+	regs []*regexp.Regexp
+	set  mapset.Set[string]
+}
+
+func (om *ObjMerge) Merge(objs []Obj, objs_ ...Obj) []Obj {
+	newObjs := make([]Obj, 0, len(objs)+len(objs_))
+	newObjs = om.insertObjs(om.insertObjs(newObjs, objs...), objs_...)
+	return newObjs
+}
+
+func (om *ObjMerge) insertObjs(objs []Obj, objs_ ...Obj) []Obj {
+	for _, obj := range objs_ {
+		if om.clickObj(obj) {
+			objs = append(objs, obj)
+		}
+	}
+	return objs
+}
+
+func (om *ObjMerge) clickObj(obj Obj) bool {
+	for _, reg := range om.regs {
+		if reg.MatchString(obj.GetName()) {
+			return false
+		}
+	}
+	return om.set.Add(obj.GetName())
+}
+
+func (om *ObjMerge) InitHideReg(hides string) {
+	rs := strings.Split(hides, "\n")
+	om.regs = make([]*regexp.Regexp, 0, len(rs))
+	for _, r := range rs {
+		om.regs = append(om.regs, regexp.MustCompile(r))
+	}
+}
+
+func (om *ObjMerge) Reset() {
+	om.set.Clear()
+}
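
A small assumed example of the sorting and merge helpers above, using the Object type defined in model/object.go below:

func sortExample() {
	objs := []model.Obj{
		&model.Object{Name: "b.txt", Size: 2},
		&model.Object{Name: "photos", IsFolder: true},
		&model.Object{Name: "a.txt", Size: 1},
	}
	model.SortFiles(objs, "name", "asc") // natural-order name sort
	model.ExtractFolder(objs, "front")   // folders bubble to the front

	om := model.NewObjMerge()
	om.InitHideReg(`^\..*`) // made-up pattern: hide dotfiles
	_ = om.Merge(objs)      // names matching the pattern or already seen are dropped
}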

+ 90 - 0
model/object.go

@@ -0,0 +1,90 @@
+package model
+
+import (
+	"time"
+)
+
+type ObjWrapName struct {
+	Name string
+	Obj
+}
+
+func (o *ObjWrapName) Unwrap() Obj {
+	return o.Obj
+}
+
+func (o *ObjWrapName) GetName() string {
+	if o.Name == "" {
+		o.Name = o.Obj.GetName()
+	}
+	return o.Name
+}
+
+type Object struct {
+	ID       string
+	Path     string
+	Name     string
+	Size     int64
+	Modified time.Time
+	IsFolder bool
+}
+
+func (o *Object) GetName() string {
+	return o.Name
+}
+
+func (o *Object) GetSize() int64 {
+	return o.Size
+}
+
+func (o *Object) ModTime() time.Time {
+	return o.Modified
+}
+
+func (o *Object) IsDir() bool {
+	return o.IsFolder
+}
+
+func (o *Object) GetID() string {
+	return o.ID
+}
+
+func (o *Object) GetPath() string {
+	return o.Path
+}
+
+func (o *Object) SetPath(path string) {
+	o.Path = path
+}
+
+type Thumbnail struct {
+	Thumbnail string
+}
+
+type Url struct {
+	Url string
+}
+
+func (w Url) URL() string {
+	return w.Url
+}
+
+func (t Thumbnail) Thumb() string {
+	return t.Thumbnail
+}
+
+type ObjThumb struct {
+	Object
+	Thumbnail
+}
+
+type ObjectURL struct {
+	Object
+	Url
+}
+
+type ObjThumbURL struct {
+	Object
+	Thumbnail
+	Url
+}

+ 20 - 0
model/req.go

@@ -0,0 +1,20 @@
+package model
+
+type PageReq struct {
+	Page    int `json:"page" form:"page"`
+	PerPage int `json:"per_page" form:"per_page"`
+}
+
+const MaxUint = ^uint(0)
+const MinUint = 0
+const MaxInt = int(MaxUint >> 1)
+const MinInt = -MaxInt - 1
+
+func (p *PageReq) Validate() {
+	if p.Page < 1 {
+		p.Page = 1
+	}
+	if p.PerPage < 1 {
+		p.PerPage = MaxInt
+	}
+}

+ 33 - 0
model/setting.go

@@ -0,0 +1,33 @@
+package model
+
+const (
+	SINGLE = iota
+	SITE
+	STYLE
+	PREVIEW
+	GLOBAL
+	ARIA2
+	INDEX
+	GITHUB
+)
+
+const (
+	PUBLIC = iota
+	PRIVATE
+	READONLY
+	DEPRECATED
+)
+
+type SettingItem struct {
+	Key     string `json:"key" gorm:"primaryKey" binding:"required"` // unique key
+	Value   string `json:"value"`                                    // value
+	Help    string `json:"help"`                                     // help message
+	Type    string `json:"type"`                                     // string, number, bool, select
+	Options string `json:"options"`                                  // values for select
+	Group   int    `json:"group"`                                    // use to group setting in frontend
+	Flag    int    `json:"flag"`                                     // 0 = public, 1 = private, 2 = readonly, 3 = deprecated, etc.
+}
+
+func (s SettingItem) IsDeprecated() bool {
+	return s.Flag == DEPRECATED
+}

+ 0 - 69
model/smartctl_model.go

@@ -1,69 +0,0 @@
-package model
-
-//
-type SmartctlA struct {
-	Smartctl struct {
-		Version      []int    `json:"version"`
-		SvnRevision  string   `json:"svn_revision"`
-		PlatformInfo string   `json:"platform_info"`
-		BuildInfo    string   `json:"build_info"`
-		Argv         []string `json:"argv"`
-		ExitStatus   int      `json:"exit_status"`
-	} `json:"smartctl"`
-	Device struct {
-		Name     string `json:"name"`
-		InfoName string `json:"info_name"`
-		Type     string `json:"type"`
-		Protocol string `json:"protocol"`
-	} `json:"device"`
-	ModelName       string `json:"model_name"`
-	SerialNumber    string `json:"serial_number"`
-	FirmwareVersion string `json:"firmware_version"`
-	UserCapacity    struct {
-		Blocks int   `json:"blocks"`
-		Bytes  int64 `json:"bytes"`
-	} `json:"user_capacity"`
-	SmartStatus struct {
-		Passed bool `json:"passed"`
-	} `json:"smart_status"`
-	AtaSmartData struct {
-		OfflineDataCollection struct {
-			Status struct {
-				Value  int    `json:"value"`
-				String string `json:"string"`
-			} `json:"status"`
-			CompletionSeconds int `json:"completion_seconds"`
-		} `json:"offline_data_collection"`
-		SelfTest struct {
-			Status struct {
-				Value  int    `json:"value"`
-				String string `json:"string"`
-				Passed bool   `json:"passed"`
-			} `json:"status"`
-			PollingMinutes struct {
-				Short      int `json:"short"`
-				Extended   int `json:"extended"`
-				Conveyance int `json:"conveyance"`
-			} `json:"polling_minutes"`
-		} `json:"self_test"`
-		Capabilities struct {
-			Values                        []int `json:"values"`
-			ExecOfflineImmediateSupported bool  `json:"exec_offline_immediate_supported"`
-			OfflineIsAbortedUponNewCmd    bool  `json:"offline_is_aborted_upon_new_cmd"`
-			OfflineSurfaceScanSupported   bool  `json:"offline_surface_scan_supported"`
-			SelfTestsSupported            bool  `json:"self_tests_supported"`
-			ConveyanceSelfTestSupported   bool  `json:"conveyance_self_test_supported"`
-			SelectiveSelfTestSupported    bool  `json:"selective_self_test_supported"`
-			AttributeAutosaveEnabled      bool  `json:"attribute_autosave_enabled"`
-			ErrorLoggingSupported         bool  `json:"error_logging_supported"`
-			GpLoggingSupported            bool  `json:"gp_logging_supported"`
-		} `json:"capabilities"`
-	} `json:"ata_smart_data"`
-	PowerOnTime struct {
-		Hours int `json:"hours"`
-	} `json:"power_on_time"`
-	PowerCycleCount int `json:"power_cycle_count"`
-	Temperature     struct {
-		Current int `json:"current"`
-	} `json:"temperature"`
-}

+ 54 - 0
model/storage.go

@@ -0,0 +1,54 @@
+package model
+
+import "time"
+
+type Storage struct {
+	ID              uint      `json:"id" gorm:"primaryKey"`                        // unique key
+	MountPath       string    `json:"mount_path" gorm:"unique" binding:"required"` // must be standardized
+	Order           int       `json:"order"`                                       // use to sort
+	Driver          string    `json:"driver"`                                      // driver used
+	CacheExpiration int       `json:"cache_expiration"`                            // cache expire time
+	Status          string    `json:"status"`
+	Addition        string    `json:"addition" gorm:"type:text"` // Additional information, defined in the corresponding driver
+	Remark          string    `json:"remark"`
+	Modified        time.Time `json:"modified"`
+	Disabled        bool      `json:"disabled"` // if disabled
+	Sort
+	Proxy
+}
+
+type Sort struct {
+	OrderBy        string `json:"order_by"`
+	OrderDirection string `json:"order_direction"`
+	ExtractFolder  string `json:"extract_folder"`
+}
+
+type Proxy struct {
+	WebProxy     bool   `json:"web_proxy"`
+	WebdavPolicy string `json:"webdav_policy"`
+	DownProxyUrl string `json:"down_proxy_url"`
+}
+
+func (s *Storage) GetStorage() *Storage {
+	return s
+}
+
+func (s *Storage) SetStorage(storage Storage) {
+	*s = storage
+}
+
+func (s *Storage) SetStatus(status string) {
+	s.Status = status
+}
+
+func (p Proxy) Webdav302() bool {
+	return p.WebdavPolicy == "302_redirect"
+}
+
+func (p Proxy) WebdavProxy() bool {
+	return p.WebdavPolicy == "use_proxy_url"
+}
+
+func (p Proxy) WebdavNative() bool {
+	return !p.Webdav302() && !p.WebdavProxy()
+}

+ 33 - 0
model/stream.go

@@ -0,0 +1,33 @@
+package model
+
+import (
+	"io"
+)
+
+type FileStream struct {
+	Obj
+	io.ReadCloser
+	Mimetype     string
+	WebPutAsTask bool
+	Old          Obj
+}
+
+func (f *FileStream) GetMimetype() string {
+	return f.Mimetype
+}
+
+func (f *FileStream) NeedStore() bool {
+	return f.WebPutAsTask
+}
+
+func (f *FileStream) GetReadCloser() io.ReadCloser {
+	return f.ReadCloser
+}
+
+func (f *FileStream) SetReadCloser(rc io.ReadCloser) {
+	f.ReadCloser = rc
+}
+
+func (f *FileStream) GetOld() Obj {
+	return f.Old
+}

+ 12 - 0
pkg/fs/fs.go

@@ -0,0 +1,12 @@
+package fs
+
+import "io"
+
+// CheckClose is a utility function used to check the return from
+// Close in a defer statement.
+func CheckClose(c io.Closer, err *error) {
+	cerr := c.Close()
+	if *err == nil {
+		*err = cerr
+	}
+}
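
The intended pattern (an assumption based on the signature) is to hand CheckClose the named return value so a failed Close is not silently lost:

func copyFile(dst io.Writer, path string) (err error) {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	// propagates the Close error only when nothing else has already failed
	defer fs.CheckClose(f, &err)
	_, err = io.Copy(dst, f)
	return err
}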

+ 412 - 0
pkg/generic_sync/generic_sync.go

@@ -0,0 +1,412 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package generic_sync
+
+import (
+	"sync"
+	"sync/atomic"
+	"unsafe"
+)
+
+// MapOf is like a Go map[interface{}]interface{} but is safe for concurrent use
+// by multiple goroutines without additional locking or coordination.
+// Loads, stores, and deletes run in amortized constant time.
+//
+// The MapOf type is specialized. Most code should use a plain Go map instead,
+// with separate locking or coordination, for better type safety and to make it
+// easier to maintain other invariants along with the map content.
+//
+// The MapOf type is optimized for two common use cases: (1) when the entry for a given
+// key is only ever written once but read many times, as in caches that only grow,
+// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
+// sets of keys. In these two cases, use of a MapOf may significantly reduce lock
+// contention compared to a Go map paired with a separate Mutex or RWMutex.
+//
+// The zero MapOf is empty and ready for use. A MapOf must not be copied after first use.
+type MapOf[K comparable, V any] struct {
+	mu sync.Mutex
+
+	// read contains the portion of the map's contents that are safe for
+	// concurrent access (with or without mu held).
+	//
+	// The read field itself is always safe to load, but must only be stored with
+	// mu held.
+	//
+	// Entries stored in read may be updated concurrently without mu, but updating
+	// a previously-expunged entry requires that the entry be copied to the dirty
+	// map and unexpunged with mu held.
+	read atomic.Value // readOnly
+
+	// dirty contains the portion of the map's contents that require mu to be
+	// held. To ensure that the dirty map can be promoted to the read map quickly,
+	// it also includes all of the non-expunged entries in the read map.
+	//
+	// Expunged entries are not stored in the dirty map. An expunged entry in the
+	// clean map must be unexpunged and added to the dirty map before a new value
+	// can be stored to it.
+	//
+	// If the dirty map is nil, the next write to the map will initialize it by
+	// making a shallow copy of the clean map, omitting stale entries.
+	dirty map[K]*entry[V]
+
+	// misses counts the number of loads since the read map was last updated that
+	// needed to lock mu to determine whether the key was present.
+	//
+	// Once enough misses have occurred to cover the cost of copying the dirty
+	// map, the dirty map will be promoted to the read map (in the unamended
+	// state) and the next store to the map will make a new dirty copy.
+	misses int
+}
+
+// readOnly is an immutable struct stored atomically in the MapOf.read field.
+type readOnly[K comparable, V any] struct {
+	m       map[K]*entry[V]
+	amended bool // true if the dirty map contains some key not in m.
+}
+
+// expunged is an arbitrary pointer that marks entries which have been deleted
+// from the dirty map.
+var expunged = unsafe.Pointer(new(interface{}))
+
+// An entry is a slot in the map corresponding to a particular key.
+type entry[V any] struct {
+	// p points to the interface{} value stored for the entry.
+	//
+	// If p == nil, the entry has been deleted and m.dirty == nil.
+	//
+	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
+	// is missing from m.dirty.
+	//
+	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
+	// != nil, in m.dirty[key].
+	//
+	// An entry can be deleted by atomic replacement with nil: when m.dirty is
+	// next created, it will atomically replace nil with expunged and leave
+	// m.dirty[key] unset.
+	//
+	// An entry's associated value can be updated by atomic replacement, provided
+	// p != expunged. If p == expunged, an entry's associated value can be updated
+	// only after first setting m.dirty[key] = e so that lookups using the dirty
+	// map find the entry.
+	p unsafe.Pointer // *interface{}
+}
+
+func newEntry[V any](i V) *entry[V] {
+	return &entry[V]{p: unsafe.Pointer(&i)}
+}
+
+// Load returns the value stored in the map for a key, or nil if no
+// value is present.
+// The ok result indicates whether value was found in the map.
+func (m *MapOf[K, V]) Load(key K) (value V, ok bool) {
+	read, _ := m.read.Load().(readOnly[K, V])
+	e, ok := read.m[key]
+	if !ok && read.amended {
+		m.mu.Lock()
+		// Avoid reporting a spurious miss if m.dirty got promoted while we were
+		// blocked on m.mu. (If further loads of the same key will not miss, it's
+		// not worth copying the dirty map for this key.)
+		read, _ = m.read.Load().(readOnly[K, V])
+		e, ok = read.m[key]
+		if !ok && read.amended {
+			e, ok = m.dirty[key]
+			// Regardless of whether the entry was present, record a miss: this key
+			// will take the slow path until the dirty map is promoted to the read
+			// map.
+			m.missLocked()
+		}
+		m.mu.Unlock()
+	}
+	if !ok {
+		return value, false
+	}
+	return e.load()
+}
+
+func (m *MapOf[K, V]) Has(key K) bool {
+	_, ok := m.Load(key)
+	return ok
+}
+
+func (e *entry[V]) load() (value V, ok bool) {
+	p := atomic.LoadPointer(&e.p)
+	if p == nil || p == expunged {
+		return value, false
+	}
+	return *(*V)(p), true
+}
+
+// Store sets the value for a key.
+func (m *MapOf[K, V]) Store(key K, value V) {
+	read, _ := m.read.Load().(readOnly[K, V])
+	if e, ok := read.m[key]; ok && e.tryStore(&value) {
+		return
+	}
+
+	m.mu.Lock()
+	read, _ = m.read.Load().(readOnly[K, V])
+	if e, ok := read.m[key]; ok {
+		if e.unexpungeLocked() {
+			// The entry was previously expunged, which implies that there is a
+			// non-nil dirty map and this entry is not in it.
+			m.dirty[key] = e
+		}
+		e.storeLocked(&value)
+	} else if e, ok := m.dirty[key]; ok {
+		e.storeLocked(&value)
+	} else {
+		if !read.amended {
+			// We're adding the first new key to the dirty map.
+			// Make sure it is allocated and mark the read-only map as incomplete.
+			m.dirtyLocked()
+			m.read.Store(readOnly[K, V]{m: read.m, amended: true})
+		}
+		m.dirty[key] = newEntry(value)
+	}
+	m.mu.Unlock()
+}
+
+// tryStore stores a value if the entry has not been expunged.
+//
+// If the entry is expunged, tryStore returns false and leaves the entry
+// unchanged.
+func (e *entry[V]) tryStore(i *V) bool {
+	for {
+		p := atomic.LoadPointer(&e.p)
+		if p == expunged {
+			return false
+		}
+		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
+			return true
+		}
+	}
+}
+
+// unexpungeLocked ensures that the entry is not marked as expunged.
+//
+// If the entry was previously expunged, it must be added to the dirty map
+// before m.mu is unlocked.
+func (e *entry[V]) unexpungeLocked() (wasExpunged bool) {
+	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
+}
+
+// storeLocked unconditionally stores a value to the entry.
+//
+// The entry must be known not to be expunged.
+func (e *entry[V]) storeLocked(i *V) {
+	atomic.StorePointer(&e.p, unsafe.Pointer(i))
+}
+
+// LoadOrStore returns the existing value for the key if present.
+// Otherwise, it stores and returns the given value.
+// The loaded result is true if the value was loaded, false if stored.
+func (m *MapOf[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
+	// Avoid locking if it's a clean hit.
+	read, _ := m.read.Load().(readOnly[K, V])
+	if e, ok := read.m[key]; ok {
+		actual, loaded, ok := e.tryLoadOrStore(value)
+		if ok {
+			return actual, loaded
+		}
+	}
+
+	m.mu.Lock()
+	read, _ = m.read.Load().(readOnly[K, V])
+	if e, ok := read.m[key]; ok {
+		if e.unexpungeLocked() {
+			m.dirty[key] = e
+		}
+		actual, loaded, _ = e.tryLoadOrStore(value)
+	} else if e, ok := m.dirty[key]; ok {
+		actual, loaded, _ = e.tryLoadOrStore(value)
+		m.missLocked()
+	} else {
+		if !read.amended {
+			// We're adding the first new key to the dirty map.
+			// Make sure it is allocated and mark the read-only map as incomplete.
+			m.dirtyLocked()
+			m.read.Store(readOnly[K, V]{m: read.m, amended: true})
+		}
+		m.dirty[key] = newEntry(value)
+		actual, loaded = value, false
+	}
+	m.mu.Unlock()
+
+	return actual, loaded
+}
+
+// tryLoadOrStore atomically loads or stores a value if the entry is not
+// expunged.
+//
+// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
+// returns with ok==false.
+func (e *entry[V]) tryLoadOrStore(i V) (actual V, loaded, ok bool) {
+	p := atomic.LoadPointer(&e.p)
+	if p == expunged {
+		return actual, false, false
+	}
+	if p != nil {
+		return *(*V)(p), true, true
+	}
+
+	// Copy the interface after the first load to make this method more amenable
+	// to escape analysis: if we hit the "load" path or the entry is expunged, we
+	// shouldn't bother heap-allocating.
+	ic := i
+	for {
+		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
+			return i, false, true
+		}
+		p = atomic.LoadPointer(&e.p)
+		if p == expunged {
+			return actual, false, false
+		}
+		if p != nil {
+			return *(*V)(p), true, true
+		}
+	}
+}
+
+// Delete deletes the value for a key.
+func (m *MapOf[K, V]) Delete(key K) {
+	read, _ := m.read.Load().(readOnly[K, V])
+	e, ok := read.m[key]
+	if !ok && read.amended {
+		m.mu.Lock()
+		read, _ = m.read.Load().(readOnly[K, V])
+		e, ok = read.m[key]
+		if !ok && read.amended {
+			delete(m.dirty, key)
+		}
+		m.mu.Unlock()
+	}
+	if ok {
+		e.delete()
+	}
+}
+
+func (e *entry[V]) delete() (hadValue bool) {
+	for {
+		p := atomic.LoadPointer(&e.p)
+		if p == nil || p == expunged {
+			return false
+		}
+		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
+			return true
+		}
+	}
+}
+
+// Range calls f sequentially for each key and value present in the map.
+// If f returns false, range stops the iteration.
+//
+// Range does not necessarily correspond to any consistent snapshot of the MapOf's
+// contents: no key will be visited more than once, but if the value for any key
+// is stored or deleted concurrently, Range may reflect any mapping for that key
+// from any point during the Range call.
+//
+// Range may be O(N) with the number of elements in the map even if f returns
+// false after a constant number of calls.
+func (m *MapOf[K, V]) Range(f func(key K, value V) bool) {
+	// We need to be able to iterate over all of the keys that were already
+	// present at the start of the call to Range.
+	// If read.amended is false, then read.m satisfies that property without
+	// requiring us to hold m.mu for a long time.
+	read, _ := m.read.Load().(readOnly[K, V])
+	if read.amended {
+		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
+		// (assuming the caller does not break out early), so a call to Range
+		// amortizes an entire copy of the map: we can promote the dirty copy
+		// immediately!
+		m.mu.Lock()
+		read, _ = m.read.Load().(readOnly[K, V])
+		if read.amended {
+			read = readOnly[K, V]{m: m.dirty}
+			m.read.Store(read)
+			m.dirty = nil
+			m.misses = 0
+		}
+		m.mu.Unlock()
+	}
+
+	for k, e := range read.m {
+		v, ok := e.load()
+		if !ok {
+			continue
+		}
+		if !f(k, v) {
+			break
+		}
+	}
+}
+
+// Values returns a slice of the values in the map.
+func (m *MapOf[K, V]) Values() []V {
+	var values []V
+	m.Range(func(key K, value V) bool {
+		values = append(values, value)
+		return true
+	})
+	return values
+}
+
+// Count walks the map via Range, since m.dirty may be nil after a promotion.
+func (m *MapOf[K, V]) Count() int {
+	count := 0
+	m.Range(func(key K, value V) bool { count++; return true })
+	return count
+}
+
+func (m *MapOf[K, V]) Empty() bool {
+	return m.Count() == 0
+}
+
+func (m *MapOf[K, V]) ToMap() map[K]V {
+	ans := make(map[K]V)
+	m.Range(func(key K, value V) bool {
+		ans[key] = value
+		return true
+	})
+	return ans
+}
+
+func (m *MapOf[K, V]) Clear() {
+	m.Range(func(key K, value V) bool {
+		m.Delete(key)
+		return true
+	})
+}
+
+func (m *MapOf[K, V]) missLocked() {
+	m.misses++
+	if m.misses < len(m.dirty) {
+		return
+	}
+	m.read.Store(readOnly[K, V]{m: m.dirty})
+	m.dirty = nil
+	m.misses = 0
+}
+
+func (m *MapOf[K, V]) dirtyLocked() {
+	if m.dirty != nil {
+		return
+	}
+
+	read, _ := m.read.Load().(readOnly[K, V])
+	m.dirty = make(map[K]*entry[V], len(read.m))
+	for k, e := range read.m {
+		if !e.tryExpungeLocked() {
+			m.dirty[k] = e
+		}
+	}
+}
+
+func (e *entry[V]) tryExpungeLocked() (isExpunged bool) {
+	p := atomic.LoadPointer(&e.p)
+	for p == nil {
+		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
+			return true
+		}
+		p = atomic.LoadPointer(&e.p)
+	}
+	return p == expunged
+}
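
Usage mirrors the standard sync.Map but with type parameters; a short assumed example (imports omitted):

var visits generic_sync.MapOf[string, int]

func mapOfExample() {
	visits.Store("/a", 1)
	if v, ok := visits.Load("/a"); ok {
		fmt.Println(v) // 1
	}
	visits.Range(func(k string, v int) bool {
		fmt.Println(k, v)
		return true // keep iterating
	})
}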

+ 52 - 0
pkg/sign/hmac.go

@@ -0,0 +1,52 @@
+package sign
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/base64"
+	"io"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type HMACSign struct {
+	SecretKey []byte
+}
+
+func (s HMACSign) Sign(data string, expire int64) string {
+	h := hmac.New(sha256.New, s.SecretKey)
+	expireTimeStamp := strconv.FormatInt(expire, 10)
+	_, err := io.WriteString(h, data+":"+expireTimeStamp)
+	if err != nil {
+		return ""
+	}
+
+	return base64.URLEncoding.EncodeToString(h.Sum(nil)) + ":" + expireTimeStamp
+}
+
+func (s HMACSign) Verify(data, sign string) error {
+	signSlice := strings.Split(sign, ":")
+	// check whether contains expire time
+	if signSlice[len(signSlice)-1] == "" {
+		return ErrExpireMissing
+	}
+	// check whether expire time is expired
+	expires, err := strconv.ParseInt(signSlice[len(signSlice)-1], 10, 64)
+	if err != nil {
+		return ErrExpireInvalid
+	}
+	// if expire time is expired, return error
+	if expires < time.Now().Unix() && expires != 0 {
+		return ErrSignExpired
+	}
+	// verify sign
+	if s.Sign(data, expires) != sign {
+		return ErrSignInvalid
+	}
+	return nil
+}
+
+func NewHMACSign(secret []byte) Sign {
+	return HMACSign{SecretKey: secret}
+}
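
The produced token is base64url(HMAC-SHA256(data + ":" + expire)) followed by ":" and the expire timestamp, which is what Verify splits on; a quick round trip with a made-up secret (imports omitted):

func hmacExample() {
	s := sign.NewHMACSign([]byte("example-secret"))
	token := s.Sign("/a/b.txt", time.Now().Add(time.Hour).Unix())
	if err := s.Verify("/a/b.txt", token); err != nil {
		// expired or tampered
	}
}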

+ 15 - 0
pkg/sign/sign.go

@@ -0,0 +1,15 @@
+package sign
+
+import "errors"
+
+type Sign interface {
+	Sign(data string, expire int64) string
+	Verify(data, sign string) error
+}
+
+var (
+	ErrSignExpired   = errors.New("sign expired")
+	ErrSignInvalid   = errors.New("sign invalid")
+	ErrExpireInvalid = errors.New("expire invalid")
+	ErrExpireMissing = errors.New("expire missing")
+)

+ 212 - 0
pkg/singleflight/singleflight.go

@@ -0,0 +1,212 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package singleflight provides a duplicate function call suppression
+// mechanism.
+package singleflight
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"runtime"
+	"runtime/debug"
+	"sync"
+)
+
+// errGoexit indicates the runtime.Goexit was called in
+// the user given function.
+var errGoexit = errors.New("runtime.Goexit was called")
+
+// A panicError is an arbitrary value recovered from a panic
+// with the stack trace during the execution of given function.
+type panicError struct {
+	value any
+	stack []byte
+}
+
+// Error implements error interface.
+func (p *panicError) Error() string {
+	return fmt.Sprintf("%v\n\n%s", p.value, p.stack)
+}
+
+func newPanicError(v any) error {
+	stack := debug.Stack()
+
+	// The first line of the stack trace is of the form "goroutine N [status]:"
+	// but by the time the panic reaches Do the goroutine may no longer exist
+	// and its status will have changed. Trim out the misleading line.
+	if line := bytes.IndexByte(stack[:], '\n'); line >= 0 {
+		stack = stack[line+1:]
+	}
+	return &panicError{value: v, stack: stack}
+}
+
+// call is an in-flight or completed singleflight.Do call
+type call[T any] struct {
+	wg sync.WaitGroup
+
+	// These fields are written once before the WaitGroup is done
+	// and are only read after the WaitGroup is done.
+	val T
+	err error
+
+	// forgotten indicates whether Forget was called with this call's key
+	// while the call was still in flight.
+	forgotten bool
+
+	// These fields are read and written with the singleflight
+	// mutex held before the WaitGroup is done, and are read but
+	// not written after the WaitGroup is done.
+	dups  int
+	chans []chan<- Result[T]
+}
+
+// Group represents a class of work and forms a namespace in
+// which units of work can be executed with duplicate suppression.
+type Group[T any] struct {
+	mu sync.Mutex          // protects m
+	m  map[string]*call[T] // lazily initialized
+}
+
+// Result holds the results of Do, so they can be passed
+// on a channel.
+type Result[T any] struct {
+	Val    T
+	Err    error
+	Shared bool
+}
+
+// Do executes and returns the results of the given function, making
+// sure that only one execution is in-flight for a given key at a
+// time. If a duplicate comes in, the duplicate caller waits for the
+// original to complete and receives the same results.
+// The return value shared indicates whether v was given to multiple callers.
+func (g *Group[T]) Do(key string, fn func() (T, error)) (v T, err error, shared bool) {
+	g.mu.Lock()
+	if g.m == nil {
+		g.m = make(map[string]*call[T])
+	}
+	if c, ok := g.m[key]; ok {
+		c.dups++
+		g.mu.Unlock()
+		c.wg.Wait()
+
+		if e, ok := c.err.(*panicError); ok {
+			panic(e)
+		} else if c.err == errGoexit {
+			runtime.Goexit()
+		}
+		return c.val, c.err, true
+	}
+	c := new(call[T])
+	c.wg.Add(1)
+	g.m[key] = c
+	g.mu.Unlock()
+
+	g.doCall(c, key, fn)
+	return c.val, c.err, c.dups > 0
+}
+
+// DoChan is like Do but returns a channel that will receive the
+// results when they are ready.
+//
+// The returned channel will not be closed.
+func (g *Group[T]) DoChan(key string, fn func() (T, error)) <-chan Result[T] {
+	ch := make(chan Result[T], 1)
+	g.mu.Lock()
+	if g.m == nil {
+		g.m = make(map[string]*call[T])
+	}
+	if c, ok := g.m[key]; ok {
+		c.dups++
+		c.chans = append(c.chans, ch)
+		g.mu.Unlock()
+		return ch
+	}
+	c := &call[T]{chans: []chan<- Result[T]{ch}}
+	c.wg.Add(1)
+	g.m[key] = c
+	g.mu.Unlock()
+
+	go g.doCall(c, key, fn)
+
+	return ch
+}
+
+// doCall handles the single call for a key.
+func (g *Group[T]) doCall(c *call[T], key string, fn func() (T, error)) {
+	normalReturn := false
+	recovered := false
+
+	// use double-defer to distinguish panic from runtime.Goexit,
+	// more details see https://golang.org/cl/134395
+	defer func() {
+		// the given function invoked runtime.Goexit
+		if !normalReturn && !recovered {
+			c.err = errGoexit
+		}
+
+		c.wg.Done()
+		g.mu.Lock()
+		defer g.mu.Unlock()
+		if !c.forgotten {
+			delete(g.m, key)
+		}
+
+		if e, ok := c.err.(*panicError); ok {
+			// In order to prevent the waiting channels from being blocked forever,
+			// needs to ensure that this panic cannot be recovered.
+			if len(c.chans) > 0 {
+				go panic(e)
+				select {} // Keep this goroutine around so that it will appear in the crash dump.
+			} else {
+				panic(e)
+			}
+		} else if c.err == errGoexit {
+			// Already in the process of goexit, no need to call again
+		} else {
+			// Normal return
+			for _, ch := range c.chans {
+				ch <- Result[T]{c.val, c.err, c.dups > 0}
+			}
+		}
+	}()
+
+	func() {
+		defer func() {
+			if !normalReturn {
+				// Ideally, we would wait to take a stack trace until we've determined
+				// whether this is a panic or a runtime.Goexit.
+				//
+				// Unfortunately, the only way we can distinguish the two is to see
+				// whether the recover stopped the goroutine from terminating, and by
+				// the time we know that, the part of the stack trace relevant to the
+				// panic has been discarded.
+				if r := recover(); r != nil {
+					c.err = newPanicError(r)
+				}
+			}
+		}()
+
+		c.val, c.err = fn()
+		normalReturn = true
+	}()
+
+	if !normalReturn {
+		recovered = true
+	}
+}
+
+// Forget tells the singleflight to forget about a key.  Future calls
+// to Do for this key will call the function rather than waiting for
+// an earlier call to complete.
+func (g *Group[T]) Forget(key string) {
+	g.mu.Lock()
+	if c, ok := g.m[key]; ok {
+		c.forgotten = true
+	}
+	delete(g.m, key)
+	g.mu.Unlock()
+}
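
A typical (hypothetical) use, matching how listG, linkG and mkdirG are used in internal/op above; loadFromRemote stands in for the real, expensive call:

var g singleflight.Group[[]byte]

func fetchOnce(key string) ([]byte, error) {
	v, err, shared := g.Do(key, func() ([]byte, error) {
		return loadFromRemote(key) // placeholder for the actual fetch
	})
	_ = shared // true when this result was shared with concurrent callers
	return v, err
}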

+ 2 - 1
pkg/sqlite/db.go

@@ -14,6 +14,7 @@ import (
 	"time"
 
 	"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
+	"github.com/IceWhaleTech/CasaOS/model"
 	"github.com/IceWhaleTech/CasaOS/pkg/utils/file"
 	model2 "github.com/IceWhaleTech/CasaOS/service/model"
 	"github.com/glebarez/sqlite"
@@ -42,7 +43,7 @@ func GetDb(dbPath string) *gorm.DB {
 	}
 	gdb = db
 
-	err = db.AutoMigrate(&model2.AppNotify{}, model2.SharesDBModel{}, model2.ConnectionsDBModel{})
+	err = db.AutoMigrate(&model2.AppNotify{}, model2.SharesDBModel{}, model2.ConnectionsDBModel{}, model.Storage{})
 	db.Exec("DROP TABLE IF EXISTS o_application")
 	db.Exec("DROP TABLE IF EXISTS o_application")
 	db.Exec("DROP TABLE IF EXISTS o_friend")
 	db.Exec("DROP TABLE IF EXISTS o_friend")
 	db.Exec("DROP TABLE IF EXISTS o_person_download")
 	db.Exec("DROP TABLE IF EXISTS o_person_download")

+ 18 - 0
pkg/utils/balance.go

@@ -0,0 +1,18 @@
+package utils
+
+import "strings"
+
+var balance = ".balance"
+
+func IsBalance(str string) bool {
+	return strings.Contains(str, balance)
+}
+
+// GetActualMountPath remove balance suffix
+func GetActualMountPath(virtualPath string) string {
+	bIndex := strings.LastIndex(virtualPath, balance)
+	if bIndex != -1 {
+		virtualPath = virtualPath[:bIndex]
+	}
+	return virtualPath
+}
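
For example (assuming the ".balance" suffix convention above, imports omitted):

func balanceExample() {
	fmt.Println(utils.IsBalance("/mnt/drive.balance"))          // true
	fmt.Println(utils.GetActualMountPath("/mnt/drive.balance")) // /mnt/drive
}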

+ 5 - 0
pkg/utils/bool.go

@@ -0,0 +1,5 @@
+package utils
+
+func IsBool(bs ...bool) bool {
+	return len(bs) > 0 && bs[0]
+}

+ 0 - 19
pkg/utils/command/command_helper.go

@@ -2,14 +2,12 @@ package command
 
 import (
 	"bufio"
-	"context"
 	"fmt"
 	"fmt"
 	"io/ioutil"
 	"io/ioutil"
 	"os"
 	"os"
 	"os/exec"
 	"os/exec"
 	"path/filepath"
 	"path/filepath"
 	"strings"
 	"strings"
-	"time"
 )
 
 func OnlyExec(cmdStr string) {
@@ -98,23 +96,6 @@ func ExecLSBLKByPath(path string) []byte {
 	return output
 }
 
-// exec smart
-func ExecSmartCTLByPath(path string) []byte {
-	timeout := 3
-	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second)
-	defer cancel()
-	output, err := exec.CommandContext(ctx, "smartctl", "-a", path, "-j").Output()
-	if err != nil {
-		fmt.Println("smartctl", err)
-		return nil
-	}
-	return output
-}
-
-func ExecEnabledSMART(path string) {
-	exec.Command("smartctl", "-s on", path).Output()
-}
-
 func ExecuteScripts(scriptDirectory string) {
 	if _, err := os.Stat(scriptDirectory); os.IsNotExist(err) {
 		fmt.Printf("No post-start scripts at %s\n", scriptDirectory)

+ 14 - 0
pkg/utils/ctx.go

@@ -0,0 +1,14 @@
+package utils
+
+import (
+	"context"
+)
+
+func IsCanceled(ctx context.Context) bool {
+	select {
+	case <-ctx.Done():
+		return true
+	default:
+		return false
+	}
+}

+ 151 - 0
pkg/utils/httper/drive.go

@@ -0,0 +1,151 @@
+package httper
+
+import (
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/http"
+	"time"
+
+	"github.com/go-resty/resty/v2"
+)
+
+type MountList struct {
+	MountPoints []struct {
+		MountPoint string `json:"MountPoint"`
+		Fs         string `json:"Fs"`
+		Icon       string `json:"Icon"`
+	} `json:"mountPoints"`
+}
+type MountResult struct {
+	Error string `json:"error"`
+	Input struct {
+		Fs         string `json:"fs"`
+		MountPoint string `json:"mountPoint"`
+	} `json:"input"`
+	Path   string `json:"path"`
+	Status int    `json:"status"`
+}
+
+type RemotesResult struct {
+	Remotes []string `json:"remotes"`
+}
+
+var UserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36"
+var DefaultTimeout = time.Second * 30
+
+func NewRestyClient() *resty.Client {
+
+	unixSocket := "/tmp/rclone.sock"
+
+	transport := http.Transport{
+		Dial: func(_, _ string) (net.Conn, error) {
+			return net.Dial("unix", unixSocket)
+		},
+	}
+
+	client := resty.New()
+
+	client.SetTransport(&transport).SetBaseURL("http://localhost")
+	client.SetRetryCount(3).SetRetryWaitTime(5*time.Second).SetTimeout(DefaultTimeout).SetHeader("User-Agent", UserAgent)
+	return client
+}
+
+func GetMountList() (MountList, error) {
+	var result MountList
+	res, err := NewRestyClient().R().Post("/mount/listmounts")
+	if err != nil {
+		return result, err
+	}
+	if res.StatusCode() != 200 {
+		return result, fmt.Errorf("get mount list failed")
+	}
+	json.Unmarshal(res.Body(), &result)
+	for i := 0; i < len(result.MountPoints); i++ {
+		result.MountPoints[i].Fs = result.MountPoints[i].Fs[:len(result.MountPoints[i].Fs)-1]
+	}
+	return result, err
+}
+func Mount(mountPoint string, fs string) error {
+	res, err := NewRestyClient().R().SetFormData(map[string]string{
+		"mountPoint": mountPoint,
+		"fs":         fs,
+	}).Post("/mount/mount")
+	if err != nil {
+		return err
+	}
+	if res.StatusCode() != 200 {
+		return fmt.Errorf("mount failed")
+	}
+	return nil
+}
+func Unmount(mountPoint string) error {
+	res, err := NewRestyClient().R().SetFormData(map[string]string{
+		"mountPoint": mountPoint,
+	}).Post("/mount/unmount")
+	if err != nil {
+		return err
+	}
+	if res.StatusCode() != 200 {
+		return fmt.Errorf("unmount failed")
+	}
+	return nil
+}
+
+func CreateConfig(data map[string]string, name, t string) error {
+	data["config_is_local"] = "false"
+	dataStr, _ := json.Marshal(data)
+	res, err := NewRestyClient().R().SetFormData(map[string]string{
+		"name":       name,
+		"parameters": string(dataStr),
+		"type":       t,
+	}).Post("/config/create")
+	if err != nil {
+		return err
+	}
+	if res.StatusCode() != 200 {
+		return fmt.Errorf("create config failed")
+	}
+	return nil
+}
+
+func GetConfigByName(name string) (map[string]string, error) {
+
+	res, err := NewRestyClient().R().SetFormData(map[string]string{
+		"name": name,
+	}).Post("/config/get")
+	if err != nil {
+		return nil, err
+	}
+	if res.StatusCode() != 200 {
+		return nil, fmt.Errorf("create config failed")
+	}
+	var result map[string]string
+	json.Unmarshal(res.Body(), &result)
+	return result, nil
+}
+func GetAllConfigName() (RemotesResult, error) {
+	var result RemotesResult
+	res, err := NewRestyClient().R().SetFormData(map[string]string{}).Post("/config/listremotes")
+	if err != nil {
+		return result, err
+	}
+	if res.StatusCode() != 200 {
+		return result, fmt.Errorf("get config failed")
+	}
+
+	err = json.Unmarshal(res.Body(), &result)
+	return result, err
+}
+func DeleteConfigByName(name string) error {
+	res, err := NewRestyClient().R().SetFormData(map[string]string{
+		"name": name,
+	}).Post("/config/delete")
+	if err != nil {
+		return err
+	}
+	if res.StatusCode() != 200 {
+		return fmt.Errorf("delete config failed")
+	}
+	return nil
+}
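
The httper package is a thin client for rclone's remote-control API over the unix socket that the rclone.service unit above exposes at /tmp/rclone.sock. Below is a hedged sketch of the create-config / mount / list flow; it assumes `rclone rcd` is already running on that socket, and the remote name, parameters and mount point are invented for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/IceWhaleTech/CasaOS/pkg/utils/httper"
)

func main() {
	// Register a remote named "demo_drive" (type and parameters are illustrative).
	params := map[string]string{
		"scope":       "drive",
		"mount_point": "/mnt/demo_drive",
	}
	if err := httper.CreateConfig(params, "demo_drive", "drive"); err != nil {
		log.Fatal(err)
	}

	// Mount the remote and then list the active mount points.
	if err := httper.Mount("/mnt/demo_drive", "demo_drive:"); err != nil {
		log.Fatal(err)
	}
	list, err := httper.GetMountList()
	if err != nil {
		log.Fatal(err)
	}
	for _, mp := range list.MountPoints {
		fmt.Println(mp.Fs, "->", mp.MountPoint)
	}

	// Clean up: unmount and remove the config again.
	_ = httper.Unmount("/mnt/demo_drive")
	_ = httper.DeleteConfigByName("demo_drive")
}
```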

+ 81 - 0
pkg/utils/path.go

@@ -0,0 +1,81 @@
+package utils
+
+import (
+	"errors"
+	"net/url"
+	stdpath "path"
+	"strings"
+)
+
+// FixAndCleanPath makes the path absolute and cleans it.
+// The parent of the root directory is still the root directory,
+// so ".." and "." segments are resolved away. For example:
+// 1. ".." or "." => "/"
+// 2. "../..." or "./..." => "/..."
+// 3. "../.x." or "./.x." => "/.x."
+// 4. "x//\\y" => "/x/y"
+func FixAndCleanPath(path string) string {
+	path = strings.ReplaceAll(path, "\\", "/")
+	if !strings.HasPrefix(path, "/") {
+		path = "/" + path
+	}
+	return stdpath.Clean(path)
+}
+
+// PathAddSeparatorSuffix appends a trailing '/' to the path if it is missing,
+// for example /root => /root/
+func PathAddSeparatorSuffix(path string) string {
+	if !strings.HasSuffix(path, "/") {
+		path = path + "/"
+	}
+	return path
+}
+
+// PathEqual reports whether two paths are equal after cleaning
+func PathEqual(path1, path2 string) bool {
+	return FixAndCleanPath(path1) == FixAndCleanPath(path2)
+}
+
+func IsSubPath(path string, subPath string) bool {
+	path, subPath = FixAndCleanPath(path), FixAndCleanPath(subPath)
+	return path == subPath || strings.HasPrefix(subPath, PathAddSeparatorSuffix(path))
+}
+
+func Ext(path string) string {
+	ext := stdpath.Ext(path)
+	if strings.HasPrefix(ext, ".") {
+		return ext[1:]
+	}
+	return ext
+}
+
+func EncodePath(path string, all ...bool) string {
+	seg := strings.Split(path, "/")
+	toReplace := []struct {
+		Src string
+		Dst string
+	}{
+		{Src: "%", Dst: "%25"},
+		{Src: "?", Dst: "%3F"},
+		{Src: "#", Dst: "%23"},
+	}
+	for i := range seg {
+		if len(all) > 0 && all[0] {
+			seg[i] = url.PathEscape(seg[i])
+		} else {
+			for j := range toReplace {
+				seg[i] = strings.ReplaceAll(seg[i], toReplace[j].Src, toReplace[j].Dst)
+			}
+		}
+	}
+	return strings.Join(seg, "/")
+}
+
+func JoinBasePath(basePath, reqPath string) (string, error) {
+	if strings.HasSuffix(reqPath, "..") || strings.Contains(reqPath, "../") {
+		return "", errors.New("access using relative path is not allowed")
+	}
+	return stdpath.Join(FixAndCleanPath(basePath), FixAndCleanPath(reqPath)), nil
+}
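
A few illustrative inputs and expected outputs for the helpers above; the values in the comments follow from the code as written and are not taken from the commit itself:

```go
package main

import (
	"fmt"

	"github.com/IceWhaleTech/CasaOS/pkg/utils"
)

func main() {
	fmt.Println(utils.FixAndCleanPath("../docs"))              // "/docs"
	fmt.Println(utils.PathAddSeparatorSuffix("/root"))         // "/root/"
	fmt.Println(utils.IsSubPath("/mnt/drive", "/mnt/drive/a")) // true
	fmt.Println(utils.Ext("photo.JPG"))                        // "JPG"
	fmt.Println(utils.EncodePath("/a b/#c", true))             // "/a%20b/%23c"

	if _, err := utils.JoinBasePath("/mnt", "../etc"); err != nil {
		fmt.Println(err) // relative traversal is rejected
	}
}
```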

+ 46 - 0
pkg/utils/slice.go

@@ -0,0 +1,46 @@
+package utils
+
+// SliceEqual reports whether two slices are equal
+func SliceEqual[T comparable](a, b []T) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i, v := range a {
+		if v != b[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// SliceContains reports whether the slice contains the element
+func SliceContains[T comparable](arr []T, v T) bool {
+	for _, vv := range arr {
+		if vv == v {
+			return true
+		}
+	}
+	return false
+}
+
+// SliceConvert converts a slice to a slice of another type
+func SliceConvert[S any, D any](srcS []S, convert func(src S) (D, error)) ([]D, error) {
+	var res []D
+	for i := range srcS {
+		dst, err := convert(srcS[i])
+		if err != nil {
+			return nil, err
+		}
+		res = append(res, dst)
+	}
+	return res, nil
+}
+
+func MustSliceConvert[S any, D any](srcS []S, convert func(src S) D) []D {
+	var res []D
+	for i := range srcS {
+		dst := convert(srcS[i])
+		res = append(res, dst)
+	}
+	return res
+}
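
A brief illustration of the new generic slice helpers; the string-to-int conversion is just an example:

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/IceWhaleTech/CasaOS/pkg/utils"
)

func main() {
	ids := []string{"1", "2", "3"}

	// SliceConvert propagates the first conversion error it hits.
	nums, err := utils.SliceConvert(ids, func(s string) (int, error) {
		return strconv.Atoi(s)
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(nums)                                   // [1 2 3]
	fmt.Println(utils.SliceContains(nums, 2))           // true
	fmt.Println(utils.SliceEqual(nums, []int{1, 2, 3})) // true
}
```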

+ 37 - 0
pkg/utils/time.go

@@ -0,0 +1,37 @@
+package utils
+
+import (
+	"sync"
+	"time"
+)
+
+func MustParseCNTime(str string) time.Time {
+	lastOpTime, _ := time.ParseInLocation("2006-01-02 15:04:05 -07", str+" +08", time.Local)
+	return lastOpTime
+}
+
+func NewDebounce(interval time.Duration) func(f func()) {
+	var timer *time.Timer
+	var lock sync.Mutex
+	return func(f func()) {
+		lock.Lock()
+		defer lock.Unlock()
+		if timer != nil {
+			timer.Stop()
+		}
+		timer = time.AfterFunc(interval, f)
+	}
+}
+
+func NewDebounce2(interval time.Duration, f func()) func() {
+	var timer *time.Timer
+	var lock sync.Mutex
+	return func() {
+		lock.Lock()
+		defer lock.Unlock()
+		if timer == nil {
+			timer = time.AfterFunc(interval, f)
+		}
+		timer.Reset(interval)
+	}
+}
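
A small sketch of how the two debounce variants differ in use; the refresh callback and interval are hypothetical:

```go
package main

import (
	"fmt"
	"time"

	"github.com/IceWhaleTech/CasaOS/pkg/utils"
)

func main() {
	refresh := func() { fmt.Println("refreshed at", time.Now().Format("15:04:05.000")) }

	// NewDebounce: the callback is passed on every call; only the last call
	// scheduled within the interval actually fires.
	debounce := utils.NewDebounce(200 * time.Millisecond)
	for i := 0; i < 5; i++ {
		debounce(refresh)
	}

	// NewDebounce2: the callback is fixed up front and the timer is reset on
	// every call, so it fires once the calls stop for a full interval.
	tick := utils.NewDebounce2(200*time.Millisecond, refresh)
	for i := 0; i < 5; i++ {
		tick()
	}

	time.Sleep(time.Second) // give both timers a chance to fire
}
```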

+ 5 - 0
route/init.go

@@ -89,4 +89,9 @@ func InitNetworkMount() {
 		connection.Directories = strings.Join(directories, ",")
 		connection.Directories = strings.Join(directories, ",")
 		service.MyService.Connections().UpdateConnection(&connection)
 		service.MyService.Connections().UpdateConnection(&connection)
 	}
 	}
+
+	err := service.MyService.Storage().CheckAndMountAll()
+	if err != nil {
+		logger.Error("mount storage err", zap.Any("err", err))
+	}
 }
 }

+ 13 - 0
route/v1.go

@@ -37,6 +37,7 @@ func InitV1Router() *gin.Engine {
 	r.GET("/ping", func(ctx *gin.Context) {
 	r.GET("/ping", func(ctx *gin.Context) {
 		ctx.String(200, "pong")
 		ctx.String(200, "pong")
 	})
 	})
+	r.GET("/v1/recover/:type", v1.GetRecoverStorage)
 	v1Group := r.Group("/v1")
 	v1Group := r.Group("/v1")
 
 
 	v1Group.Use(jwt.ExceptLocalhost())
 	v1Group.Use(jwt.ExceptLocalhost())
@@ -95,6 +96,18 @@ func InitV1Router() *gin.Engine {
 			v1FileGroup.POST("/upload", v1.PostFileUpload)
 			v1FileGroup.POST("/upload", v1.PostFileUpload)
 			v1FileGroup.GET("/upload", v1.GetFileUpload)
 			v1FileGroup.GET("/upload", v1.GetFileUpload)
 			// v1FileGroup.GET("/download", v1.UserFileDownloadCommonService)
 			// v1FileGroup.GET("/download", v1.UserFileDownloadCommonService)
+
+		}
+		v1StorageGroup := v1Group.Group("/storage")
+		v1StorageGroup.Use()
+		{
+			v1StorageGroup.GET("", v1.ListStorages)
+			v1StorageGroup.DELETE("", v1.DeleteStorage)
+		}
+		v1DriverGroup := v1Group.Group("/driver")
+		v1DriverGroup.Use()
+		{
+			v1DriverGroup.GET("", v1.ListDriverInfo)
 		}
 		}
 		v1FolderGroup := v1Group.Group("/folder")
 		v1FolderGroup := v1Group.Group("/folder")
 		v1FolderGroup.Use()
 		v1FolderGroup.Use()

+ 12 - 0
route/v1/driver.go

@@ -0,0 +1,12 @@
+package v1
+
+import (
+	"github.com/IceWhaleTech/CasaOS/internal/op"
+	"github.com/IceWhaleTech/CasaOS/model"
+	"github.com/IceWhaleTech/CasaOS/pkg/utils/common_err"
+	"github.com/gin-gonic/gin"
+)
+
+func ListDriverInfo(c *gin.Context) {
+	c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SUCCESS, Message: common_err.GetMsg(common_err.SUCCESS), Data: op.GetDriverInfoMap()})
+}

+ 250 - 9
route/v1/file.go

@@ -1,6 +1,7 @@
 package v1
 package v1
 
 
 import (
 import (
+	"errors"
 	"fmt"
 	"fmt"
 	"io"
 	"io"
 	"io/ioutil"
 	"io/ioutil"
@@ -16,10 +17,16 @@ import (
 	"sync"
 	"sync"
 
 
 	"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
 	"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
+	"github.com/IceWhaleTech/CasaOS/internal/conf"
+	"github.com/IceWhaleTech/CasaOS/internal/driver"
 	"github.com/IceWhaleTech/CasaOS/model"
 	"github.com/IceWhaleTech/CasaOS/model"
+
+	"github.com/IceWhaleTech/CasaOS/pkg/utils"
 	"github.com/IceWhaleTech/CasaOS/pkg/utils/common_err"
 	"github.com/IceWhaleTech/CasaOS/pkg/utils/common_err"
 	"github.com/IceWhaleTech/CasaOS/pkg/utils/file"
 	"github.com/IceWhaleTech/CasaOS/pkg/utils/file"
 	"github.com/IceWhaleTech/CasaOS/service"
 	"github.com/IceWhaleTech/CasaOS/service"
+
+	"github.com/IceWhaleTech/CasaOS/internal/sign"
 	"github.com/gin-gonic/gin"
 	"github.com/gin-gonic/gin"
 	uuid "github.com/satori/go.uuid"
 	uuid "github.com/satori/go.uuid"
 	"go.uber.org/zap"
 	"go.uber.org/zap"
@@ -190,6 +197,37 @@ func GetDownloadSingleFile(c *gin.Context) {
 		})
 		})
 		return
 		return
 	}
 	}
+	fileName := path.Base(filePath)
+	// c.Header("Content-Disposition", "inline")
+	c.Header("Content-Disposition", "attachment; filename*=utf-8''"+url2.PathEscape(fileName))
+
+	storage, _ := service.MyService.FsService().GetStorage(filePath)
+	if storage != nil {
+		if shouldProxy(storage, fileName) {
+			Proxy(c)
+			return
+		} else {
+			link, _, err := service.MyService.FsService().Link(c, filePath, model.LinkArgs{
+				IP:     c.ClientIP(),
+				Header: c.Request.Header,
+				Type:   c.Query("type"),
+			})
+			if err != nil {
+				c.JSON(common_err.SERVICE_ERROR, model.Result{
+					Success: common_err.SERVICE_ERROR,
+					Message: common_err.GetMsg(common_err.SERVICE_ERROR),
+					Data:    err.Error(),
+				})
+				return
+
+			}
+			c.Header("Referrer-Policy", "no-referrer")
+			c.Header("Cache-Control", "max-age=0, no-cache, no-store, must-revalidate")
+			c.Redirect(302, link.URL)
+			return
+		}
+	}
+
 	fileTmp, err := os.Open(filePath)
 	fileTmp, err := os.Open(filePath)
 	if err != nil {
 	if err != nil {
 		c.JSON(common_err.SERVICE_ERROR, model.Result{
 		c.JSON(common_err.SERVICE_ERROR, model.Result{
@@ -200,9 +238,6 @@ func GetDownloadSingleFile(c *gin.Context) {
 	}
 	}
 	defer fileTmp.Close()
 	defer fileTmp.Close()
 
 
-	fileName := path.Base(filePath)
-	// c.Header("Content-Disposition", "inline")
-	c.Header("Content-Disposition", "attachment; filename*=utf-8''"+url2.PathEscape(fileName))
 	c.File(filePath)
 	c.File(filePath)
 }
 }
 
 
@@ -215,8 +250,36 @@ func GetDownloadSingleFile(c *gin.Context) {
 // @Success 200 {string} string "ok"
 // @Success 200 {string} string "ok"
 // @Router /file/dirpath [get]
 // @Router /file/dirpath [get]
 func DirPath(c *gin.Context) {
 func DirPath(c *gin.Context) {
-	path := c.DefaultQuery("path", "")
-	info := service.MyService.System().GetDirPath(path)
+	var req ListReq
+	if err := c.ShouldBind(&req); err != nil {
+		c.JSON(common_err.SUCCESS, model.Result{Success: common_err.CLIENT_ERROR, Message: common_err.GetMsg(common_err.CLIENT_ERROR), Data: err.Error()})
+		return
+	}
+	req.Validate()
+	storage, _, _ := service.MyService.StoragePath().GetStorageAndActualPath(req.Path)
+	if storage != nil {
+		req.Validate()
+		objs, err := service.MyService.FsService().FList(c, req.Path, req.Refresh)
+		if err != nil {
+			c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SERVICE_ERROR, Message: common_err.GetMsg(common_err.SERVICE_ERROR), Data: err.Error()})
+			return
+		}
+		total, objs := pagination(objs, &req.PageReq)
+		provider := "unknown"
+		storage, err := service.MyService.FsService().GetStorage(req.Path)
+		if err == nil {
+			provider = storage.GetStorage().Driver
+		}
+		c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SUCCESS, Message: common_err.GetMsg(common_err.SUCCESS), Data: FsListResp{
+			Content:  toObjsResp(objs, req.Path, false),
+			Total:    int64(total),
+			Readme:   "",
+			Write:    false,
+			Provider: provider,
+		}})
+		return
+	}
+	info := service.MyService.System().GetDirPath(req.Path)
 	shares := service.MyService.Shares().GetSharesList()
 	shares := service.MyService.Shares().GetSharesList()
 	sharesMap := make(map[string]string)
 	sharesMap := make(map[string]string)
 	for _, v := range shares {
 	for _, v := range shares {
@@ -250,17 +313,30 @@ func DirPath(c *gin.Context) {
 		}
 		}
 	}
 	}
 
 
-	pathList := []model.Path{}
+	pathList := []ObjResp{}
 	for i := 0; i < len(info); i++ {
 	for i := 0; i < len(info); i++ {
 		if info[i].Name == ".temp" && info[i].IsDir {
 		if info[i].Name == ".temp" && info[i].IsDir {
 			continue
 			continue
 		}
 		}
+
 		if _, ok := fileQueue[info[i].Path]; !ok {
 		if _, ok := fileQueue[info[i].Path]; !ok {
-			pathList = append(pathList, info[i])
+			t := ObjResp{}
+			t.IsDir = info[i].IsDir
+			t.Name = info[i].Name
+			t.Modified = info[i].Date
+			t.Size = info[i].Size
+			t.Path = info[i].Path
+			pathList = append(pathList, t)
 		}
 		}
 	}
 	}
-
-	c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SUCCESS, Message: common_err.GetMsg(common_err.SUCCESS), Data: pathList})
+	flist := FsListResp{
+		Content:  pathList,
+		Total:    int64(len(pathList)),
+		Readme:   "",
+		Write:    true,
+		Provider: "local",
+	}
+	c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SUCCESS, Message: common_err.GetMsg(common_err.SUCCESS), Data: flist})
 }
 }
 
 
 // @Summary rename file or dir
 // @Summary rename file or dir
@@ -660,3 +736,168 @@ func GetSize(c *gin.Context) {
 	}
 	}
 	c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SUCCESS, Message: common_err.GetMsg(common_err.SUCCESS), Data: size})
 	c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SUCCESS, Message: common_err.GetMsg(common_err.SUCCESS), Data: size})
 }
 }
+func Proxy(c *gin.Context) {
+	rawPath := c.Query("path")
+	filename := filepath.Base(rawPath)
+	storage, err := service.MyService.FsService().GetStorage(rawPath)
+	if err != nil {
+		c.JSON(500, model.Result{Success: common_err.SERVICE_ERROR, Message: common_err.GetMsg(common_err.SERVICE_ERROR), Data: err.Error()})
+		return
+	}
+	if canProxy(storage, filename) {
+		downProxyUrl := storage.GetStorage().DownProxyUrl
+		if downProxyUrl != "" {
+			_, ok := c.GetQuery("d")
+			if !ok {
+				URL := fmt.Sprintf("%s%s?sign=%s",
+					strings.Split(downProxyUrl, "\n")[0],
+					utils.EncodePath(rawPath, true),
+					sign.Sign(rawPath))
+				c.Redirect(302, URL)
+				return
+			}
+		}
+		link, file, err := service.MyService.FsService().Link(c, rawPath, model.LinkArgs{
+			Header: c.Request.Header,
+			Type:   c.Query("type"),
+		})
+		if err != nil {
+			c.JSON(common_err.SERVICE_ERROR, model.Result{Success: common_err.SERVICE_ERROR, Message: common_err.GetMsg(common_err.SERVICE_ERROR), Data: err.Error()})
+
+			return
+		}
+		err = CommonProxy(c.Writer, c.Request, link, file)
+		if err != nil {
+			c.JSON(common_err.SERVICE_ERROR, model.Result{Success: common_err.SERVICE_ERROR, Message: common_err.GetMsg(common_err.SERVICE_ERROR), Data: err.Error()})
+			return
+		}
+	} else {
+		c.JSON(common_err.SERVICE_ERROR, model.Result{Success: common_err.SERVICE_ERROR, Message: common_err.GetMsg(common_err.SERVICE_ERROR), Data: "proxy not allowed"})
+		return
+	}
+}
+
+// TODO: needs optimization
+// When should the response be proxied?
+// 1. config.MustProxy()
+// 2. storage.WebProxy
+// 3. proxy_types
+func shouldProxy(storage driver.Driver, filename string) bool {
+	if storage.Config().MustProxy() || storage.GetStorage().WebProxy {
+		return true
+	}
+	if utils.SliceContains(conf.SlicesMap[conf.ProxyTypes], utils.Ext(filename)) {
+		return true
+	}
+	return false
+}
+
+// TODO: needs optimization
+// When can the response be proxied?
+// 1. text file
+// 2. config.MustProxy()
+// 3. storage.WebProxy
+// 4. proxy_types
+// solution: text_file + shouldProxy()
+func canProxy(storage driver.Driver, filename string) bool {
+	if storage.Config().MustProxy() || storage.GetStorage().WebProxy || storage.GetStorage().WebdavProxy() {
+		return true
+	}
+	if utils.SliceContains(conf.SlicesMap[conf.ProxyTypes], utils.Ext(filename)) {
+		return true
+	}
+	if utils.SliceContains(conf.SlicesMap[conf.TextTypes], utils.Ext(filename)) {
+		return true
+	}
+	return false
+}
+
+var HttpClient = &http.Client{}
+
+func CommonProxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error {
+	// if the driver returned a raw data stream, serve it directly
+	var err error
+	if link.Data != nil {
+		defer func() {
+			_ = link.Data.Close()
+		}()
+		w.Header().Set("Content-Type", "application/octet-stream")
+		w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`, file.GetName(), url.QueryEscape(file.GetName())))
+		w.Header().Set("Content-Length", strconv.FormatInt(file.GetSize(), 10))
+		if link.Header != nil {
+			// TODO clean header with blacklist or whitelist
+			link.Header.Del("set-cookie")
+			for h, val := range link.Header {
+				w.Header()[h] = val
+			}
+		}
+		if link.Status == 0 {
+			w.WriteHeader(http.StatusOK)
+		} else {
+			w.WriteHeader(link.Status)
+		}
+		_, err = io.Copy(w, link.Data)
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+	// local file
+	if link.FilePath != nil && *link.FilePath != "" {
+		f, err := os.Open(*link.FilePath)
+		if err != nil {
+			return err
+		}
+		defer func() {
+			_ = f.Close()
+		}()
+		fileStat, err := os.Stat(*link.FilePath)
+		if err != nil {
+			return err
+		}
+		w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`, file.GetName(), url.QueryEscape(file.GetName())))
+		http.ServeContent(w, r, file.GetName(), fileStat.ModTime(), f)
+		return nil
+	} else {
+		req, err := http.NewRequest(link.Method, link.URL, nil)
+		if err != nil {
+			return err
+		}
+		for h, val := range r.Header {
+			if utils.SliceContains(conf.SlicesMap[conf.ProxyIgnoreHeaders], strings.ToLower(h)) {
+				continue
+			}
+			req.Header[h] = val
+		}
+		for h, val := range link.Header {
+			req.Header[h] = val
+		}
+		res, err := HttpClient.Do(req)
+		if err != nil {
+			return err
+		}
+		defer func() {
+			_ = res.Body.Close()
+		}()
+		logger.Info("proxy status", zap.Any("status", res.StatusCode))
+		// TODO clean header with blacklist or whitelist
+		res.Header.Del("set-cookie")
+		for h, v := range res.Header {
+			w.Header()[h] = v
+		}
+	// Content-Disposition must be set before WriteHeader, otherwise it is ignored
+	w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`, file.GetName(), url.QueryEscape(file.GetName())))
+	w.WriteHeader(res.StatusCode)
+		if res.StatusCode >= 400 {
+			all, _ := ioutil.ReadAll(res.Body)
+			msg := string(all)
+			logger.Info("msg", zap.Any("msg", msg))
+
+			return errors.New(msg)
+		}
+		_, err = io.Copy(w, res.Body)
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+}
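
Since CommonProxy is exported, it can in principle be called from any net/http handler, not only from the gin Proxy wrapper above. The sketch below is hypothetical and not how this commit wires it up; the route, URL, file name and size are invented placeholders for whatever a driver's Link() would return:

```go
package main

import (
	"log"
	"net/http"

	"github.com/IceWhaleTech/CasaOS/model"
	v1 "github.com/IceWhaleTech/CasaOS/route/v1"
)

// handleProxied streams a remote URL through CommonProxy (values are placeholders).
func handleProxied(w http.ResponseWriter, r *http.Request) {
	link := &model.Link{
		URL:    "https://example.com/file.bin",
		Method: http.MethodGet,
	}
	obj := &model.Object{Name: "file.bin", Size: 1024}
	if err := v1.CommonProxy(w, r, link, obj); err != nil {
		// headers may already have been written at this point, so just log the failure
		log.Println("proxy failed:", err)
	}
}

func main() {
	http.HandleFunc("/proxied", handleProxied)
	_ = http.ListenAndServe(":8080", nil)
}
```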

+ 93 - 0
route/v1/file_read.go

@@ -0,0 +1,93 @@
+package v1
+
+import (
+	"path/filepath"
+	"time"
+
+	"github.com/IceWhaleTech/CasaOS/model"
+	"github.com/IceWhaleTech/CasaOS/pkg/utils/common_err"
+	"github.com/IceWhaleTech/CasaOS/service"
+
+	"github.com/gin-gonic/gin"
+)
+
+type ListReq struct {
+	model.PageReq
+	Path    string `json:"path" form:"path"`
+	Refresh bool   `json:"refresh"`
+}
+type ObjResp struct {
+	Name     string    `json:"name"`
+	Size     int64     `json:"size"`
+	IsDir    bool      `json:"is_dir"`
+	Modified time.Time `json:"modified"`
+	Sign     string    `json:"sign"`
+	Thumb    string    `json:"thumb"`
+	Type     int       `json:"type"`
+	Path     string    `json:"path"`
+}
+type FsListResp struct {
+	Content  []ObjResp `json:"content"`
+	Total    int64     `json:"total"`
+	Readme   string    `json:"readme"`
+	Write    bool      `json:"write"`
+	Provider string    `json:"provider"`
+}
+
+func FsList(c *gin.Context) {
+	var req ListReq
+	if err := c.ShouldBind(&req); err != nil {
+		c.JSON(common_err.SUCCESS, model.Result{Success: common_err.CLIENT_ERROR, Message: common_err.GetMsg(common_err.CLIENT_ERROR), Data: err.Error()})
+		return
+	}
+	req.Validate()
+	objs, err := service.MyService.FsService().FList(c, req.Path, req.Refresh)
+	if err != nil {
+		c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SERVICE_ERROR, Message: common_err.GetMsg(common_err.SERVICE_ERROR), Data: err.Error()})
+		return
+	}
+	total, objs := pagination(objs, &req.PageReq)
+	provider := "unknown"
+	storage, err := service.MyService.FsService().GetStorage(req.Path)
+	if err == nil {
+		provider = storage.GetStorage().Driver
+	}
+	c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SUCCESS, Message: common_err.GetMsg(common_err.SUCCESS), Data: FsListResp{
+		Content:  toObjsResp(objs, req.Path, false),
+		Total:    int64(total),
+		Readme:   "",
+		Write:    false,
+		Provider: provider,
+	}})
+}
+func pagination(objs []model.Obj, req *model.PageReq) (int, []model.Obj) {
+	pageIndex, pageSize := req.Page, req.PerPage
+	total := len(objs)
+	start := (pageIndex - 1) * pageSize
+	if start > total {
+		return total, []model.Obj{}
+	}
+	end := start + pageSize
+	if end > total {
+		end = total
+	}
+	return total, objs[start:end]
+}
+
+func toObjsResp(objs []model.Obj, parent string, encrypt bool) []ObjResp {
+	var resp []ObjResp
+	for _, obj := range objs {
+		thumb, _ := model.GetThumb(obj)
+		resp = append(resp, ObjResp{
+			Name:     obj.GetName(),
+			Size:     obj.GetSize(),
+			IsDir:    obj.IsDir(),
+			Modified: obj.ModTime(),
+			Sign:     "",
+			Path:     filepath.Join(parent, obj.GetName()),
+			Thumb:    thumb,
+			Type:     0,
+		})
+	}
+	return resp
+}
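
A small test-style illustration of the pagination helper (it is unexported, so this would live inside route/v1); the object names are made up:

```go
package v1

import (
	"testing"

	"github.com/IceWhaleTech/CasaOS/model"
)

func TestPagination(t *testing.T) {
	objs := []model.Obj{
		&model.Object{Name: "a.txt"},
		&model.Object{Name: "b.txt"},
		&model.Object{Name: "c.txt"},
	}
	// Page 2 with 2 items per page should return only the third object.
	total, page := pagination(objs, &model.PageReq{Page: 2, PerPage: 2})
	if total != 3 || len(page) != 1 || page[0].GetName() != "c.txt" {
		t.Fatalf("unexpected result: total=%d, page=%v", total, page)
	}
}
```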

+ 163 - 0
route/v1/recover.go

@@ -0,0 +1,163 @@
+package v1
+
+import (
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/IceWhaleTech/CasaOS/drivers/dropbox"
+	"github.com/IceWhaleTech/CasaOS/drivers/google_drive"
+	"github.com/IceWhaleTech/CasaOS/internal/op"
+	"github.com/IceWhaleTech/CasaOS/service"
+	"github.com/gin-gonic/gin"
+)
+
+func GetRecoverStorage(c *gin.Context) {
+	c.Header("Content-Type", "text/html; charset=utf-8")
+	t := c.Param("type")
+	currentTime := time.Now().UTC()
+	currentDate := time.Now().UTC().Format("2006-01-02")
+	//	timeStr := time.Now().Format("20060102150405")
+	if t == "GoogleDrive" {
+
+		gd := op.GetDriverInfoMap()[t]
+
+		add := google_drive.Addition{}
+		add.Code = c.Query("code")
+		if len(add.Code) == 0 {
+			c.String(200, `<p>code cannot be empty</p>`)
+			return
+		}
+		add.RootFolderID = "root"
+		for _, v := range gd {
+			if v.Name == "client_id" {
+				add.ClientID = v.Default
+			}
+			if v.Name == "client_secret" {
+				add.ClientSecret = v.Default
+			}
+			if v.Name == "chunk_size" {
+				cs, err := strconv.ParseInt(v.Default, 10, 64)
+				if err != nil {
+					cs = 5
+				}
+				add.ChunkSize = cs
+			}
+		}
+
+		var google_drive google_drive.GoogleDrive
+		google_drive.Addition = add
+		err := google_drive.Init(c)
+		if err != nil {
+			c.String(200, `<p>Initialization failure:`+err.Error()+`</p>`)
+			return
+		}
+
+		username, err := google_drive.GetUserInfo(c)
+		if err != nil {
+			c.String(200, `<p>Failed to get user information:`+err.Error()+`</p>`)
+			return
+		}
+		if len(username) > 0 {
+			a := strings.Split(username, "@")
+			username = a[0]
+		}
+		username += "_drive"
+		dataMap, _ := service.MyService.Storage().GetConfigByName(username)
+		if len(dataMap) > 0 {
+			c.String(200, `<p>The same configuration has been added</p>`)
+			service.MyService.Storage().CheckAndMountByName(username)
+			return
+		}
+		dmap := make(map[string]string)
+		dmap["client_id"] = add.ClientID
+		dmap["client_secret"] = add.ClientSecret
+		dmap["scope"] = "drive"
+		dmap["mount_point"] = "/mnt/" + username
+		dmap["token"] = `{"access_token":"` + google_drive.AccessToken + `","token_type":"Bearer","refresh_token":"` + google_drive.RefreshToken + `","expiry":"` + currentDate + `T` + currentTime.Add(time.Hour*1).Format("15:04:05") + `Z"}`
+		// data.SetValue(username, "type", "drive")
+		// data.SetValue(username, "client_id", "865173455964-4ce3gdl73ak5s15kn1vkn73htc8tant2.apps.googleusercontent.com")
+		// data.SetValue(username, "client_secret", "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")
+		// data.SetValue(username, "scope", "drive")
+		// data.SetValue(username, "mount_point", "/mnt/"+username)
+		// data.SetValue(username, "token", `{"access_token":"`+google_drive.AccessToken+`","token_type":"Bearer","refresh_token":"`+google_drive.RefreshToken+`","expiry":"`+currentDate+`T`+currentTime.Add(time.Hour*1).Format("15:04:05")+`Z"}`)
+		// e = data.Save()
+		// if e != nil {
+		// 	c.String(200, `<p>Failed to save configuration:`+e.Error()+`</p>`)
+		// 	return
+		// }
+		service.MyService.Storage().CreateConfig(dmap, username, "drive")
+		service.MyService.Storage().MountStorage("/mnt/"+username, username+":")
+		notify := make(map[string]interface{})
+		notify["status"] = "success"
+		service.MyService.Notify().SendNotify("recover_status", notify)
+	} else if t == "Dropbox" {
+
+		//mountPath += timeStr
+
+		db := op.GetDriverInfoMap()[t]
+
+		add := dropbox.Addition{}
+		add.Code = c.Query("code")
+		if len(add.Code) == 0 {
+			c.String(200, `<p>code cannot be empty</p>`)
+			return
+		}
+		add.RootFolderID = ""
+		for _, v := range db {
+			if v.Name == "app_key" {
+				add.AppKey = v.Default
+			}
+			if v.Name == "app_secret" {
+				add.AppSecret = v.Default
+			}
+		}
+		var dropbox dropbox.Dropbox
+		dropbox.Addition = add
+		err := dropbox.Init(c)
+		if err != nil {
+			c.String(200, `<p>Initialization failure:`+err.Error()+`</p>`)
+			return
+		}
+		username, err := dropbox.GetUserInfo(c)
+		if err != nil {
+			c.String(200, `<p>Failed to get user information:`+err.Error()+`</p>`)
+			return
+		}
+		if len(username) > 0 {
+			a := strings.Split(username, "@")
+			username = a[0]
+		}
+		username += "_dropbox"
+		dataMap, _ := service.MyService.Storage().GetConfigByName(username)
+		if len(dataMap) > 0 {
+			c.String(200, `<p>The same configuration has been added</p>`)
+			service.MyService.Storage().CheckAndMountByName(username)
+			return
+		}
+		dmap := make(map[string]string)
+		dmap["client_id"] = add.AppKey
+		dmap["client_secret"] = add.AppSecret
+		dmap["token"] = `{"access_token":"` + dropbox.AccessToken + `","token_type":"bearer","refresh_token":"` + dropbox.Addition.RefreshToken + `","expiry":"` + currentDate + `T` + currentTime.Add(time.Hour*3).Format("15:04:05") + `.780385354Z"}`
+		dmap["mount_point"] = "/mnt/" + username
+		// data.SetValue(username, "type", "dropbox")
+		// data.SetValue(username, "client_id", add.AppKey)
+		// data.SetValue(username, "client_secret", add.AppSecret)
+		// data.SetValue(username, "mount_point", "/mnt/"+username)
+
+		// data.SetValue(username, "token", `{"access_token":"`+dropbox.AccessToken+`","token_type":"bearer","refresh_token":"`+dropbox.Addition.RefreshToken+`","expiry":"`+currentDate+`T`+currentTime.Add(time.Hour*3).Format("15:04:05")+`.780385354Z"}`)
+		// e = data.Save()
+		// if e != nil {
+		// 	c.String(200, `<p>Failed to save configuration:`+e.Error()+`</p>`)
+
+		// 	return
+		// }
+		service.MyService.Storage().CreateConfig(dmap, username, "dropbox")
+		service.MyService.Storage().MountStorage("/mnt/"+username, username+":")
+		notify := make(map[string]interface{})
+		notify["status"] = "success"
+		service.MyService.Notify().SendNotify("recover_status", notify)
+	}
+
+	c.String(200, `<p>Just close the page</p><script>window.close()</script>`)
+}
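
For reference, the OAuth provider is expected to redirect the browser back to the unauthenticated route registered above, e.g. /v1/recover/GoogleDrive?code=&lt;authorization-code&gt; or /v1/recover/Dropbox?code=&lt;authorization-code&gt;; the handler then stores the rclone config, mounts it under /mnt/&lt;account&gt;_drive or /mnt/&lt;account&gt;_dropbox, and pushes a recover_status notification.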

+ 5 - 1
route/v1/samba.go

@@ -12,6 +12,7 @@ package v1
 
 
 import (
 import (
 	"fmt"
 	"fmt"
+	"io/ioutil"
 	"net/http"
 	"net/http"
 	"os"
 	"os"
 	"path/filepath"
 	"path/filepath"
@@ -200,7 +201,10 @@ func DeleteSambaConnections(c *gin.Context) {
 	for _, v := range mountPointList {
 	for _, v := range mountPointList {
 		service.MyService.Connections().UnmountSmaba(v.Path)
 		service.MyService.Connections().UnmountSmaba(v.Path)
 	}
 	}
-	os.RemoveAll(connection.MountPoint)
+	dir, _ := ioutil.ReadDir(connection.MountPoint)
+	if len(dir) == 0 {
+		os.RemoveAll(connection.MountPoint)
+	}
 	service.MyService.Connections().DeleteConnection(id)
 	service.MyService.Connections().DeleteConnection(id)
 	c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SUCCESS, Message: common_err.GetMsg(common_err.SUCCESS), Data: id})
 	c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SUCCESS, Message: common_err.GetMsg(common_err.SUCCESS), Data: id})
 }
 }

+ 131 - 0
route/v1/storage.go

@@ -0,0 +1,131 @@
+package v1
+
+import (
+	"strconv"
+	"strings"
+
+	"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
+	"github.com/IceWhaleTech/CasaOS/drivers/dropbox"
+	"github.com/IceWhaleTech/CasaOS/drivers/google_drive"
+	"github.com/IceWhaleTech/CasaOS/model"
+	"github.com/IceWhaleTech/CasaOS/pkg/utils/common_err"
+	"github.com/IceWhaleTech/CasaOS/service"
+	"github.com/gin-gonic/gin"
+	"go.uber.org/zap"
+)
+
+func ListStorages(c *gin.Context) {
+	// var req model.PageReq
+	// if err := c.ShouldBind(&req); err != nil {
+	// 	c.JSON(common_err.SUCCESS, model.Result{Success: common_err.CLIENT_ERROR, Message: common_err.GetMsg(common_err.CLIENT_ERROR), Data: err.Error()})
+	// 	return
+	// }
+	// req.Validate()
+
+	//logger.Info("ListStorages", zap.Any("req", req))
+	//storages, total, err := service.MyService.Storage().GetStorages(req.Page, req.PerPage)
+	// if err != nil {
+	// 	c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SERVICE_ERROR, Message: common_err.GetMsg(common_err.SERVICE_ERROR), Data: err.Error()})
+	// 	return
+	// }
+	// c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SUCCESS, Message: common_err.GetMsg(common_err.SUCCESS), Data: model.PageResp{
+	// 	Content: storages,
+	// 	Total:   total,
+	// }})
+	r, err := service.MyService.Storage().GetStorages()
+
+	if err != nil {
+		c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SERVICE_ERROR, Message: common_err.GetMsg(common_err.SERVICE_ERROR), Data: err.Error()})
+		return
+	}
+
+	for i := 0; i < len(r.MountPoints); i++ {
+		dataMap, err := service.MyService.Storage().GetConfigByName(r.MountPoints[i].Fs)
+		if err != nil {
+			logger.Error("GetConfigByName", zap.Any("err", err))
+			continue
+		}
+		if dataMap["type"] == "drive" {
+			r.MountPoints[i].Icon = google_drive.ICONURL
+		}
+		if dataMap["type"] == "dropbox" {
+			r.MountPoints[i].Icon = dropbox.ICONURL
+		}
+	}
+
+	c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SUCCESS, Message: common_err.GetMsg(common_err.SUCCESS), Data: r})
+}
+
+func UpdateStorage(c *gin.Context) {
+	var req model.Storage
+	if err := c.ShouldBind(&req); err != nil {
+		c.JSON(common_err.SUCCESS, model.Result{Success: common_err.CLIENT_ERROR, Message: common_err.GetMsg(common_err.CLIENT_ERROR), Data: err.Error()})
+		return
+	}
+	if err := service.MyService.Storages().UpdateStorage(c, req); err != nil {
+		c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SERVICE_ERROR, Message: common_err.GetMsg(common_err.SERVICE_ERROR), Data: err.Error()})
+	} else {
+		c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SUCCESS, Message: common_err.GetMsg(common_err.SUCCESS), Data: "success"})
+	}
+}
+
+func DeleteStorage(c *gin.Context) {
+	json := make(map[string]string)
+	c.ShouldBind(&json)
+	mountPoint := json["mount_point"]
+	if mountPoint == "" {
+		c.JSON(common_err.SUCCESS, model.Result{Success: common_err.CLIENT_ERROR, Message: common_err.GetMsg(common_err.CLIENT_ERROR), Data: "mount_point is empty"})
+		return
+	}
+	err := service.MyService.Storage().UnmountStorage(mountPoint)
+	if err != nil {
+		c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SERVICE_ERROR, Message: common_err.GetMsg(common_err.SERVICE_ERROR), Data: err.Error()})
+		return
+	}
+	service.MyService.Storage().DeleteConfigByName(strings.ReplaceAll(mountPoint, "/mnt/", ""))
+	c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SUCCESS, Message: common_err.GetMsg(common_err.SUCCESS), Data: "success"})
+}
+
+func DisableStorage(c *gin.Context) {
+	idStr := c.Query("id")
+	id, err := strconv.Atoi(idStr)
+	if err != nil {
+		c.JSON(common_err.SUCCESS, model.Result{Success: common_err.CLIENT_ERROR, Message: common_err.GetMsg(common_err.CLIENT_ERROR), Data: err.Error()})
+		return
+	}
+	if err := service.MyService.Storages().DisableStorage(c, uint(id)); err != nil {
+		c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SERVICE_ERROR, Message: common_err.GetMsg(common_err.SERVICE_ERROR), Data: err.Error()})
+		return
+	}
+	c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SUCCESS, Message: common_err.GetMsg(common_err.SUCCESS), Data: "success"})
+}
+
+func EnableStorage(c *gin.Context) {
+	idStr := c.Query("id")
+	id, err := strconv.Atoi(idStr)
+	if err != nil {
+		c.JSON(common_err.SUCCESS, model.Result{Success: common_err.CLIENT_ERROR, Message: common_err.GetMsg(common_err.CLIENT_ERROR), Data: err.Error()})
+		return
+	}
+	if err := service.MyService.Storages().EnableStorage(c, uint(id)); err != nil {
+		c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SERVICE_ERROR, Message: common_err.GetMsg(common_err.SERVICE_ERROR), Data: err.Error()})
+		return
+	}
+	c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SUCCESS, Message: common_err.GetMsg(common_err.SUCCESS), Data: "success"})
+}
+
+func GetStorage(c *gin.Context) {
+
+	// idStr := c.Query("id")
+	// id, err := strconv.Atoi(idStr)
+	// if err != nil {
+	// 	c.JSON(common_err.SUCCESS, model.Result{Success: common_err.CLIENT_ERROR, Message: common_err.GetMsg(common_err.CLIENT_ERROR), Data: err.Error()})
+	// 	return
+	// }
+	// storage, err := service.MyService.Storage().GetStorageById(uint(id))
+	// if err != nil {
+	// 	c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SERVICE_ERROR, Message: common_err.GetMsg(common_err.SERVICE_ERROR), Data: err.Error()})
+	// 	return
+	// }
+	// c.JSON(common_err.SUCCESS, model.Result{Success: common_err.SUCCESS, Message: common_err.GetMsg(common_err.SUCCESS), Data: storage})
+}

+ 154 - 0
service/fs.go

@@ -0,0 +1,154 @@
+package service
+
+import (
+	"context"
+
+	"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
+	"github.com/IceWhaleTech/CasaOS/internal/driver"
+	"github.com/IceWhaleTech/CasaOS/model"
+	log "github.com/dsoprea/go-logging"
+	"go.uber.org/zap"
+)
+
+type FsService interface {
+	FList(ctx context.Context, path string, refresh ...bool) ([]model.Obj, error)
+	GetStorage(path string) (driver.Driver, error)
+	Link(ctx context.Context, path string, args model.LinkArgs) (*model.Link, model.Obj, error)
+}
+
+type fsService struct {
+}
+
+// The path parameter of the functions in this service is a mount path.
+// The purpose of this layer is to convert the mount path into the actual path
+// and then hand the actual path on to the op package.
+
+func (f *fsService) FList(ctx context.Context, path string, refresh ...bool) ([]model.Obj, error) {
+	res, err := MyService.FsListService().FsList(ctx, path, refresh...)
+	if err != nil {
+		logger.Info("failed list", zap.Any("path", path), zap.Any("err", err))
+		return nil, err
+	}
+	return res, nil
+}
+
+// func (f *fsService) Get(ctx context.Context, path string) (model.Obj, error) {
+// 	res, err := get(ctx, path)
+// 	if err != nil {
+// 		log.Errorf("failed get %s: %+v", path, err)
+// 		return nil, err
+// 	}
+// 	return res, nil
+// }
+
+func (f *fsService) Link(ctx context.Context, path string, args model.LinkArgs) (*model.Link, model.Obj, error) {
+	res, file, err := MyService.FsLinkService().Link(ctx, path, args)
+	if err != nil {
+		log.Errorf("failed link %s: %+v", path, err)
+		return nil, nil, err
+	}
+	return res, file, nil
+}
+
+// func (f *fsService) MakeDir(ctx context.Context, path string, lazyCache ...bool) error {
+// 	err := makeDir(ctx, path, lazyCache...)
+// 	if err != nil {
+// 		log.Errorf("failed make dir %s: %+v", path, err)
+// 	}
+// 	return err
+// }
+
+// func (f *fsService) Move(ctx context.Context, srcPath, dstDirPath string, lazyCache ...bool) error {
+// 	err := move(ctx, srcPath, dstDirPath, lazyCache...)
+// 	if err != nil {
+// 		log.Errorf("failed move %s to %s: %+v", srcPath, dstDirPath, err)
+// 	}
+// 	return err
+// }
+
+// func (f *fsService) Copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (bool, error) {
+// 	res, err := _copy(ctx, srcObjPath, dstDirPath, lazyCache...)
+// 	if err != nil {
+// 		log.Errorf("failed copy %s to %s: %+v", srcObjPath, dstDirPath, err)
+// 	}
+// 	return res, err
+// }
+
+// func (f *fsService) Rename(ctx context.Context, srcPath, dstName string, lazyCache ...bool) error {
+// 	err := rename(ctx, srcPath, dstName, lazyCache...)
+// 	if err != nil {
+// 		log.Errorf("failed rename %s to %s: %+v", srcPath, dstName, err)
+// 	}
+// 	return err
+// }
+
+// func (f *fsService) Remove(ctx context.Context, path string) error {
+// 	err := remove(ctx, path)
+// 	if err != nil {
+// 		log.Errorf("failed remove %s: %+v", path, err)
+// 	}
+// 	return err
+// }
+
+// func PutDirectly(ctx context.Context, dstDirPath string, file *model.FileStream, lazyCache ...bool) error {
+// 	err := putDirectly(ctx, dstDirPath, file, lazyCache...)
+// 	if err != nil {
+// 		log.Errorf("failed put %s: %+v", dstDirPath, err)
+// 	}
+// 	return err
+// }
+
+// func (f *fsService) PutAsTask(dstDirPath string, file *model.FileStream) error {
+// 	err := putAsTask(dstDirPath, file)
+// 	if err != nil {
+// 		log.Errorf("failed put %s: %+v", dstDirPath, err)
+// 	}
+// 	return err
+// }
+
+func (f *fsService) GetStorage(path string) (driver.Driver, error) {
+	storageDriver, _, err := MyService.StoragePath().GetStorageAndActualPath(path)
+	if err != nil {
+		return nil, err
+	}
+	return storageDriver, nil
+}
+
+// func (f *fsService) Other(ctx context.Context, args model.FsOtherArgs) (interface{}, error) {
+// 	res, err := other(ctx, args)
+// 	if err != nil {
+// 		log.Errorf("failed remove %s: %+v", args.Path, err)
+// 	}
+// 	return res, err
+// }
+
+// func get(ctx context.Context, path string) (model.Obj, error) {
+// 	path = utils.FixAndCleanPath(path)
+// 	// maybe a virtual file
+// 	if path != "/" {
+// 		virtualFiles := op.GetStorageVirtualFilesByPath(stdpath.Dir(path))
+// 		for _, f := range virtualFiles {
+// 			if f.GetName() == stdpath.Base(path) {
+// 				return f, nil
+// 			}
+// 		}
+// 	}
+// 	storage, actualPath, err := op.GetStorageAndActualPath(path)
+// 	if err != nil {
+// 		// if there are no storage prefix with path, maybe root folder
+// 		if path == "/" {
+// 			return &model.Object{
+// 				Name:     "root",
+// 				Size:     0,
+// 				Modified: time.Time{},
+// 				IsFolder: true,
+// 			}, nil
+// 		}
+// 		return nil, errors.WithMessage(err, "failed get storage")
+// 	}
+// 	return op.Get(ctx, storage, actualPath)
+// }
+
+func NewFsService() FsService {
+	return &fsService{}
+}

+ 27 - 0
service/fs_link.go

@@ -0,0 +1,27 @@
+package service
+
+import (
+	"context"
+
+	"github.com/IceWhaleTech/CasaOS/internal/op"
+	"github.com/IceWhaleTech/CasaOS/model"
+	"github.com/pkg/errors"
+)
+
+type FsLinkService interface {
+	Link(ctx context.Context, path string, args model.LinkArgs) (*model.Link, model.Obj, error)
+}
+
+type fsLinkService struct {
+}
+
+func (f *fsLinkService) Link(ctx context.Context, path string, args model.LinkArgs) (*model.Link, model.Obj, error) {
+	storage, actualPath, err := MyService.StoragePath().GetStorageAndActualPath(path)
+	if err != nil {
+		return nil, nil, errors.WithMessage(err, "failed get storage")
+	}
+	return op.Link(ctx, storage, actualPath, args)
+}
+func NewFsLinkService() FsLinkService {
+	return &fsLinkService{}
+}

+ 198 - 0
service/fs_list.go

@@ -0,0 +1,198 @@
+package service
+
+import (
+	"context"
+	stdpath "path"
+	"time"
+
+	"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
+	"github.com/IceWhaleTech/CasaOS/internal/driver"
+	"github.com/IceWhaleTech/CasaOS/internal/op"
+	"github.com/IceWhaleTech/CasaOS/model"
+	"github.com/IceWhaleTech/CasaOS/pkg/singleflight"
+	"github.com/IceWhaleTech/CasaOS/pkg/utils"
+	"github.com/Xhofe/go-cache"
+
+	log "github.com/dsoprea/go-logging"
+	"github.com/pkg/errors"
+	"go.uber.org/zap"
+)
+
+type FsListService interface {
+	FsList(ctx context.Context, path string, refresh ...bool) ([]model.Obj, error)
+	Key(storage driver.Driver, path string) string
+	Get(ctx context.Context, storage driver.Driver, path string) (model.Obj, error)
+	GetUnwrap(ctx context.Context, storage driver.Driver, path string) (model.Obj, error)
+	List(ctx context.Context, storage driver.Driver, path string, args model.ListArgs, refresh ...bool) ([]model.Obj, error)
+}
+
+type fsListService struct {
+}
+
+var listCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64))
+var listG singleflight.Group[[]model.Obj]
+
+// List files
+func (fl *fsListService) FsList(ctx context.Context, path string, refresh ...bool) ([]model.Obj, error) {
+
+	virtualFiles := MyService.Storages().GetStorageVirtualFilesByPath(path)
+	storage, actualPath, err := MyService.StoragePath().GetStorageAndActualPath(path)
+	if err != nil && len(virtualFiles) == 0 {
+		return nil, errors.WithMessage(err, "failed get storage")
+	}
+
+	var _objs []model.Obj
+	if storage != nil {
+		_objs, err = fl.List(ctx, storage, actualPath, model.ListArgs{
+			ReqPath: path,
+		}, refresh...)
+		if err != nil {
+			log.Errorf("%+v", err)
+			if len(virtualFiles) == 0 {
+				return nil, errors.WithMessage(err, "failed get objs")
+			}
+		}
+	}
+
+	om := model.NewObjMerge()
+
+	objs := om.Merge(virtualFiles, _objs...)
+	return objs, nil
+}
+
+func (fl *fsListService) Key(storage driver.Driver, path string) string {
+	return stdpath.Join(storage.GetStorage().MountPath, utils.FixAndCleanPath(path))
+}
+
+// Get object from list of files
+func (fl *fsListService) Get(ctx context.Context, storage driver.Driver, path string) (model.Obj, error) {
+	path = utils.FixAndCleanPath(path)
+	logger.Info("get", zap.String("path", path))
+
+	// is root folder
+	if utils.PathEqual(path, "/") {
+		var rootObj model.Obj
+		switch r := storage.GetAddition().(type) {
+		case driver.IRootId:
+			rootObj = &model.Object{
+				ID:       r.GetRootId(),
+				Name:     op.RootName,
+				Size:     0,
+				Modified: storage.GetStorage().Modified,
+				IsFolder: true,
+			}
+		case driver.IRootPath:
+			rootObj = &model.Object{
+				Path:     r.GetRootPath(),
+				Name:     op.RootName,
+				Size:     0,
+				Modified: storage.GetStorage().Modified,
+				IsFolder: true,
+			}
+		default:
+			if storage, ok := storage.(driver.Getter); ok {
+				obj, err := storage.GetRoot(ctx)
+				if err != nil {
+					return nil, errors.WithMessage(err, "failed get root obj")
+				}
+				rootObj = obj
+			}
+		}
+		if rootObj == nil {
+			return nil, errors.Errorf("please implement IRootPath or IRootId or Getter method")
+		}
+		return &model.ObjWrapName{
+			Name: op.RootName,
+			Obj:  rootObj,
+		}, nil
+	}
+
+	// not root folder
+	dir, name := stdpath.Split(path)
+	files, err := fl.List(ctx, storage, dir, model.ListArgs{})
+	if err != nil {
+		return nil, errors.WithMessage(err, "failed get parent list")
+	}
+	for _, f := range files {
+		// TODO maybe copy obj here
+		if f.GetName() == name {
+			return f, nil
+		}
+	}
+	logger.Info("can't find obj with name", zap.Any("name", name))
+	return nil, errors.WithStack(errors.New("object not found"))
+}
+
+func (fl *fsListService) GetUnwrap(ctx context.Context, storage driver.Driver, path string) (model.Obj, error) {
+	obj, err := fl.Get(ctx, storage, path)
+	if err != nil {
+		return nil, err
+	}
+	return model.UnwrapObjs(obj), err
+}
+
+// List lists files in the storage; the result does not contain virtual files
+func (fl *fsListService) List(ctx context.Context, storage driver.Driver, path string, args model.ListArgs, refresh ...bool) ([]model.Obj, error) {
+	if storage.Config().CheckStatus && storage.GetStorage().Status != op.WORK {
+		return nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+	}
+	path = utils.FixAndCleanPath(path)
+	logger.Info("op.List", zap.Any("path", path))
+	key := fl.Key(storage, path)
+	if !utils.IsBool(refresh...) {
+		if files, ok := listCache.Get(key); ok {
+			logger.Info("op.List", zap.Any("use cache", path))
+			return files, nil
+		}
+	}
+	dir, err := fl.GetUnwrap(ctx, storage, path)
+	if err != nil {
+		return nil, errors.WithMessage(err, "failed get dir")
+	}
+	logger.Info("op.List", zap.Any("dir", dir))
+	if !dir.IsDir() {
+		return nil, errors.WithStack(errors.New("not a folder"))
+	}
+	objs, err, _ := listG.Do(key, func() ([]model.Obj, error) {
+		files, err := storage.List(ctx, dir, args)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to list objs")
+		}
+		// set path
+		for _, f := range files {
+			if s, ok := f.(model.SetPath); ok && f.GetPath() == "" && dir.GetPath() != "" {
+				s.SetPath(stdpath.Join(dir.GetPath(), f.GetName()))
+			}
+		}
+		// wrap obj name
+		model.WrapObjsName(files)
+		// call hooks
+		go func(reqPath string, files []model.Obj) {
+			for _, hook := range op.ObjsUpdateHooks {
+				hook(args.ReqPath, files)
+			}
+		}(args.ReqPath, files)
+
+		// sort objs
+		if storage.Config().LocalSort {
+			model.SortFiles(files, storage.GetStorage().OrderBy, storage.GetStorage().OrderDirection)
+		}
+		model.ExtractFolder(files, storage.GetStorage().ExtractFolder)
+
+		if !storage.Config().NoCache {
+			if len(files) > 0 {
+				logger.Info("set cache", zap.Any("key", key), zap.Any("files", files))
+				listCache.Set(key, files, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
+			} else {
+				logger.Info("del cache", zap.Any("key", key))
+				listCache.Del(key)
+			}
+		}
+		return files, nil
+	})
+	return objs, err
+}
+
+func NewFsListService() FsListService {
+	return &fsListService{}
+}

+ 58 - 17
service/service.go

@@ -41,6 +41,12 @@ type Repository interface {
 	Rely() RelyService
 	Rely() RelyService
 	Shares() SharesService
 	Shares() SharesService
 	System() SystemService
 	System() SystemService
+	Storage() StorageService
+	Storages() StoragesService
+	StoragePath() StoragePathService
+	FsListService() FsListService
+	FsLinkService() FsLinkService
+	FsService() FsService
 }
 }
 
 
 func NewService(db *gorm.DB, RuntimePath string, socket *socketio.Server) Repository {
 func NewService(db *gorm.DB, RuntimePath string, socket *socketio.Server) Repository {
@@ -54,27 +60,62 @@ func NewService(db *gorm.DB, RuntimePath string, socket *socketio.Server) Reposi
 	}
 	}
 
 
 	return &store{
 	return &store{
-		casa:        NewCasaService(),
-		connections: NewConnectionsService(db),
-		gateway:     gatewayManagement,
-		health:      NewHealthService(),
-		notify:      NewNotifyService(db),
-		rely:        NewRelyService(db),
-		shares:      NewSharesService(db),
-		system:      NewSystemService(),
+		casa:         NewCasaService(),
+		connections:  NewConnectionsService(db),
+		gateway:      gatewayManagement,
+		notify:       NewNotifyService(db),
+		rely:         NewRelyService(db),
+		system:       NewSystemService(),
+		health:       NewHealthService(),
+		shares:       NewSharesService(db),
+		storage:      NewStorageService(),
+		storages:     NewStoragesService(),
+		storage_path: NewStoragePathService(),
+		fs_list:      NewFsListService(),
+		fs_link:      NewFsLinkService(),
+		fs:           NewFsService(),
 	}
 	}
 }
 }
 
 
 type store struct {
 type store struct {
-	db          *gorm.DB
-	casa        CasaService
-	connections ConnectionsService
-	gateway     external.ManagementService
-	health      HealthService
-	notify      NotifyServer
-	rely        RelyService
-	shares      SharesService
-	system      SystemService
+	db           *gorm.DB
+	casa         CasaService
+	notify       NotifyServer
+	rely         RelyService
+	system       SystemService
+	shares       SharesService
+	connections  ConnectionsService
+	gateway      external.ManagementService
+	storage      StorageService
+	storages     StoragesService
+	storage_path StoragePathService
+	fs_list      FsListService
+	fs_link      FsLinkService
+	fs           FsService
+	health       HealthService
+}
+
+func (c *store) FsLinkService() FsLinkService {
+	return c.fs_link
+}
+func (c *store) FsService() FsService {
+	return c.fs
+}
+
+func (c *store) FsListService() FsListService {
+	return c.fs_list
+}
+
+func (c *store) StoragePath() StoragePathService {
+	return c.storage_path
+}
+
+func (c *store) Storages() StoragesService {
+	return c.storages
+}
+
+func (c *store) Storage() StorageService {
+	return c.storage
 }
 }
 
 
 func (c *store) Gateway() external.ManagementService {
 func (c *store) Gateway() external.ManagementService {

+ 100 - 0
service/storage.go

@@ -0,0 +1,100 @@
+package service
+
+import (
+	"io/ioutil"
+
+	"github.com/IceWhaleTech/CasaOS/pkg/utils/file"
+	"github.com/IceWhaleTech/CasaOS/pkg/utils/httper"
+)
+
+type StorageService interface {
+	MountStorage(mountPoint, fs string) error
+	UnmountStorage(mountPoint string) error
+	GetStorages() (httper.MountList, error)
+	CreateConfig(data map[string]string, name string, t string) error
+	CheckAndMountByName(name string) error
+	CheckAndMountAll() error
+	GetConfigByName(name string) (map[string]string, error)
+	DeleteConfigByName(name string) error
+}
+
+type storageStruct struct {
+}
+
+func (s *storageStruct) MountStorage(mountPoint, fs string) error {
+	file.IsNotExistMkDir(mountPoint)
+	return httper.Mount(mountPoint, fs)
+}
+func (s *storageStruct) UnmountStorage(mountPoint string) error {
+	err := httper.Unmount(mountPoint)
+	if err == nil {
+		dir, _ := ioutil.ReadDir(mountPoint)
+
+		if len(dir) == 0 {
+			file.RMDir(mountPoint)
+		}
+		return nil
+	}
+	return err
+}
+func (s *storageStruct) GetStorages() (httper.MountList, error) {
+	return httper.GetMountList()
+}
+func (s *storageStruct) CreateConfig(data map[string]string, name string, t string) error {
+	return httper.CreateConfig(data, name, t)
+}
+func (s *storageStruct) CheckAndMountByName(name string) error {
+	storages, _ := MyService.Storage().GetStorages()
+	currentRemote, _ := httper.GetConfigByName(name)
+	mountPoint := currentRemote["mount_point"]
+	isMount := false
+	for _, v := range storages.MountPoints {
+		if v.MountPoint == mountPoint {
+			isMount = true
+			break
+		}
+	}
+	if !isMount {
+		return MyService.Storage().MountStorage(mountPoint, name+":")
+	}
+	return nil
+}
+func (s *storageStruct) CheckAndMountAll() error {
+	storages, err := MyService.Storage().GetStorages()
+	if err != nil {
+		return err
+	}
+	section, err := httper.GetAllConfigName()
+	if err != nil {
+		return err
+	}
+	for _, v := range section.Remotes {
+		currentRemote, _ := httper.GetConfigByName(v)
+		mountPoint := currentRemote["mount_point"]
+		if len(mountPoint) == 0 {
+			continue
+		}
+		isMount := false
+		for _, m := range storages.MountPoints {
+			if m.MountPoint == mountPoint {
+				isMount = true
+				break
+			}
+		}
+		if !isMount {
+			// mount this remote, then keep checking the remaining ones
+			if err := MyService.Storage().MountStorage(mountPoint, v+":"); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+func (s *storageStruct) GetConfigByName(name string) (map[string]string, error) {
+	return httper.GetConfigByName(name)
+}
+func (s *storageStruct) DeleteConfigByName(name string) error {
+	return httper.DeleteConfigByName(name)
+}
+func NewStorageService() StorageService {
+	return &storageStruct{}
+}

+ 73 - 0
service/storage_old.go

@@ -0,0 +1,73 @@
+package service
+
+import (
+	"fmt"
+
+	"github.com/IceWhaleTech/CasaOS/model"
+	"github.com/pkg/errors"
+	"gorm.io/gorm"
+)
+
+type StorageOldService interface {
+	CreateStorage(storage *model.Storage) error
+	UpdateStorage(storage *model.Storage) error
+	DeleteStorageById(id uint) error
+	GetStorages(pageIndex, pageSize int) ([]model.Storage, int64, error)
+	GetStorageById(id uint) (*model.Storage, error)
+	GetEnabledStorages() ([]model.Storage, error)
+}
+
+type storageOldStruct struct {
+	db *gorm.DB
+}
+
+// CreateStorage just inserts the storage into the database
+func (s *storageOldStruct) CreateStorage(storage *model.Storage) error {
+	return errors.WithStack(s.db.Create(storage).Error)
+}
+
+// UpdateStorage just updates the storage in the database
+func (s *storageOldStruct) UpdateStorage(storage *model.Storage) error {
+	return errors.WithStack(s.db.Save(storage).Error)
+}
+
+// DeleteStorageById just deletes the storage from the database by id
+func (s *storageOldStruct) DeleteStorageById(id uint) error {
+	return errors.WithStack(s.db.Delete(&model.Storage{}, id).Error)
+}
+
+// GetStorages gets all storages from the database, ordered by index
+func (s *storageOldStruct) GetStorages(pageIndex, pageSize int) ([]model.Storage, int64, error) {
+	storageDB := s.db.Model(&model.Storage{})
+	var count int64
+	if err := storageDB.Count(&count).Error; err != nil {
+		return nil, 0, errors.Wrapf(err, "failed get storages count")
+	}
+	var storages []model.Storage
+	if err := storageDB.Order("`order`").Offset((pageIndex - 1) * pageSize).Limit(pageSize).Find(&storages).Error; err != nil {
+		return nil, 0, errors.WithStack(err)
+	}
+	return storages, count, nil
+}
+
+// GetStorageById gets a storage by id, usually used when updating a storage
+func (s *storageOldStruct) GetStorageById(id uint) (*model.Storage, error) {
+	var storage model.Storage
+	storage.ID = id
+	if err := s.db.First(&storage).Error; err != nil {
+		return nil, errors.WithStack(err)
+	}
+	return &storage, nil
+}
+
+func (s *storageOldStruct) GetEnabledStorages() ([]model.Storage, error) {
+	var storages []model.Storage
+	if err := s.db.Where(fmt.Sprintf("%s = ?", "disabled"), false).Find(&storages).Error; err != nil {
+		return nil, errors.WithStack(err)
+	}
+	return storages, nil
+}
+
+func NewStorageOldService(db *gorm.DB) StorageOldService {
+	return &storageOldStruct{db: db}
+}

+ 34 - 0
service/storage_path.go

@@ -0,0 +1,34 @@
+package service
+
+import (
+	"strings"
+
+	"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
+	"github.com/IceWhaleTech/CasaOS/internal/driver"
+	"github.com/IceWhaleTech/CasaOS/pkg/utils"
+	"github.com/pkg/errors"
+	"go.uber.org/zap"
+)
+
+type StoragePathService interface {
+	GetStorageAndActualPath(rawPath string) (storage driver.Driver, actualPath string, err error)
+}
+
+type storagePathStruct struct {
+}
+
+func (s *storagePathStruct) GetStorageAndActualPath(rawPath string) (storage driver.Driver, actualPath string, err error) {
+	rawPath = utils.FixAndCleanPath(rawPath)
+	storage = MyService.Storages().GetBalancedStorage(rawPath)
+	if storage == nil {
+		err = errors.Errorf("can't find storage with rawPath: %s", rawPath)
+		return
+	}
+	logger.Info("use storage", zap.Any("storage mount path", storage.GetStorage().MountPath))
+	mountPath := utils.GetActualMountPath(storage.GetStorage().MountPath)
+	actualPath = utils.FixAndCleanPath(strings.TrimPrefix(rawPath, mountPath))
+	return
+}
+func NewStoragePathService() StoragePathService {
+	return &storagePathStruct{}
+}
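
A rough usage sketch, assuming MyService has already been initialised and that a remote is mounted somewhere under /mnt; the mount name and file path are invented, and the exact shape of the actual path depends on utils.GetActualMountPath, which is outside this diff:

```go
package main

import (
	"fmt"

	"github.com/IceWhaleTech/CasaOS/service"
)

func main() {
	// Assumes service.NewService(...) has run and a storage covers /mnt/alice_drive.
	storage, actualPath, err := service.MyService.StoragePath().
		GetStorageAndActualPath("/mnt/alice_drive/photos/cat.jpg")
	if err != nil {
		fmt.Println("no storage serves this path:", err)
		return
	}
	// storage is the driver whose mount path covers the request;
	// actualPath is the remainder relative to that mount, e.g. "/photos/cat.jpg".
	fmt.Println(storage.GetStorage().MountPath, actualPath)
}
```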

+ 398 - 0
service/storage_service.go

@@ -0,0 +1,398 @@
+package service
+
+import (
+	"context"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
+	"github.com/IceWhaleTech/CasaOS/pkg/utils"
+	jsoniter "github.com/json-iterator/go"
+	"go.uber.org/zap"
+
+	"github.com/IceWhaleTech/CasaOS/pkg/generic_sync"
+
+	"github.com/IceWhaleTech/CasaOS/model"
+
+	"github.com/IceWhaleTech/CasaOS/internal/driver"
+	"github.com/IceWhaleTech/CasaOS/internal/op"
+	mapset "github.com/deckarep/golang-set/v2"
+	"github.com/pkg/errors"
+)
+
+type StoragesService interface {
+	HasStorage(mountPath string) bool
+	CreateStorage(ctx context.Context, storage model.Storage) (uint, error)
+	LoadStorage(ctx context.Context, storage model.Storage) error
+	EnableStorage(ctx context.Context, id uint) error
+	DisableStorage(ctx context.Context, id uint) error
+	UpdateStorage(ctx context.Context, storage model.Storage) error
+	DeleteStorageById(ctx context.Context, id uint) error
+	MustSaveDriverStorage(driver driver.Driver) error
+	GetStorageVirtualFilesByPath(prefix string) []model.Obj
+	initStorage(ctx context.Context, storage model.Storage, storageDriver driver.Driver, setMountPath func(d driver.Driver, ctx context.Context) string) (err error)
+	InitStorages()
+	GetBalancedStorage(path string) driver.Driver
+}
+
+type storagesStruct struct {
+}
+
+// Although driver values are stored here, each driver wraps exactly one
+// storage, so this is effectively a map of storages keyed by mount path.
+var storagesMap generic_sync.MapOf[string, driver.Driver]
+
+func GetAllStorages() []driver.Driver {
+	return storagesMap.Values()
+}
+
+func (s *storagesStruct) HasStorage(mountPath string) bool {
+	return storagesMap.Has(utils.FixAndCleanPath(mountPath))
+}
+
+func GetStorageByMountPath(mountPath string) (driver.Driver, error) {
+	mountPath = utils.FixAndCleanPath(mountPath)
+	storageDriver, ok := storagesMap.Load(mountPath)
+	if !ok {
+		return nil, errors.Errorf("no storage is mounted at path: %s", mountPath)
+	}
+	return storageDriver, nil
+}
+
+// CreateStorage saves the storage to the database so it gets an id,
+// then instantiates the corresponding driver and keeps it in memory
+func (s *storagesStruct) CreateStorage(ctx context.Context, storage model.Storage) (uint, error) {
+	storage.Modified = time.Now()
+	storage.MountPath = utils.FixAndCleanPath(storage.MountPath)
+	var err error
+	// check driver first
+	driverName := storage.Driver
+	driverNew, err := op.GetDriverNew(driverName)
+	if err != nil {
+		return 0, errors.WithMessage(err, "failed get driver new")
+	}
+	storageDriver := driverNew()
+	// // insert storage to database
+	// err = MyService.Storage().CreateStorage(&storage)
+	// if err != nil {
+
+	// 	return storage.ID, errors.WithMessage(err, "failed create storage in database")
+	// }
+	// already has an id
+	err = s.initStorage(ctx, storage, storageDriver, func(d driver.Driver, ctx context.Context) string {
+		u, _ := d.GetUserInfo(ctx)
+		if len(u) > 0 {
+			a := strings.Split(u, "@")
+			u = a[0]
+		}
+		return u
+	})
+	if err != nil {
+		s.DeleteStorageById(ctx, storage.ID)
+		return storage.ID, errors.Wrap(err, "failed init storage")
+	}
+
+	go op.CallStorageHooks("add", storageDriver)
+
+	logger.Info("storage created", zap.Any("storage", storageDriver))
+	return storage.ID, nil
+}
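+
+// Hypothetical caller sketch (exampleCreateDropboxStorage is not part of the
+// service): the driver name and Addition JSON are placeholders; only the field
+// names MountPath, Driver and Addition come from this file. Note that
+// CreateStorage appends an account-derived suffix to MountPath via setMountPath.
+func exampleCreateDropboxStorage(ctx context.Context, s StoragesService) (uint, error) {
+	storage := model.Storage{
+		MountPath: "/Drive",          // placeholder mount path
+		Driver:    "dropbox",         // placeholder driver name
+		Addition:  `{"token":"..."}`, // placeholder driver-specific config JSON
+	}
+	return s.CreateStorage(ctx, storage)
+}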
+
+// LoadStorage loads an existing storage from the database into memory.
+func (s *storagesStruct) LoadStorage(ctx context.Context, storage model.Storage) error {
+	storage.MountPath = utils.FixAndCleanPath(storage.MountPath)
+	// check driver first
+	driverName := storage.Driver
+	driverNew, err := op.GetDriverNew(driverName)
+	if err != nil {
+		return errors.WithMessage(err, "failed get driver new")
+	}
+	storageDriver := driverNew()
+
+	err = s.initStorage(ctx, storage, storageDriver, nil)
+	go op.CallStorageHooks("add", storageDriver)
+	logger.Info("storage created", zap.Any("storage", storageDriver))
+	return err
+}
+
+// initStorage initializes the driver and stores it in storagesMap.
+func (s *storagesStruct) initStorage(ctx context.Context, storage model.Storage, storageDriver driver.Driver, setMountPath func(d driver.Driver, ctx context.Context) string) (err error) {
+	storageDriver.SetStorage(storage)
+	driverStorage := storageDriver.GetStorage()
+
+	// Unmarshal Addition
+
+	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+
+	err = json.UnmarshalFromString(driverStorage.Addition, storageDriver.GetAddition())
+	if err == nil {
+		err = storageDriver.Init(ctx)
+	}
+	if setMountPath != nil {
+		driverStorage.MountPath += "_" + setMountPath(storageDriver, ctx)
+	}
+	if s.HasStorage(driverStorage.MountPath) {
+		return errors.New("mount path already exists")
+	}
+	storageDriver.SetStorage(*driverStorage)
+	storagesMap.Store(driverStorage.MountPath, storageDriver)
+
+	if err != nil {
+		driverStorage.SetStatus(err.Error())
+		err = errors.Wrap(err, "failed init storage")
+	} else {
+		driverStorage.SetStatus(op.WORK)
+	}
+
+	// Do not let a save failure mask the original init error.
+	if saveErr := s.MustSaveDriverStorage(storageDriver); saveErr != nil && err == nil {
+		err = saveErr
+	}
+
+	return err
+}
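+
+// Minimal sketch of the Addition round-trip that initStorage relies on: the
+// JSON string kept on the storage is unmarshaled into whatever struct the
+// driver returns from GetAddition(). exampleAddition and its field are
+// hypothetical; real drivers define their own addition types.
+type exampleAddition struct {
+	RefreshToken string `json:"refresh_token"`
+}
+
+func exampleUnmarshalAddition(raw string) (*exampleAddition, error) {
+	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	addition := &exampleAddition{}
+	if err := json.UnmarshalFromString(raw, addition); err != nil {
+		return nil, err
+	}
+	return addition, nil
+}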
+
+func (s *storagesStruct) EnableStorage(ctx context.Context, id uint) error {
+	// storage, err := MyService.Storage().GetStorageById(id)
+	// if err != nil {
+	// 	return errors.WithMessage(err, "failed get storage")
+	// }
+	// if !storage.Disabled {
+	// 	return errors.Errorf("this storage is already enabled")
+	// }
+	// storage.Disabled = false
+	// err = MyService.Storage().UpdateStorage(storage)
+	// if err != nil {
+	// 	return errors.WithMessage(err, "failed update storage in db")
+	// }
+	// err = s.LoadStorage(ctx, *storage)
+	// if err != nil {
+	// 	return errors.WithMessage(err, "failed load storage")
+	// }
+	return nil
+}
+
+func (s *storagesStruct) DisableStorage(ctx context.Context, id uint) error {
+	// storage, err := MyService.Storage().GetStorageById(id)
+	// if err != nil {
+	// 	return errors.WithMessage(err, "failed get storage")
+	// }
+	// if storage.Disabled {
+	// 	return errors.Errorf("this storage is already disabled")
+	// }
+	// storageDriver, err := GetStorageByMountPath(storage.MountPath)
+	// if err != nil {
+	// 	return errors.WithMessage(err, "failed get storage driver")
+	// }
+	// // drop the storage in the driver
+	// if err := storageDriver.Drop(ctx); err != nil {
+	// 	return errors.Wrap(err, "failed drop storage")
+	// }
+	// // delete the storage in the memory
+	// storage.Disabled = true
+	// err = MyService.Storage().UpdateStorage(storage)
+	// if err != nil {
+	// 	return errors.WithMessage(err, "failed update storage in db")
+	// }
+	// storagesMap.Delete(storage.MountPath)
+	// go op.CallStorageHooks("del", storageDriver)
+	return nil
+}
+
+// UpdateStorage updates a storage:
+// get the old storage first,
+// drop it, then reinitialize it.
+func (s *storagesStruct) UpdateStorage(ctx context.Context, storage model.Storage) error {
+	// oldStorage, err := MyService.Storage().GetStorageById(storage.ID)
+	// if err != nil {
+	// 	return errors.WithMessage(err, "failed get old storage")
+	// }
+	// if oldStorage.Driver != storage.Driver {
+	// 	return errors.Errorf("driver cannot be changed")
+	// }
+	// storage.Modified = time.Now()
+	// storage.MountPath = utils.FixAndCleanPath(storage.MountPath)
+	// err = MyService.Storage().UpdateStorage(&storage)
+	// if err != nil {
+	// 	return errors.WithMessage(err, "failed update storage in database")
+	// }
+	// if storage.Disabled {
+	// 	return nil
+	// }
+	// storageDriver, err := GetStorageByMountPath(oldStorage.MountPath)
+	// if oldStorage.MountPath != storage.MountPath {
+	// 	// mount path renamed, need to drop the storage
+	// 	storagesMap.Delete(oldStorage.MountPath)
+	// }
+	// if err != nil {
+	// 	return errors.WithMessage(err, "failed get storage driver")
+	// }
+	// err = storageDriver.Drop(ctx)
+	// if err != nil {
+	// 	return errors.Wrapf(err, "failed drop storage")
+	// }
+
+	// err = s.initStorage(ctx, storage, storageDriver, nil)
+	// go op.CallStorageHooks("update", storageDriver)
+
+	// logger.Info("storage updated", zap.Any("storage", storageDriver))
+	//return err
+	return nil
+}
+
+func (s *storagesStruct) DeleteStorageById(ctx context.Context, id uint) error {
+	// storage, err := MyService.Storage().GetStorageById(id)
+	// if err != nil {
+	// 	return errors.WithMessage(err, "failed get storage")
+	// }
+	// if !storage.Disabled {
+	// 	storageDriver, err := GetStorageByMountPath(storage.MountPath)
+	// 	if err == nil {
+	// 		// drop the storage in the driver
+	// 		if err := storageDriver.Drop(ctx); err != nil {
+	// 			return errors.Wrapf(err, "failed drop storage")
+	// 		}
+	// 		// delete the storage in the memory
+	// 		storagesMap.Delete(storage.MountPath)
+	// 	}
+
+	// 	go op.CallStorageHooks("del", storageDriver)
+	// }
+	// // delete the storage in the database
+	// if err := MyService.Storage().DeleteStorageById(id); err != nil {
+	// 	return errors.WithMessage(err, "failed delete storage in database")
+	// }
+	return nil
+}
+
+// MustSaveDriverStorage is called from a specific driver to persist its storage; failures are logged and returned.
+func (s *storagesStruct) MustSaveDriverStorage(driver driver.Driver) error {
+	err := saveDriverStorage(driver)
+	if err != nil {
+		logger.Error("failed save driver storage", zap.Any("err", err))
+	}
+	return err
+}
+
+func saveDriverStorage(driver driver.Driver) error {
+	// storage := driver.GetStorage()
+	// addition := driver.GetAddition()
+
+	// var json = jsoniter.ConfigCompatibleWithStandardLibrary
+
+	// str, err := json.MarshalToString(addition)
+	// if err != nil {
+	// 	return errors.Wrap(err, "error while marshal addition")
+	// }
+	// storage.Addition = str
+	// err = MyService.Storage().UpdateStorage(storage)
+	// if err != nil {
+	// 	return errors.WithMessage(err, "failed update storage in database")
+	// }
+	return nil
+}
+
+// getStoragesByPath gets storages by the longest matching mount path, including balance storages.
+// For example, given /a/b, /a/c, /a/d/e and /a/d/e.balance,
+// getStoragesByPath(/a/d/e/f) => /a/d/e, /a/d/e.balance
+func getStoragesByPath(path string) []driver.Driver {
+	storages := make([]driver.Driver, 0)
+	curSlashCount := 0
+	storagesMap.Range(func(mountPath string, value driver.Driver) bool {
+		mountPath = utils.GetActualMountPath(mountPath)
+		// is this path
+		if utils.IsSubPath(mountPath, path) {
+			slashCount := strings.Count(utils.PathAddSeparatorSuffix(mountPath), "/")
+			// a longer match was found: discard the shorter matches collected so far
+			if slashCount > curSlashCount {
+				storages = storages[:0]
+				curSlashCount = slashCount
+			}
+			if slashCount == curSlashCount {
+				storages = append(storages, value)
+			}
+		}
+		return true
+	})
+	// make sure the order is the same for same input
+	sort.Slice(storages, func(i, j int) bool {
+		return storages[i].GetStorage().MountPath < storages[j].GetStorage().MountPath
+	})
+	return storages
+}
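+
+// Worked example of the longest-match rule above, assuming
+// utils.GetActualMountPath strips the ".balance" suffix (which is what lets
+// /a/d/e and /a/d/e.balance compare as the same actual path):
+//   loaded mount paths: /a/b, /a/c, /a/d/e, /a/d/e.balance
+//   getStoragesByPath("/a/d/e/f") keeps only the deepest matches, i.e. the
+//   drivers mounted at /a/d/e and /a/d/e.balance, sorted by MountPath.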
+
+// GetStorageVirtualFilesByPath obtains the virtual files generated by storages under the given path.
+// For example, given /a/b, /a/c, /a/d/e, /a/b.balance1 and /av,
+// GetStorageVirtualFilesByPath(/a) => b, c, d
+func (s *storagesStruct) GetStorageVirtualFilesByPath(prefix string) []model.Obj {
+	files := make([]model.Obj, 0)
+	storages := storagesMap.Values()
+	sort.Slice(storages, func(i, j int) bool {
+		if storages[i].GetStorage().Order == storages[j].GetStorage().Order {
+			return storages[i].GetStorage().MountPath < storages[j].GetStorage().MountPath
+		}
+		return storages[i].GetStorage().Order < storages[j].GetStorage().Order
+	})
+
+	prefix = utils.FixAndCleanPath(prefix)
+	set := mapset.NewSet[string]()
+	for _, v := range storages {
+		mountPath := utils.GetActualMountPath(v.GetStorage().MountPath)
+		// Exclude the prefix itself and mount paths that are not under the prefix
+		if len(prefix) >= len(mountPath) || !utils.IsSubPath(prefix, mountPath) {
+			continue
+		}
+		name := strings.SplitN(strings.TrimPrefix(mountPath[len(prefix):], "/"), "/", 2)[0]
+		if set.Add(name) {
+			files = append(files, &model.Object{
+				Name:     name,
+				Size:     0,
+				Modified: v.GetStorage().Modified,
+				IsFolder: true,
+			})
+		}
+	}
+	return files
+}
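+
+// Isolated sketch of the name-extraction step above (exampleVirtualName is a
+// hypothetical helper, not used elsewhere): the first path segment of a mount
+// path below the prefix becomes the virtual folder name, e.g.
+// exampleVirtualName("/a", "/a/d/e") == "d".
+func exampleVirtualName(prefix, mountPath string) string {
+	return strings.SplitN(strings.TrimPrefix(mountPath[len(prefix):], "/"), "/", 2)[0]
+}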
+
+var balanceMap generic_sync.MapOf[string, int]
+
+// GetBalancedStorage gets a storage by path, round-robin balancing across storages that share the same actual mount path.
+func (s *storagesStruct) GetBalancedStorage(path string) driver.Driver {
+	path = utils.FixAndCleanPath(path)
+	storages := getStoragesByPath(path)
+	storageNum := len(storages)
+	switch storageNum {
+	case 0:
+		return nil
+	case 1:
+		return storages[0]
+	default:
+		virtualPath := utils.GetActualMountPath(storages[0].GetStorage().MountPath)
+		i, _ := balanceMap.LoadOrStore(virtualPath, 0)
+		i = (i + 1) % storageNum
+		balanceMap.Store(virtualPath, i)
+		return storages[i]
+	}
+}
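+
+// For example, if /a/d/e and /a/d/e.balance are both loaded, successive calls
+// to GetBalancedStorage("/a/d/e/f") alternate between the two drivers; the
+// shared round-robin counter is keyed in balanceMap by the actual mount path.
+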
+func (s *storagesStruct) InitStorages() {
+	// storages, err := MyService.Storage().GetEnabledStorages()
+	// if err != nil {
+	// 	logger.Error("failed get enabled storages", zap.Any("err", err))
+	// }
+	// go func(storages []model.Storage) {
+	// 	for i := range storages {
+	// 		err := s.LoadStorage(context.Background(), storages[i])
+	// 		if err != nil {
+	// 			logger.Error("failed get enabled storages", zap.Any("err", err))
+	// 		} else {
+	// 			logger.Info("success load storage", zap.String("mount_path", storages[i].MountPath))
+	// 		}
+	// 	}
+	// 	conf.StoragesLoaded = true
+	// }(storages)
+
+}
+func NewStoragesService() StoragesService {
+	return &storagesStruct{}
+}