Merge remote-tracking branch 'origin/dev' into dev
commit 2af8b67cbd

6 changed files with 53 additions and 23 deletions
@@ -355,6 +355,11 @@
         const {ipcRenderer} = require('electron')
         const fs = require('fs')
         const initPath = result.filePaths[0]
+        if (isCloudDrivePath(initPath)) {
+          alert('⚠️ This folder may be a cloud sync disk folder, please change to a local folder')
+          return
+        }
+
         if (!fs.existsSync(initPath)) {
           fs.mkdirSync(initPath, {mode: 0o755, recursive: true})
         }
@@ -366,6 +371,12 @@
           })
         })
       })
+
+      const isCloudDrivePath = (absPath) => {
+        const absPathLower = absPath.toLowerCase()
+        return -1 < absPathLower.indexOf("onedrive") || -1 < absPathLower.indexOf("dropbox") ||
+          -1 < absPathLower.indexOf("google drive") || -1 < absPathLower.indexOf("pcloud")
+      }
     </script>
   </body>
 </html>
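The renderer-side guard above is a plain keyword heuristic: lowercase the chosen path and look for the folder names of well-known cloud sync drives. A minimal Go sketch of the same idea, for illustration only; the real check is the JavaScript isCloudDrivePath shown in the hunk, and the Go names below (isCloudSyncPath, cloudDriveMarkers) are hypothetical:

    // A Go analogue of the isCloudDrivePath check above; all names are illustrative.
    package main

    import (
        "fmt"
        "strings"
    )

    // Folder names used as markers, mirroring the list in the diff.
    var cloudDriveMarkers = []string{"onedrive", "dropbox", "google drive", "pcloud"}

    // isCloudSyncPath reports whether absPath appears to live inside a cloud
    // sync drive folder, using the same case-insensitive substring heuristic.
    func isCloudSyncPath(absPath string) bool {
        lower := strings.ToLower(absPath)
        for _, marker := range cloudDriveMarkers {
            if strings.Contains(lower, marker) {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(isCloudSyncPath(`C:\Users\me\OneDrive\notes`)) // true
        fmt.Println(isCloudSyncPath(`D:\workspace\notes`))         // false
    }

The match is deliberately loose: any path containing one of the marker names triggers the warning, so false positives are possible, but the check stays dependency-free.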
@@ -41,6 +41,10 @@ func LoadTree(boxID, p string, luteEngine *lute.Lute) (ret *parse.Tree, err erro
     data, err := filelock.ReadFile(filePath)
     if nil != err {
         logging.LogErrorf("load tree [%s] failed: %s", p, err)
+        if errors.Is(err, filelock.ErrUnableAccessFile) {
+            os.Exit(util.ExitCodeFileSysInconsistent)
+            return
+        }
         return
     }
 
@@ -92,6 +96,10 @@ func LoadTreeByData(data []byte, boxID, p string, luteEngine *lute.Lute) (ret *p
             }
         } else {
             logging.LogWarnf("read parent tree data [%s] failed: %s", parentAbsPath, readErr)
+            if errors.Is(readErr, filelock.ErrUnableAccessFile) {
+                os.Exit(util.ExitCodeFileSysInconsistent)
+                return
+            }
         }
         hPathBuilder.WriteString("Untitled/")
         continue
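Both tree.go hunks add the same guard: when a read fails and the error matches the sentinel filelock.ErrUnableAccessFile, the kernel logs the failure and exits with util.ExitCodeFileSysInconsistent instead of continuing on a file system it can no longer trust. A self-contained sketch of that sentinel-error pattern; the sentinel value, the exit code number, and the file path below are stand-ins, not the project's:

    // Sketch of matching a sentinel error with errors.Is and exiting with a
    // dedicated code. Only errors.Is and %w wrapping are standard behavior here.
    package main

    import (
        "errors"
        "fmt"
        "log"
        "os"
    )

    // errUnableAccessFile stands in for a package-level sentinel such as
    // filelock.ErrUnableAccessFile in the diff above.
    var errUnableAccessFile = errors.New("unable to access file")

    const exitCodeFileSysInconsistent = 21 // assumed value, for illustration only

    func readFile(path string) ([]byte, error) {
        // Wrap the sentinel so callers can still match it with errors.Is.
        return nil, fmt.Errorf("read %s: %w", path, errUnableAccessFile)
    }

    func main() {
        if _, err := readFile("data/20230101-abc.sy"); err != nil {
            log.Printf("load tree failed: %s", err)
            if errors.Is(err, errUnableAccessFile) {
                // Unrecoverable: bail out with a recognizable exit code.
                os.Exit(exitCodeFileSysInconsistent)
            }
        }
    }

Because errors.Is follows %w wrapping, the check still fires even when the sentinel has been decorated with extra context along the way.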
@@ -492,7 +492,7 @@ func RenameAsset(oldPath, newName string) (err error) {
         return
     }
 
-    newName = util.AssetName(newName) + filepath.Ext(oldPath)
+    newName = util.AssetName(newName + filepath.Ext(oldPath))
     newPath := "assets/" + newName
     if err = filelock.Copy(filepath.Join(util.DataDir, oldPath), filepath.Join(util.DataDir, newPath)); nil != err {
         logging.LogErrorf("copy asset [%s] failed: %s", oldPath, err)
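The one-line change hands the file extension to util.AssetName instead of appending it afterwards. A hypothetical sketch of why the ordering can matter, under the assumption (mine, not the project's documented behavior) that the helper detects the extension in its input and places a unique suffix in front of it:

    // assetName is a stand-in for a helper like util.AssetName; the suffixing
    // scheme here is an assumption used purely to illustrate the ordering issue.
    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
        "time"
    )

    func assetName(name string) string {
        ext := filepath.Ext(name)
        base := strings.TrimSuffix(name, ext)
        return base + "-" + time.Now().Format("20060102150405") + ext
    }

    func main() {
        oldPath := "assets/photo-old.png"
        newName := "v1.2-photo" // a new name that itself contains a dot

        // Old ordering: the helper never sees ".png", treats ".2-photo" as the
        // extension, and the result comes out mangled.
        fmt.Println(assetName(newName) + filepath.Ext(oldPath)) // e.g. v1-20240102150405.2-photo.png

        // Fixed ordering: the helper receives the full file name and keeps ".png" last.
        fmt.Println(assetName(newName + filepath.Ext(oldPath))) // e.g. v1.2-photo-20240102150405.png
    }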
@@ -59,7 +59,6 @@ type AppConf struct {
     Export    *conf.Export     `json:"export"`    // Export configuration
     Graph     *conf.Graph      `json:"graph"`     // Graph view configuration
     UILayout  *conf.UILayout   `json:"uiLayout"`  // UI layout; no longer used after v2.8.0
-    UILayouts []*conf.UILayout `json:"uiLayouts"` // List of UI layouts
     UserData  string           `json:"userData"`  // Community user info, stored as an encrypted form of User
     User      *conf.User       `json:"-"`         // In-memory community user structure, not persisted
     Account   *conf.Account    `json:"account"`   // Account configuration
@@ -150,9 +149,6 @@ func InitConf() {
     if nil == Conf.UILayout {
         Conf.UILayout = &conf.UILayout{}
     }
-    if 1 > len(Conf.UILayouts) {
-        Conf.UILayouts = []*conf.UILayout{}
-    }
     if nil == Conf.Keymap {
         Conf.Keymap = &conf.Keymap{}
     }
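The two conf.go hunks remove the UILayouts list and its initialization, leaving the usual defensive pattern around it: after the config JSON is unmarshalled, any nil sub-struct is replaced with an empty default so later code can dereference it safely. A minimal sketch of that pattern with stand-in types (not the project's actual config structs):

    // Sketch of backfilling nil config sections after json.Unmarshal.
    package main

    import (
        "encoding/json"
        "fmt"
    )

    type UILayout struct {
        Layout string `json:"layout"`
    }

    type Keymap struct {
        General map[string]string `json:"general"`
    }

    type AppConf struct {
        UILayout *UILayout `json:"uiLayout"`
        Keymap   *Keymap   `json:"keymap"`
    }

    func initConf(raw []byte) (*AppConf, error) {
        conf := &AppConf{}
        if err := json.Unmarshal(raw, conf); err != nil {
            return nil, err
        }
        // Backfill missing sections with empty defaults, mirroring the nil checks above.
        if nil == conf.UILayout {
            conf.UILayout = &UILayout{}
        }
        if nil == conf.Keymap {
            conf.Keymap = &Keymap{}
        }
        return conf, nil
    }

    func main() {
        conf, err := initConf([]byte(`{"uiLayout": {"layout": "default"}}`))
        if err != nil {
            panic(err)
        }
        fmt.Printf("%+v %+v\n", conf.UILayout, conf.Keymap) // Keymap backfilled as empty
    }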
@@ -130,7 +130,7 @@ func FlushQueue() {
         context["total"] = groupOpsTotal[op.action]
         if err = execOp(op, tx, context); nil != err {
             tx.Rollback()
-            logging.LogErrorf("queue operation failed: %s", err)
+            logging.LogErrorf("queue operation [%s] failed: %s", op.action, err)
             continue
         }
 
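The queue change only enriches the error log with the failing operation's action, which makes a skipped entry identifiable after the fact. A generic sketch of the surrounding rollback-log-continue pattern, with stand-in operation and transaction types rather than the real execOp/tx machinery:

    // Sketch of flushing a queue of operations: on failure, roll back, log
    // which action failed, and keep going with the remaining operations.
    package main

    import (
        "errors"
        "log"
    )

    type operation struct {
        action string
        exec   func() error
    }

    type fakeTx struct{ rolledBack bool }

    func (tx *fakeTx) Rollback() { tx.rolledBack = true }

    func flushQueue(ops []operation) {
        for _, op := range ops {
            tx := &fakeTx{} // stand-in for a real per-op transaction
            if err := op.exec(); err != nil {
                tx.Rollback()
                // Include the action so the failing op shows up in the logs.
                log.Printf("queue operation [%s] failed: %s", op.action, err)
                continue
            }
            // a real implementation would commit here
        }
    }

    func main() {
        flushQueue([]operation{
            {action: "upsert_block", exec: func() error { return nil }},
            {action: "delete_block", exec: func() error { return errors.New("row locked") }},
            {action: "update_refs", exec: func() error { return nil }},
        })
    }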
@@ -17,6 +17,7 @@
 package util
 
 import (
+    "bytes"
     "fmt"
     "math/rand"
     "os"
@@ -137,7 +138,12 @@ func CheckFileSysStatus() {
 
     reportFileSysFatalError := func(err error) {
         stack := debug.Stack()
-        logging.LogErrorf("check file system status failed: %s, %s", err, stack)
+        output := string(stack)
+        if 5 < strings.Count(output, "\n") {
+            lines := strings.Split(output, "\n")
+            output = strings.Join(lines[5:], "\n")
+        }
+        logging.LogErrorf("check file system status failed: %s, %s", err, output)
         os.Exit(ExitCodeFileSysInconsistent)
     }
 
@@ -162,7 +168,7 @@ func CheckFileSysStatus() {
            continue
         }
 
-        for i := 0; i < 32; i++ {
+        for i := 0; i < 7; i++ {
             tmp := filepath.Join(dir, "check_consistency")
             data := make([]byte, 1024*4)
             _, err := rand.Read(data)
@@ -176,10 +182,18 @@ func CheckFileSysStatus() {
                 break
             }
 
-            time.Sleep(time.Second)
+            time.Sleep(5 * time.Second)
 
-            f, err := os.Open(tmp)
+            for j := 0; j < 32; j++ {
+                renamed := tmp + "_renamed"
+                if err = os.Rename(tmp, renamed); nil != err {
+                    reportFileSysFatalError(err)
+                    break
+                }
 
+                time.Sleep(1 * time.Millisecond)
+
+                f, err := os.Open(renamed)
                 if nil != err {
                     reportFileSysFatalError(err)
                     break
@@ -190,15 +204,7 @@ func CheckFileSysStatus() {
                     break
                 }
 
-            time.Sleep(200 * time.Millisecond)
-
-            if err = os.Rename(tmp, tmp+"_renamed"); nil != err {
-                reportFileSysFatalError(err)
-                break
-            }
-
-            time.Sleep(200 * time.Millisecond)
-            if err = os.Rename(tmp+"_renamed", tmp); nil != err {
+                if err = os.Rename(renamed, tmp); nil != err {
                     reportFileSysFatalError(err)
                     break
                 }
@@ -209,14 +215,23 @@ func CheckFileSysStatus() {
                 break
             }
 
             count := 0
+            checkFilenames := bytes.Buffer{}
             for _, entry := range entries {
                 if !entry.IsDir() && strings.Contains(entry.Name(), "check_") {
                     count++
+                    checkFilenames.WriteString(entry.Name())
+                    checkFilenames.WriteString("\n")
                 }
             }
             if 1 < count {
-                reportFileSysFatalError(fmt.Errorf("dir [%s] has more than 1 file", dir))
-                break
-            }
-        }
+                lines := strings.Split(strings.TrimSpace(checkFilenames.String()), "\n")
+                if 1 < len(lines) {
+                    buf := bytes.Buffer{}
+                    for _, line := range lines {
+                        buf.WriteString("  ")
+                        buf.WriteString(line)
+                        buf.WriteString("\n")
+                    }
+                    output := buf.String()
+                    reportFileSysFatalError(fmt.Errorf("dir [%s] has more than 1 file:\n%s", dir, output))
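Taken together, the util hunks rework the consistency probe: fewer outer passes (7 instead of 32), a longer settle time, an inner loop that renames the probe file away and back with short pauses and reopens it each time, and a final scan that lists any leftover check_ files in the fatal report. A condensed, self-contained sketch of that probe pattern; the constants, cleanup, and error handling below are illustrative, not the project's exact routine:

    // Sketch of a file system consistency probe: write a temp file, rename it
    // away and back with a short pause, reopen it, then make sure no extra
    // probe files are left behind in the directory.
    package main

    import (
        "crypto/rand"
        "fmt"
        "log"
        "os"
        "path/filepath"
        "strings"
        "time"
    )

    func checkDir(dir string) error {
        tmp := filepath.Join(dir, "check_consistency")
        data := make([]byte, 4*1024)
        if _, err := rand.Read(data); err != nil {
            return err
        }
        if err := os.WriteFile(tmp, data, 0644); err != nil {
            return err
        }

        // Rename away and back; a broken or eventually-consistent file system
        // (for example a cloud sync folder) can lose track of the file here.
        renamed := tmp + "_renamed"
        if err := os.Rename(tmp, renamed); err != nil {
            return err
        }
        time.Sleep(time.Millisecond)
        f, err := os.Open(renamed)
        if err != nil {
            return err
        }
        f.Close()
        if err := os.Rename(renamed, tmp); err != nil {
            return err
        }

        // A healthy directory should contain exactly one probe file afterwards.
        entries, err := os.ReadDir(dir)
        if err != nil {
            return err
        }
        count := 0
        for _, entry := range entries {
            if !entry.IsDir() && strings.Contains(entry.Name(), "check_") {
                count++
            }
        }
        if count > 1 {
            return fmt.Errorf("dir [%s] has more than 1 probe file", dir)
        }
        return os.Remove(tmp)
    }

    func main() {
        dir, err := os.MkdirTemp("", "fscheck")
        if err != nil {
            log.Fatal(err)
        }
        defer os.RemoveAll(dir)
        if err := checkDir(dir); err != nil {
            log.Fatalf("file system check failed: %s", err) // fatal in the real routine
        }
        fmt.Println("file system check passed")
    }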