Merge remote-tracking branch 'origin/dev' into dev

Vanessa, 3 years ago
commit f3e0502a38
3 changed files with 63 additions and 180 deletions

  1. kernel/model/backup.go (+2 -2)
  2. kernel/model/osssync.go (+51 -171)
  3. kernel/model/sync.go (+10 -7)

+ 2 - 2
kernel/model/backup.go

@@ -278,7 +278,7 @@ func CreateLocalBackup() (err error) {
 		return
 	}

-	_, err = genCloudIndex(newBackupDir, map[string]bool{})
+	_, err = genCloudIndex(newBackupDir, map[string]bool{}, true)
 	if nil != err {
 		return
 	}
@@ -344,7 +344,7 @@ func DownloadBackup() (err error) {
 	localDirPath := Conf.Backup.GetSaveDir()
 	util.PushEndlessProgress(Conf.Language(68))
 	start := time.Now()
-	fetchedFilesCount, transferSize, _, err := ossDownload(true, localDirPath, "backup", false, map[string]bool{}, map[string]bool{})
+	fetchedFilesCount, transferSize, _, err := ossDownload(localDirPath, "backup", false, map[string]bool{}, map[string]bool{})
 	if nil == err {
 		elapsed := time.Now().Sub(start).Seconds()
 		util.LogInfof("downloaded backup [fetchedFiles=%d, transferSize=%s] in [%.2fs]", fetchedFilesCount, humanize.Bytes(transferSize), elapsed)

+ 51 - 171
kernel/model/osssync.go

@@ -162,7 +162,7 @@ func listCloudSyncDirOSS() (dirs []map[string]interface{}, size int64, err error
 	return
 }

-func ossDownload(isBackup bool, localDirPath, cloudDirPath string, bootOrExit bool, removeList, upsertList map[string]bool) (fetchedFilesCount int, transferSize uint64, downloadedFiles map[string]bool, err error) {
+func ossDownload(localDirPath, cloudDirPath string, bootOrExit bool, removeList, upsertList map[string]bool) (fetchedFilesCount int, transferSize uint64, downloadedFiles map[string]bool, err error) {
 	if !gulu.File.IsExist(localDirPath) {
 		return
 	}
@@ -183,66 +183,6 @@ func ossDownload(isBackup bool, localDirPath, cloudDirPath string, bootOrExit bo
 		return
 	}

-	if !isBackup && (0 < len(removeList) || 0 < len(upsertList)) {
-		// Upload and merge local changes
-
-		//var removed, upserted bool
-		//var removes []string
-		//for remove, _ := range removeList {
-		//	removes = append(removes, remove)
-		//}
-		//err = ossRemove0(cloudDirPath, removes)
-		//if nil != err {
-		//	util.LogErrorf("remove merge cloud file failed: %s", err)
-		//	return
-		//}
-		//for remove, _ := range removeList {
-		//	delete(cloudFileList, remove)
-		//}
-		//removed = 0 < len(removeList)
-		//
-		//var tmpWroteFiles int
-		//var tmpTransferSize uint64
-		//for upsert, _ := range upsertList {
-		//	if "/.siyuan/conf.json" == upsert { // do not overwrite the cloud-side version number
-		//		continue
-		//	}
-		//	localUpsert := filepath.Join(localDirPath, upsert)
-		//	var info os.FileInfo
-		//	info, err = os.Stat(localUpsert)
-		//	if nil != err {
-		//		util.LogErrorf("stat file [%s] failed: %s", localUpsert, err)
-		//		return
-		//	}
-		//
-		//	if err = ossUpload0(localDirPath, cloudDirPath, localUpsert, &tmpWroteFiles, &tmpTransferSize); nil != err {
-		//		util.LogErrorf("upload merge cloud file [%s] failed: %s", upsert, err)
-		//		return
-		//	}
-		//	cloudFileList[upsert] = &CloudIndex{
-		//		Size:    info.Size(),
-		//		Updated: info.ModTime().Unix(),
-		//	}
-		//	upserted = true
-		//}
-		//
-		//if removed || upserted {
-		//	data, err = gulu.JSON.MarshalJSON(cloudFileList)
-		//	if nil != err {
-		//		util.LogErrorf("marshal cloud file list failed: %s", err)
-		//		return
-		//	}
-		//	if err = os.WriteFile(tmpIndex, data, 0644); nil != err {
-		//		util.LogErrorf("write cloud file list failed: %s", err)
-		//		return
-		//	}
-		//	if err = ossUpload0(tmpSyncDir, cloudDirPath, tmpIndex, &tmpWroteFiles, &tmpTransferSize); nil != err {
-		//		util.LogErrorf("upload merge cloud file [%s] failed: %s", tmpIndex, err)
-		//		return
-		//	}
-		//}
-	}
-
 	localRemoves, cloudFetches, err := localUpsertRemoveListOSS(localDirPath, cloudFileList)
 	if nil != err {
 		return
@@ -406,19 +346,10 @@ func ossUpload(isBackup bool, localDirPath, cloudDirPath, cloudDevice string, bo
 	}

 	localDevice := Conf.System.ID
-	excludes := getSyncExcludedList(localDirPath)
-	localFileList, genIndexErr := genCloudIndex(localDirPath, excludes)
-	if nil != genIndexErr {
-		err = genIndexErr
-		return
-	}
-
-	var localUpserts, cloudRemoves []string
-	var cloudFileList map[string]*CloudIndex
-	var downloadList map[string]bool
+	var localFileList, cloudFileList map[string]*CloudIndex
 	if "" != localDevice && localDevice == cloudDevice && !isBackup {
-		//util.LogInfof("cloud device is the same as local device, get index from local")
-		localUpserts, cloudRemoves, err = cloudUpsertRemoveLocalListOSS(localDirPath, removeList, upsertList, excludes)
+		// Consecutive uploads from the same device: use the previous local index as the cloud index
+		cloudFileList, err = getLocalFileListOSS(isBackup)
 	} else {
 		cloudFileList, err = getCloudFileListOSS(cloudDirPath)
 	}
@@ -426,64 +357,25 @@ func ossUpload(isBackup bool, localDirPath, cloudDirPath, cloudDevice string, bo
 		return
 	}

+	calcHash := false
 	if 0 < len(cloudFileList) {
-		localUpserts, cloudRemoves, downloadList, err = cloudUpsertRemoveListOSS(localDirPath, cloudFileList, localFileList, removeList, upsertList, excludes)
-		if nil != err {
-			return
-		}
-		if 0 < len(downloadList) && !isBackup {
-			// Download and merge cloud changes
-
-			//var data []byte
-			//data, err = gulu.JSON.MarshalJSON(cloudFileList)
-			//if nil != err {
-			//	return
-			//}
-			//tmpSyncDir := filepath.Join(util.TempDir, "sync")
-			//indexPath := filepath.Join(tmpSyncDir, "index.json")
-			//if err = os.WriteFile(indexPath, data, 0644); nil != err {
-			//	return
-			//}
-			//
-			//var tmpFetchedFiles int
-			//var tmpTransferSize uint64
-			//err = ossDownload0(tmpSyncDir, "sync/"+Conf.Sync.CloudName, "/"+pathJSON, &tmpFetchedFiles, &tmpTransferSize, false)
-			//if nil != err {
-			//	util.LogErrorf("download merge cloud file failed: %s", err)
-			//	return
-			//}
-			//
-			//metaPath := filepath.Join(tmpSyncDir, pathJSON)
-			//var upsertFiles []string
-			//upsertFiles, err = syncDirUpsertWorkspaceData(metaPath, indexPath, downloadList)
-			//if nil != err {
-			//	util.LogErrorf("download merge cloud file failed: %s", err)
-			//	return
-			//}
-			//// Incremental indexing
-			//for _, upsertFile := range upsertFiles {
-			//	if !strings.HasSuffix(upsertFile, ".sy") {
-			//		continue
-			//	}
-			//
-			//	upsertFile = filepath.ToSlash(upsertFile)
-			//	box := upsertFile[:strings.Index(upsertFile, "/")]
-			//	p := strings.TrimPrefix(upsertFile, box)
-			//	tree, err0 := LoadTree(box, p)
-			//	if nil != err0 {
-			//		continue
-			//	}
-			//	treenode.ReindexBlockTree(tree)
-			//	sql.UpsertTreeQueue(tree)
-			//}
-			//
-			//// Regenerate the cloud index
-			//if _, err = genCloudIndex(localDirPath, excludes); nil != err {
-			//	return
-			//}
+		if idx := cloudFileList["/index.json"]; nil != idx {
+			calcHash = 0 == idx.Updated
 		}
 	}

+	excludes := getSyncExcludedList(localDirPath)
+	localFileList, err = genCloudIndex(localDirPath, excludes, calcHash)
+	if nil != err {
+		return
+	}
+
+	var localUpserts, cloudRemoves []string
+	localUpserts, cloudRemoves, err = cloudUpsertRemoveListOSS(localDirPath, cloudFileList, localFileList, excludes)
+	if nil != err {
+		return
+	}
+
 	err = ossRemove0(cloudDirPath, cloudRemoves)
 	if nil != err {
 		return
@@ -731,6 +623,28 @@ func getCloudSync(cloudDir string) (assetSize, backupSize int64, device string,
 	return
 }

+func getLocalFileListOSS(isBackup bool) (ret map[string]*CloudIndex, err error) {
+	ret = map[string]*CloudIndex{}
+	dir := "sync"
+	if isBackup {
+		dir = "backup"
+	}
+
+	localDirPath := filepath.Join(util.WorkspaceDir, dir)
+	indexPath := filepath.Join(localDirPath, "index.json")
+	if !gulu.File.IsExist(indexPath) {
+		return
+	}
+
+	data, err := os.ReadFile(indexPath)
+	if nil != err {
+		return
+	}
+
+	err = gulu.JSON.UnmarshalJSON(data, &ret)
+	return
+}
+
 func getCloudFileListOSS(cloudDirPath string) (ret map[string]*CloudIndex, err error) {
 	result := map[string]interface{}{}
 	request := util.NewCloudRequest(Conf.System.NetworkProxy.String())
@@ -806,8 +720,8 @@ func localUpsertRemoveListOSS(localDirPath string, cloudFileList map[string]*Clo
 			// Prefer the timestamp check
 			if localModTime := info.ModTime().Unix(); cloudIdx.Updated == localModTime {
 				unchanged[relPath] = true
-				return nil
 			}
+			return nil
 		}

 		localHash, hashErr := util.GetEtag(path)
@@ -834,37 +748,9 @@ func localUpsertRemoveListOSS(localDirPath string, cloudFileList map[string]*Clo
 	return
 }

-func cloudUpsertRemoveLocalListOSS(localDirPath string, removedSyncList, upsertedSyncList, excludes map[string]bool) (localUpserts, cloudRemoves []string, err error) {
+func cloudUpsertRemoveListOSS(localDirPath string, cloudFileList, localFileList map[string]*CloudIndex, excludes map[string]bool) (localUpserts, cloudRemoves []string, err error) {
 	localUpserts, cloudRemoves = []string{}, []string{}

-	for removed, _ := range removedSyncList {
-		cloudRemoves = append(cloudRemoves, removed)
-	}
-
-	for upsert, _ := range upsertedSyncList {
-		p := filepath.Join(localDirPath, upsert)
-		if excludes[p] {
-			continue
-		}
-		info, statErr := os.Stat(p)
-		if nil != statErr {
-			util.LogErrorf("stat file [%s] failed: %s", p, statErr)
-			err = statErr
-			return
-		}
-		if util.CloudSingleFileMaxSizeLimit < info.Size() {
-			util.LogWarnf("file [%s] larger than 100MB, ignore uploading it", p)
-			continue
-		}
-		localUpserts = append(localUpserts, p)
-	}
-	return
-}
-
-func cloudUpsertRemoveListOSS(localDirPath string, cloudFileList, localFileList map[string]*CloudIndex, removeList, upsertList, excludes map[string]bool) (localUpserts, cloudRemoves []string, downloadList map[string]bool, err error) {
-	localUpserts, cloudRemoves = []string{}, []string{}
-
-	cloudChangedList := map[string]bool{}
 	unchanged := map[string]bool{}
 	for cloudFile, cloudIdx := range cloudFileList {
 		localIdx := localFileList[cloudFile]
@@ -872,16 +758,18 @@ func cloudUpsertRemoveListOSS(localDirPath string, cloudFileList, localFileList
 			cloudRemoves = append(cloudRemoves, cloudFile)
 			continue
 		}
+		if 0 < cloudIdx.Updated {
+			// Prefer the timestamp check
+			if localIdx.Updated == cloudIdx.Updated {
+				unchanged[filepath.Join(localDirPath, cloudFile)] = true
+			}
+			continue
+		}
+
 		if localIdx.Hash == cloudIdx.Hash {
 			unchanged[filepath.Join(localDirPath, cloudFile)] = true
 			continue
 		}
-		//if localIdx.Updated == cloudIdx.Updated {
-		//	unchanged[filepath.Join(localDirPath, cloudFile)] = true
-		//	continue
-		//}
-
-		cloudChangedList[cloudFile] = true
 	}

 	filepath.Walk(localDirPath, func(path string, info fs.FileInfo, err error) error {
@@ -902,14 +790,6 @@ func cloudUpsertRemoveListOSS(localDirPath string, cloudFileList, localFileList
 		}
 		return nil
 	})
-
-	downloadList = map[string]bool{}
-	for cloudChanged, _ := range cloudChangedList {
-		if upsertList[cloudChanged] || removeList[cloudChanged] || excludes[cloudChanged] || "/"+pathJSON == cloudChanged || "/index.json" == cloudChanged {
-			continue
-		}
-		downloadList[cloudChanged] = true
-	}
 	return
 }


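To make the new comparison rule easier to follow, here is a minimal, self-contained Go sketch of the logic the hunks above introduce in cloudUpsertRemoveListOSS: the timestamp is checked first, and the content hash is only consulted for legacy index entries whose Updated field is zero. The CloudIndex fields mirror the struct in kernel/model/sync.go; the isUnchanged helper and the sample values in main are illustrative only and do not exist in the repository.

package main

import "fmt"

// CloudIndex mirrors the index entry shape used in kernel/model/sync.go.
type CloudIndex struct {
	Hash    string
	Size    int64
	Updated int64
}

// isUnchanged follows the comparison order of the reworked
// cloudUpsertRemoveListOSS: when the cloud entry carries a timestamp,
// only timestamps are compared; the hash is a fallback for legacy
// entries whose Updated field is zero.
func isUnchanged(local, cloud *CloudIndex) bool {
	if nil == local || nil == cloud {
		return false
	}
	if 0 < cloud.Updated {
		return local.Updated == cloud.Updated
	}
	return local.Hash == cloud.Hash
}

func main() {
	local := &CloudIndex{Hash: "abc", Updated: 1650000000}
	cloud := &CloudIndex{Hash: "def", Updated: 1650000000}
	fmt.Println(isUnchanged(local, cloud)) // true: timestamps match, hash is not consulted
}
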
+ 10 - 7
kernel/model/sync.go

@@ -329,7 +329,7 @@ func SyncData(boot, exit, byHand bool) {
 		return
 	}

-	fetchedFilesCount, transferSize, downloadedFiles, err := ossDownload(false, localSyncDirPath, "sync/"+Conf.Sync.CloudName, boot || exit, removeList, upsertList)
+	fetchedFilesCount, transferSize, downloadedFiles, err := ossDownload(localSyncDirPath, "sync/"+Conf.Sync.CloudName, boot || exit, removeList, upsertList)
 	if nil != err {
 		util.PushClearProgress()
 		msg := fmt.Sprintf(Conf.Language(80), formatErrorMsg(err))
@@ -613,7 +613,7 @@ type CloudIndex struct {
 }

 // genCloudIndex generates the cloud index file.
-func genCloudIndex(localDirPath string, excludes map[string]bool) (cloudIndex map[string]*CloudIndex, err error) {
+func genCloudIndex(localDirPath string, excludes map[string]bool, calcHash bool) (cloudIndex map[string]*CloudIndex, err error) {
 	cloudIndex = map[string]*CloudIndex{}
 	err = filepath.Walk(localDirPath, func(path string, info fs.FileInfo, err error) error {
 		if nil != err {
@@ -629,11 +629,14 @@ func genCloudIndex(localDirPath string, excludes map[string]bool) (cloudIndex ma

 		p := strings.TrimPrefix(path, localDirPath)
 		p = filepath.ToSlash(p)
-		// TODO: optimize the resource usage and elapsed time of cloud sync upload https://github.com/siyuan-note/siyuan/issues/5093
-		hash, hashErr := util.GetEtag(path)
-		if nil != hashErr {
-			err = hashErr
-			return io.EOF
+		hash := ""
+		if calcHash {
+			var hashErr error
+			hash, hashErr = util.GetEtag(path)
+			if nil != hashErr {
+				err = hashErr
+				return io.EOF
+			}
 		}
 		cloudIndex[p] = &CloudIndex{Hash: hash, Size: info.Size(), Updated: info.ModTime().Unix()}
 		return nil