123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402 |
- import 'dart:async';
- import 'dart:core';
- import 'dart:io';
- import "package:collection/collection.dart";
- import 'package:flutter/foundation.dart';
- import 'package:logging/logging.dart';
- import 'package:photos/core/configuration.dart';
- import 'package:photos/core/constants.dart';
- import 'package:photos/core/errors.dart';
- import 'package:photos/db/file_updation_db.dart';
- import 'package:photos/db/files_db.dart';
- import 'package:photos/extensions/stop_watch.dart';
- import 'package:photos/models/file.dart' as ente;
- import "package:photos/models/location/location.dart";
- import "package:photos/models/magic_metadata.dart";
- import "package:photos/services/file_magic_service.dart";
- import 'package:photos/services/files_service.dart';
- import "package:photos/utils/exif_util.dart";
- import 'package:photos/utils/file_uploader_util.dart';
- import 'package:photos/utils/file_util.dart';
- import 'package:shared_preferences/shared_preferences.dart';
- // LocalFileUpdateService tracks all the potential local file IDs which have
- // changed/modified on the device and needed to be uploaded again.
// LocalFileUpdateService tracks all the potential local file IDs which have
// changed/modified on the device and need to be uploaded again. It also runs
// one-off migrations (bad creation time, missing/bad location coordinates)
// that mark affected files for metadata repair or re-upload.
class LocalFileUpdateService {
  late FileUpdationDB _fileUpdationDB;
  late SharedPreferences _prefs;
  late Logger _logger;

  // SharedPreferences flags. "*ImportDone" means candidate files have been
  // copied into FileUpdationDB; "*MigrationDone/Complete" means every
  // candidate has been processed.
  static const isBadCreationTimeImportDone = 'fm_badCreationTime';
  static const isBadCreationTimeMigrationComplete =
      'fm_badCreationTimeCompleted';
  static const isMissingLocationV2ImportDone = "fm_missingLocationV2ImportDone";
  static const isMissingLocationV2MigrationDone =
      "fm_missingLocationV2MigrationDone";
  static const isBadLocationCordImportDone = "fm_badLocationImportDone";
  static const isBadLocationCordMigrationDone = "fm_badLocationMigrationDone";

  // Non-null while a migration pass is in flight; concurrent callers of
  // [markUpdatedFilesForReUpload] await this instead of starting another pass.
  Completer<void>? _existingMigration;

  LocalFileUpdateService._privateConstructor() {
    _logger = Logger((LocalFileUpdateService).toString());
    _fileUpdationDB = FileUpdationDB.instance;
  }

  void init(SharedPreferences preferences) {
    _prefs = preferences;
  }

  static LocalFileUpdateService instance =
      LocalFileUpdateService._privateConstructor();

  /// Whether the bad-creation-time migration has fully completed.
  bool isBadCreationMigrationCompleted() {
    return (_prefs.getBool(isBadCreationTimeMigrationComplete) ?? false);
  }

  /// Runs one pass of the hash check plus (on Android) the pending one-off
  /// migrations. If a pass is already running, awaits it instead of starting
  /// a second one.
  Future<void> markUpdatedFilesForReUpload() async {
    if (_existingMigration != null) {
      _logger.info("migration is already in progress, skipping");
      return _existingMigration!.future;
    }
    _existingMigration = Completer<void>();
    try {
      await _markFilesWhichAreActuallyUpdated();
      if (Platform.isAndroid) {
        await _migrationForFixingBadCreationTime();
        await _migrationFilesWithMissingLocationV2();
        await _migrationFilesWithBadLocationCord();
      }
    } catch (e, s) {
      _logger.severe('failed to perform migration', e, s);
    } finally {
      _existingMigration?.complete();
      _existingMigration = null;
    }
  }

  // This method analyses all of local files for which the file
  // modification/update time was changed. It checks if the existing fileHash
  // is different from the hash of uploaded file. If fileHash are different,
  // then it marks the file for file update.
  Future<void> _markFilesWhichAreActuallyUpdated() async {
    final sTime = DateTime.now().microsecondsSinceEpoch;
    // singleRunLimit indicates number of files to check during single
    // invocation of this method. The limit acts as a crude way to limit the
    // resource consumed by the method.
    const int singleRunLimit = 10;
    final localIDsToProcess =
        await _fileUpdationDB.getLocalIDsForPotentialReUpload(
      singleRunLimit,
      FileUpdationDB.modificationTimeUpdated,
    );
    if (localIDsToProcess.isNotEmpty) {
      await _checkAndMarkFilesWithDifferentHashForFileUpdate(
        localIDsToProcess,
      );
      final eTime = DateTime.now().microsecondsSinceEpoch;
      final d = Duration(microseconds: eTime - sTime);
      _logger.info(
        'Performed hashCheck for ${localIDsToProcess.length} updated files '
        'completed in ${d.inSeconds.toString()} secs',
      );
    }
  }

  /// Re-hashes each local file and, when the hash no longer matches the
  /// uploaded copy, clears its cache and resets its uploaded state so the
  /// uploader picks it up again. Processed IDs are removed from
  /// [FileUpdationDB] at the end; IDs that failed transiently stay queued.
  Future<void> _checkAndMarkFilesWithDifferentHashForFileUpdate(
    List<String> localIDsToProcess,
  ) async {
    _logger.info("files to process ${localIDsToProcess.length} for reupload");
    final List<ente.File> localFiles =
        await FilesDB.instance.getLocalFiles(localIDsToProcess);
    final Set<String> processedIDs = {};
    for (ente.File file in localFiles) {
      // Multiple collection entries can share a localID; hash it only once.
      if (processedIDs.contains(file.localID)) {
        continue;
      }
      MediaUploadData uploadData;
      try {
        uploadData = await getUploadData(file);
        if (uploadData.hashData != null &&
            file.hash != null &&
            (file.hash == uploadData.hashData!.fileHash ||
                file.hash == uploadData.hashData!.zipHash)) {
          _logger.info("Skip file update as hash matched ${file.tag}");
        } else {
          _logger.info(
            "Marking for file update as hash did not match ${file.tag}",
          );
          await clearCache(file);
          await FilesDB.instance.updateUploadedFile(
            file.localID!,
            file.title,
            file.location,
            file.creationTime!,
            file.modificationTime!,
            null,
          );
        }
        processedIDs.add(file.localID!);
      } on InvalidFileError {
        // if we fail to get the file, we can ignore the update
        processedIDs.add(file.localID!);
      } catch (e) {
        // transient failure: keep the ID queued so a later pass retries it
        _logger.severe("Failed to get file uploadData", e);
      }
    }
    debugPrint("Deleting files ${processedIDs.length}");
    await _fileUpdationDB.deleteByLocalIDs(
      processedIDs.toList(),
      FileUpdationDB.modificationTimeUpdated,
    );
  }

  Future<MediaUploadData> getUploadData(ente.File file) async {
    final mediaUploadData = await getUploadDataFromEnteFile(file);
    // delete the file from app's internal cache if it was copied to app
    // for upload. Shared Media should only be cleared when the upload
    // succeeds.
    if (Platform.isIOS && mediaUploadData.sourceFile != null) {
      await mediaUploadData.sourceFile?.delete();
    }
    return mediaUploadData;
  }

  /// Processes one batch of files whose creation time predates a sane epoch
  /// and re-derives it from the file name via [FilesService.bulkEditTime].
  /// Marks the migration complete once the queue drains.
  Future<void> _migrationForFixingBadCreationTime() async {
    if (_prefs.containsKey(isBadCreationTimeMigrationComplete)) {
      return;
    }
    await _importFilesWithBadCreationTime();
    const int singleRunLimit = 100;
    try {
      final generatedIDs =
          await _fileUpdationDB.getLocalIDsForPotentialReUpload(
        singleRunLimit,
        FileUpdationDB.badCreationTime,
      );
      if (generatedIDs.isNotEmpty) {
        // For this migration the queue stores generated (int) IDs as strings.
        final List<int> genIdIntList = [];
        for (String genIdString in generatedIDs) {
          final int? genIdInt = int.tryParse(genIdString);
          if (genIdInt != null) {
            genIdIntList.add(genIdInt);
          }
        }
        final filesWithBadTime =
            (await FilesDB.instance.getFilesFromGeneratedIDs(genIdIntList))
                .values
                .toList();
        // Skip files where the user already edited the time manually.
        filesWithBadTime.removeWhere(
          (e) => e.isUploaded && e.pubMagicMetadata?.editedTime != null,
        );
        await FilesService.instance
            .bulkEditTime(filesWithBadTime, EditTimeSource.fileName);
      } else {
        // everything is done
        await _prefs.setBool(isBadCreationTimeMigrationComplete, true);
      }
      await _fileUpdationDB.deleteByLocalIDs(
        generatedIDs,
        FileUpdationDB.badCreationTime,
      );
    } catch (e) {
      _logger.severe("Failed to fix bad creationTime", e);
    }
  }

  /// One-time import of candidate files (older than 1 Jan 1981) into the
  /// bad-creation-time queue.
  Future<void> _importFilesWithBadCreationTime() async {
    if (_prefs.containsKey(isBadCreationTimeImportDone)) {
      return;
    }
    _logger.info('_importFilesWithBadCreationTime');
    final EnteWatch watch = EnteWatch("_importFilesWithBadCreationTime");
    final int ownerID = Configuration.instance.getUserID()!;
    final filesGeneratedID = await FilesDB.instance
        .getGeneratedIDForFilesOlderThan(jan011981Time, ownerID);
    await _fileUpdationDB.insertMultiple(
      filesGeneratedID,
      FileUpdationDB.badCreationTime,
    );
    watch.log("imported ${filesGeneratedID.length} files");
    // awaited for consistency with the other import flags, so a failure to
    // persist the flag is not silently dropped
    await _prefs.setBool(isBadCreationTimeImportDone, true);
  }

  /// Processes one batch of files without location data: recovers the
  /// coordinates from EXIF (when present and valid) and pushes them to the
  /// public magic metadata. Marks the migration complete once the queue
  /// drains.
  Future<void> _migrationFilesWithMissingLocationV2() async {
    if (_prefs.containsKey(isMissingLocationV2MigrationDone)) {
      return;
    }
    await _importForMissingLocationV2();
    const int singleRunLimit = 10;
    final List<String> processedIDs = [];
    try {
      final localIDs = await _fileUpdationDB.getLocalIDsForPotentialReUpload(
        singleRunLimit,
        FileUpdationDB.missingLocationV2,
      );
      if (localIDs.isEmpty) {
        // everything is done
        await _prefs.setBool(isMissingLocationV2MigrationDone, true);
        return;
      }
      final List<ente.File> enteFiles = await FilesDB.instance
          .getFilesForLocalIDs(localIDs, Configuration.instance.getUserID()!);
      // find localIDs which are not present in enteFiles; they have nothing
      // to fix and can be dropped from the queue
      final List<String> missingLocalIDs = [];
      for (String localID in localIDs) {
        if (enteFiles.firstWhereOrNull((e) => e.localID == localID) == null) {
          missingLocalIDs.add(localID);
        }
      }
      processedIDs.addAll(missingLocalIDs);
      final List<ente.File> remoteFilesToUpdate = [];
      final Map<int, Map<String, double>> fileIDToUpdateMetadata = {};
      for (ente.File file in enteFiles) {
        final Location? location = await tryLocationFromExif(file);
        if (location != null && Location.isValidLocation(location)) {
          remoteFilesToUpdate.add(file);
          fileIDToUpdateMetadata[file.uploadedFileID!] = {
            pubMagicKeyLat: location.latitude!,
            pubMagicKeyLong: location.longitude!
          };
        } else if (file.localID != null) {
          // no usable EXIF location; nothing more we can do for this file
          processedIDs.add(file.localID!);
        }
      }
      if (remoteFilesToUpdate.isNotEmpty) {
        await FileMagicService.instance.updatePublicMagicMetadata(
          remoteFilesToUpdate,
          null,
          metadataUpdateMap: fileIDToUpdateMetadata,
        );
        for (ente.File file in remoteFilesToUpdate) {
          if (file.localID != null) {
            processedIDs.add(file.localID!);
          }
        }
      }
    } catch (e) {
      _logger.severe("Failed to fix missing location", e);
    } finally {
      // only successfully handled IDs are dequeued; failures stay for retry
      await _fileUpdationDB.deleteByLocalIDs(
        processedIDs,
        FileUpdationDB.missingLocationV2,
      );
    }
  }

  /// One-time import of files without location data into the
  /// missing-location-v2 queue.
  Future<void> _importForMissingLocationV2() async {
    if (_prefs.containsKey(isMissingLocationV2ImportDone)) {
      return;
    }
    _logger.info('_importForMissingLocationV2');
    final EnteWatch watch = EnteWatch("_importForMissingLocationV2");
    final int ownerID = Configuration.instance.getUserID()!;
    final List<String> localIDs =
        await FilesDB.instance.getLocalIDsForFilesWithoutLocation(ownerID);
    await _fileUpdationDB.insertMultiple(
      localIDs,
      FileUpdationDB.missingLocationV2,
    );
    watch.log("imported ${localIDs.length} files");
    await _prefs.setBool(isMissingLocationV2ImportDone, true);
  }

  /// Processes one batch of files uploaded during the window where location
  /// coordinates were stored incorrectly, restoring the correct values from
  /// EXIF. Marks the migration complete once the queue drains.
  Future<void> _migrationFilesWithBadLocationCord() async {
    if (_prefs.containsKey(isBadLocationCordMigrationDone)) {
      return;
    }
    await _importForBadLocationCord();
    const int singleRunLimit = 10;
    final List<String> processedIDs = [];
    try {
      final localIDs = await _fileUpdationDB.getLocalIDsForPotentialReUpload(
        singleRunLimit,
        FileUpdationDB.badLocationCord,
      );
      if (localIDs.isEmpty) {
        // everything is done
        await _prefs.setBool(isBadLocationCordMigrationDone, true);
        return;
      }
      final List<ente.File> enteFiles = await FilesDB.instance
          .getFilesForLocalIDs(localIDs, Configuration.instance.getUserID()!);
      // find localIDs which are not present in enteFiles; they have nothing
      // to fix and can be dropped from the queue
      final List<String> missingLocalIDs = [];
      for (String localID in localIDs) {
        if (enteFiles.firstWhereOrNull((e) => e.localID == localID) == null) {
          missingLocalIDs.add(localID);
        }
      }
      processedIDs.addAll(missingLocalIDs);
      final List<ente.File> remoteFilesToUpdate = [];
      final Map<int, Map<String, double>> fileIDToUpdateMetadata = {};
      for (ente.File file in enteFiles) {
        final Location? location = await tryLocationFromExif(file);
        if (location != null &&
            (location.latitude ?? 0) != 0.0 &&
            (location.longitude ?? 0) != 0.0) {
          // check if the location is already correct
          // (fixed: previously compared stored longitude against EXIF
          // latitude, so correct files were needlessly re-updated)
          if (file.location != null &&
              file.location?.latitude == location.latitude &&
              file.location?.longitude == location.longitude) {
            processedIDs.add(file.localID!);
          } else {
            remoteFilesToUpdate.add(file);
            fileIDToUpdateMetadata[file.uploadedFileID!] = {
              pubMagicKeyLat: location.latitude!,
              pubMagicKeyLong: location.longitude!
            };
          }
        } else if (file.localID != null) {
          // no usable EXIF location; nothing more we can do for this file
          processedIDs.add(file.localID!);
        }
      }
      if (remoteFilesToUpdate.isNotEmpty) {
        await FileMagicService.instance.updatePublicMagicMetadata(
          remoteFilesToUpdate,
          null,
          metadataUpdateMap: fileIDToUpdateMetadata,
        );
        for (ente.File file in remoteFilesToUpdate) {
          if (file.localID != null) {
            processedIDs.add(file.localID!);
          }
        }
      }
    } catch (e) {
      _logger.severe("Failed to fix bad location cord", e);
    } finally {
      // only successfully handled IDs are dequeued; failures stay for retry
      await _fileUpdationDB.deleteByLocalIDs(
        processedIDs,
        FileUpdationDB.badLocationCord,
      );
    }
  }

  /// One-time import of files uploaded between 20 Apr and 15 May 2023 (the
  /// bad-coordinate window) into the bad-location queue.
  Future<void> _importForBadLocationCord() async {
    if (_prefs.containsKey(isBadLocationCordImportDone)) {
      return;
    }
    _logger.info('_importForBadLocationCord');
    final EnteWatch watch = EnteWatch("_importForBadLocationCord");
    final int ownerID = Configuration.instance.getUserID()!;
    final List<String> localIDs = await FilesDB.instance
        .getFilesWithLocationUploadedBtw20AprTo15May2023(ownerID);
    await _fileUpdationDB.insertMultiple(
      localIDs,
      FileUpdationDB.badLocationCord,
    );
    watch.log("imported ${localIDs.length} files");
    await _prefs.setBool(isBadLocationCordImportDone, true);
  }
}
|