diff --git a/lib/services/semantic_search/semantic_search_service.dart b/lib/services/semantic_search/semantic_search_service.dart
index cdbfd9e83..5249e6a00 100644
--- a/lib/services/semantic_search/semantic_search_service.dart
+++ b/lib/services/semantic_search/semantic_search_service.dart
@@ -36,7 +36,6 @@ class SemanticSearchService {
   final _logger = Logger("SemanticSearchService");
   final _queue = Queue();
-  final _cachedEmbeddings = [];
   final _mlFramework = kCurrentModel == Model.onnxClip ? ONNX() : GGML();
   final _frameworkInitialization = Completer();
 
@@ -44,6 +43,7 @@ class SemanticSearchService {
   bool _isComputingEmbeddings = false;
   bool _isSyncing = false;
   Future<List<EnteFile>>? _ongoingRequest;
+  List<Embedding> _cachedEmbeddings = [];
   PendingQuery? _nextQuery;
 
   get hasInitialized => _hasInitialized;
@@ -140,18 +140,16 @@ class SemanticSearchService {
   Future<void> _setupCachedEmbeddings(bool shouldListenForUpdates) async {
     _logger.info("Setting up cached embeddings");
     final startTime = DateTime.now();
-    final cachedEmbeddings = await EmbeddingsDB.instance.getAll(kCurrentModel);
+    _cachedEmbeddings = await EmbeddingsDB.instance.getAll(kCurrentModel);
     final endTime = DateTime.now();
     _logger.info(
-      "Loading ${cachedEmbeddings.length} took: ${(endTime.millisecondsSinceEpoch - startTime.millisecondsSinceEpoch)}ms",
+      "Loading ${_cachedEmbeddings.length} took: ${(endTime.millisecondsSinceEpoch - startTime.millisecondsSinceEpoch)}ms",
     );
-    _cachedEmbeddings.addAll(cachedEmbeddings);
     _logger.info("Cached embeddings: " + _cachedEmbeddings.length.toString());
     if (shouldListenForUpdates) {
       EmbeddingsDB.instance.getStream(kCurrentModel).listen((embeddings) {
         _logger.info("Updated embeddings: " + embeddings.length.toString());
-        _cachedEmbeddings.clear();
-        _cachedEmbeddings.addAll(embeddings);
+        _cachedEmbeddings = embeddings;
         Bus.instance.fire(EmbeddingUpdatedEvent());
       });
     }