diff --git a/server/pkg/api/file.go b/server/pkg/api/file.go
index a253c71c2..990336e37 100644
--- a/server/pkg/api/file.go
+++ b/server/pkg/api/file.go
@@ -110,7 +110,7 @@ func (h *FileHandler) GetUploadURLs(c *gin.Context) {
 	userID := auth.GetUserID(c.Request.Header)
 	count, _ := strconv.Atoi(c.Query("count"))
-	urls, err := h.Controller.GetUploadURLs(c, userID, count, enteApp)
+	urls, err := h.Controller.GetUploadURLs(c, userID, count, enteApp, false)
 	if err != nil {
 		handler.Error(c, stacktrace.Propagate(err, ""))
 		return
diff --git a/server/pkg/api/public_collection.go b/server/pkg/api/public_collection.go
index 7a38f4380..9290d6456 100644
--- a/server/pkg/api/public_collection.go
+++ b/server/pkg/api/public_collection.go
@@ -57,7 +57,7 @@ func (h *PublicCollectionHandler) GetUploadUrls(c *gin.Context) {
 	}
 	userID := collection.Owner.ID
 	count, _ := strconv.Atoi(c.Query("count"))
-	urls, err := h.FileCtrl.GetUploadURLs(c, userID, count, enteApp)
+	urls, err := h.FileCtrl.GetUploadURLs(c, userID, count, enteApp, false)
 	if err != nil {
 		handler.Error(c, stacktrace.Propagate(err, ""))
 		return
diff --git a/server/pkg/controller/embedding/controller.go b/server/pkg/controller/embedding/controller.go
index 349ab9d9d..bf317ccfe 100644
--- a/server/pkg/controller/embedding/controller.go
+++ b/server/pkg/controller/embedding/controller.go
@@ -30,7 +30,8 @@ import (
 
 const (
 	// maxEmbeddingDataSize is the min size of an embedding object in bytes
-	minEmbeddingDataSize = 2048
+	minEmbeddingDataSize  = 2048
+	embeddingFetchTimeout = 15 * gTime.Second
 )
 
 type Controller struct {
@@ -345,7 +346,7 @@ func (c *Controller) getEmbeddingObjectsParallelV2(userID int64, dbEmbeddingRows
 			defer wg.Done()
 			defer func() { <-globalFileFetchSemaphore }() // Release back to global semaphore
 			objectKey := c.getObjectKey(userID, dbEmbeddingRow.FileID, dbEmbeddingRow.Model)
-			ctx, cancel := context.WithTimeout(context.Background(), gTime.Second*10) // 10 seconds timeout
+			ctx, cancel := context.WithTimeout(context.Background(), embeddingFetchTimeout)
			defer cancel()
			obj, err := c.getEmbeddingObjectWithRetries(ctx, objectKey, downloader, 0)
			if err != nil {
diff --git a/server/pkg/controller/file.go b/server/pkg/controller/file.go
index e91d299f1..d7a63d2a9 100644
--- a/server/pkg/controller/file.go
+++ b/server/pkg/controller/file.go
@@ -258,7 +258,7 @@ func (c *FileController) Update(ctx context.Context, userID int64, file ente.Fil
 }
 
 // GetUploadURLs returns a bunch of presigned URLs for uploading files
-func (c *FileController) GetUploadURLs(ctx context.Context, userID int64, count int, app ente.App) ([]ente.UploadURL, error) {
+func (c *FileController) GetUploadURLs(ctx context.Context, userID int64, count int, app ente.App, ignoreLimit bool) ([]ente.UploadURL, error) {
 	err := c.UsageCtrl.CanUploadFile(ctx, userID, nil, app)
 	if err != nil {
 		return []ente.UploadURL{}, stacktrace.Propagate(err, "")
@@ -268,7 +268,7 @@ func (c *FileController) GetUploadURLs(ctx context.Context, userID int64, count
 	bucket := c.S3Config.GetHotBucket()
 	urls := make([]ente.UploadURL, 0)
 	objectKeys := make([]string, 0)
-	if count > MaxUploadURLsLimit {
+	if count > MaxUploadURLsLimit && !ignoreLimit {
 		count = MaxUploadURLsLimit
 	}
 	for i := 0; i < count; i++ {
diff --git a/server/pkg/controller/file_copy/file_copy.go b/server/pkg/controller/file_copy/file_copy.go
index afab10efe..4f9267e2e 100644
--- a/server/pkg/controller/file_copy/file_copy.go
+++ b/server/pkg/controller/file_copy/file_copy.go
@@ -92,7 +92,7 @@ func (fc *FileCopyController) CopyFiles(c *gin.Context, req ente.CopyFileSyncReq
 	// request the uploadUrls using existing method. This is to ensure that orphan objects are automatically cleaned up
 	// todo:(neeraj) optimize this method by removing the need for getting a signed url for each object
-	uploadUrls, err := fc.FileController.GetUploadURLs(c, userID, len(s3ObjectsToCopy), app)
+	uploadUrls, err := fc.FileController.GetUploadURLs(c, userID, len(s3ObjectsToCopy), app, true)
 	if err != nil {
 		return nil, err
 	}