[server] Fix panic in copy file + increase emb fetch timeout (#1685)

## Description

Fixes a panic in the copy-file flow and increases the embedding fetch timeout.

- `FileController.GetUploadURLs` now takes an `ignoreLimit` flag: the HTTP handlers pass `false` so client-supplied counts stay capped at `MaxUploadURLsLimit`, while the file-copy controller passes `true`.
- The per-object embedding fetch timeout is raised from a hard-coded 10 seconds to a named constant, `embeddingFetchTimeout = 15 * gTime.Second`.

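A minimal, runnable Go sketch of the failure mode and the fix. The function, the URL format, and the limit value of 25 are illustrative stand-ins, not the server's actual code:

```go
package main

import "fmt"

// Hypothetical stand-in for the server's MaxUploadURLsLimit; the real
// value is not shown in this diff.
const maxUploadURLsLimit = 25

// getUploadURLs mirrors the post-fix control flow: the cap applies
// unless the caller explicitly opts out.
func getUploadURLs(count int, ignoreLimit bool) []string {
	if count > maxUploadURLsLimit && !ignoreLimit {
		count = maxUploadURLsLimit
	}
	urls := make([]string, 0, count)
	for i := 0; i < count; i++ {
		urls = append(urls, fmt.Sprintf("https://bucket.example/presigned/%d", i))
	}
	return urls
}

func main() {
	objectsToCopy := 30 // more objects than the cap

	capped := getUploadURLs(objectsToCopy, false)
	fmt.Println(len(capped)) // 25: indexing urls[i] per object would panic

	exact := getUploadURLs(objectsToCopy, true)
	fmt.Println(len(exact)) // 30: one URL per object, as the copy flow needs
}
```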
## Tests
Neeraj Gupta 2024-05-11 09:51:12 +05:30 committed by GitHub
commit 0443946790
5 changed files with 8 additions and 7 deletions

```diff
@@ -110,7 +110,7 @@ func (h *FileHandler) GetUploadURLs(c *gin.Context) {
 	userID := auth.GetUserID(c.Request.Header)
 	count, _ := strconv.Atoi(c.Query("count"))
-	urls, err := h.Controller.GetUploadURLs(c, userID, count, enteApp)
+	urls, err := h.Controller.GetUploadURLs(c, userID, count, enteApp, false)
 	if err != nil {
 		handler.Error(c, stacktrace.Propagate(err, ""))
 		return
```


```diff
@@ -57,7 +57,7 @@ func (h *PublicCollectionHandler) GetUploadUrls(c *gin.Context) {
 	}
 	userID := collection.Owner.ID
 	count, _ := strconv.Atoi(c.Query("count"))
-	urls, err := h.FileCtrl.GetUploadURLs(c, userID, count, enteApp)
+	urls, err := h.FileCtrl.GetUploadURLs(c, userID, count, enteApp, false)
 	if err != nil {
 		handler.Error(c, stacktrace.Propagate(err, ""))
 		return
```


```diff
@@ -30,7 +30,8 @@ import (
 const (
 	// maxEmbeddingDataSize is the min size of an embedding object in bytes
 	minEmbeddingDataSize = 2048
+	embeddingFetchTimeout = 15 * gTime.Second
 )

 type Controller struct {
@@ -345,7 +346,7 @@ func (c *Controller) getEmbeddingObjectsParallelV2(userID int64, dbEmbeddingRows
 			defer wg.Done()
 			defer func() { <-globalFileFetchSemaphore }() // Release back to global semaphore
 			objectKey := c.getObjectKey(userID, dbEmbeddingRow.FileID, dbEmbeddingRow.Model)
-			ctx, cancel := context.WithTimeout(context.Background(), gTime.Second*10) // 10 seconds timeout
+			ctx, cancel := context.WithTimeout(context.Background(), embeddingFetchTimeout)
 			defer cancel()
 			obj, err := c.getEmbeddingObjectWithRetries(ctx, objectKey, downloader, 0)
 			if err != nil {
```
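For context, a minimal sketch of the pattern this hunk adopts: a package-level timeout constant fed to `context.WithTimeout`, replacing the inline `gTime.Second*10` magic number (the `gTime` alias for the standard `time` package matches the import visible in the hunk):

```go
package main

import (
	"context"
	"fmt"
	gTime "time" // same alias the server uses for the standard time package
)

// A named constant makes the timeout self-documenting and reusable.
const embeddingFetchTimeout = 15 * gTime.Second

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), embeddingFetchTimeout)
	defer cancel()

	deadline, _ := ctx.Deadline()
	fmt.Println("embedding fetch must complete by:", deadline)
}
```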


```diff
@@ -258,7 +258,7 @@ func (c *FileController) Update(ctx context.Context, userID int64, file ente.Fil
 }

 // GetUploadURLs returns a bunch of presigned URLs for uploading files
-func (c *FileController) GetUploadURLs(ctx context.Context, userID int64, count int, app ente.App) ([]ente.UploadURL, error) {
+func (c *FileController) GetUploadURLs(ctx context.Context, userID int64, count int, app ente.App, ignoreLimit bool) ([]ente.UploadURL, error) {
 	err := c.UsageCtrl.CanUploadFile(ctx, userID, nil, app)
 	if err != nil {
 		return []ente.UploadURL{}, stacktrace.Propagate(err, "")
@@ -268,7 +268,7 @@ func (c *FileController) GetUploadURLs(ctx context.Context, userID int64, count
 	bucket := c.S3Config.GetHotBucket()
 	urls := make([]ente.UploadURL, 0)
 	objectKeys := make([]string, 0)
-	if count > MaxUploadURLsLimit {
+	if count > MaxUploadURLsLimit && !ignoreLimit {
 		count = MaxUploadURLsLimit
 	}
 	for i := 0; i < count; i++ {
```
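With the new parameter, the HTTP handlers above keep passing `false`, so client-supplied counts remain capped at `MaxUploadURLsLimit`, while trusted internal callers can pass `true` to receive exactly as many URLs as they request.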


```diff
@@ -92,7 +92,7 @@ func (fc *FileCopyController) CopyFiles(c *gin.Context, req ente.CopyFileSyncReq
 	// request the uploadUrls using existing method. This is to ensure that orphan objects are automatically cleaned up
 	// todo:(neeraj) optimize this method by removing the need for getting a signed url for each object
-	uploadUrls, err := fc.FileController.GetUploadURLs(c, userID, len(s3ObjectsToCopy), app)
+	uploadUrls, err := fc.FileController.GetUploadURLs(c, userID, len(s3ObjectsToCopy), app, true)
 	if err != nil {
 		return nil, err
 	}
```
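Passing `true` here guarantees one presigned URL per entry in `s3ObjectsToCopy`. Under the old signature the count was silently capped, so copying more files than `MaxUploadURLsLimit` left the URL slice shorter than the object list, which is the likely source of the panic this commit fixes.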