From 0dcc3c974c9d193371ef4e7a38ab90cb18b5900c Mon Sep 17 00:00:00 2001
From: Neeraj Gupta <254676+ua741@users.noreply.github.com>
Date: Sat, 11 May 2024 09:46:39 +0530
Subject: [PATCH] [server] Ignore uploadUrl limit while copying files

---
 server/pkg/api/file.go                       | 2 +-
 server/pkg/api/public_collection.go          | 2 +-
 server/pkg/controller/file.go                | 4 ++--
 server/pkg/controller/file_copy/file_copy.go | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/server/pkg/api/file.go b/server/pkg/api/file.go
index a253c71c2a..990336e372 100644
--- a/server/pkg/api/file.go
+++ b/server/pkg/api/file.go
@@ -110,7 +110,7 @@ func (h *FileHandler) GetUploadURLs(c *gin.Context) {
 	userID := auth.GetUserID(c.Request.Header)
 	count, _ := strconv.Atoi(c.Query("count"))
-	urls, err := h.Controller.GetUploadURLs(c, userID, count, enteApp)
+	urls, err := h.Controller.GetUploadURLs(c, userID, count, enteApp, false)
 	if err != nil {
 		handler.Error(c, stacktrace.Propagate(err, ""))
 		return
diff --git a/server/pkg/api/public_collection.go b/server/pkg/api/public_collection.go
index 7a38f43808..9290d64560 100644
--- a/server/pkg/api/public_collection.go
+++ b/server/pkg/api/public_collection.go
@@ -57,7 +57,7 @@ func (h *PublicCollectionHandler) GetUploadUrls(c *gin.Context) {
 	}
 	userID := collection.Owner.ID
 	count, _ := strconv.Atoi(c.Query("count"))
-	urls, err := h.FileCtrl.GetUploadURLs(c, userID, count, enteApp)
+	urls, err := h.FileCtrl.GetUploadURLs(c, userID, count, enteApp, false)
 	if err != nil {
 		handler.Error(c, stacktrace.Propagate(err, ""))
 		return
diff --git a/server/pkg/controller/file.go b/server/pkg/controller/file.go
index e91d299f15..d7a63d2a97 100644
--- a/server/pkg/controller/file.go
+++ b/server/pkg/controller/file.go
@@ -258,7 +258,7 @@ func (c *FileController) Update(ctx context.Context, userID int64, file ente.Fil
 }
 
 // GetUploadURLs returns a bunch of presigned URLs for uploading files
-func (c *FileController) GetUploadURLs(ctx context.Context, userID int64, count int, app ente.App) ([]ente.UploadURL, error) {
+func (c *FileController) GetUploadURLs(ctx context.Context, userID int64, count int, app ente.App, ignoreLimit bool) ([]ente.UploadURL, error) {
 	err := c.UsageCtrl.CanUploadFile(ctx, userID, nil, app)
 	if err != nil {
 		return []ente.UploadURL{}, stacktrace.Propagate(err, "")
 	}
@@ -268,7 +268,7 @@ func (c *FileController) GetUploadURLs(ctx context.Context, userID int64, count
 	bucket := c.S3Config.GetHotBucket()
 	urls := make([]ente.UploadURL, 0)
 	objectKeys := make([]string, 0)
-	if count > MaxUploadURLsLimit {
+	if count > MaxUploadURLsLimit && !ignoreLimit {
 		count = MaxUploadURLsLimit
 	}
 	for i := 0; i < count; i++ {
diff --git a/server/pkg/controller/file_copy/file_copy.go b/server/pkg/controller/file_copy/file_copy.go
index afab10efee..4f9267e2e9 100644
--- a/server/pkg/controller/file_copy/file_copy.go
+++ b/server/pkg/controller/file_copy/file_copy.go
@@ -92,7 +92,7 @@ func (fc *FileCopyController) CopyFiles(c *gin.Context, req ente.CopyFileSyncReq
 
 	// request the uploadUrls using existing method. This is to ensure that orphan objects are automatically cleaned up
 	// todo:(neeraj) optimize this method by removing the need for getting a signed url for each object
-	uploadUrls, err := fc.FileController.GetUploadURLs(c, userID, len(s3ObjectsToCopy), app)
+	uploadUrls, err := fc.FileController.GetUploadURLs(c, userID, len(s3ObjectsToCopy), app, true)
 	if err != nil {
 		return nil, err
 	}