From 756b122ab8041ab3fb04ef4f45b2a589bf3440ca Mon Sep 17 00:00:00 2001 From: maximethebault Date: Thu, 30 Jun 2022 10:23:39 +0200 Subject: [PATCH] S3: Fix timeout error when renaming large files (#899) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove AWS SDK Transport ResponseHeaderTimeout (finer-grained timeouts are already handled by the callers) Lower the threshold for MultipartCopy (5GB -> 500MB) to improve copy performance and reduce the chance of hitting the single-part copy timeout Fixes #898 Signed-off-by: Maxime Thébault --- vfs/s3fs.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/vfs/s3fs.go b/vfs/s3fs.go index e2049464..650c707e 100644 --- a/vfs/s3fs.go +++ b/vfs/s3fs.go @@ -299,7 +299,7 @@ func (fs *S3Fs) Rename(source, target string) error { } copySource = pathEscape(copySource) - if fi.Size() > 5*1024*1024*1024 { + if fi.Size() > 500*1024*1024 { fsLog(fs, logger.LevelDebug, "renaming file %#v with size %v, a multipart copy is required, this may take a while", source, fi.Size()) err = fs.doMultipartCopy(copySource, target, contentType, fi.Size()) @@ -945,7 +945,6 @@ func getAWSHTTPClient(timeout int, idleConnectionTimeout time.Duration) *awshttp }). WithTransportOptions(func(tr *http.Transport) { tr.IdleConnTimeout = idleConnectionTimeout - tr.ResponseHeaderTimeout = 5 * time.Second tr.WriteBufferSize = s3TransferBufferSize tr.ReadBufferSize = s3TransferBufferSize })