S3: Fix timeout error when renaming large files (#899)

Remove AWS SDK Transport ResponseHeaderTimeout (finer-grained timeouts are already handled by the callers)
Lower the threshold for MultipartCopy (5GB -> 500MB) to improve copy performance and reduce the chance of hitting the single-part copy timeout

Fixes #898

Signed-off-by: Maxime Thébault <contact@maximethebault.me>
This commit is contained in:
maximethebault 2022-06-30 10:23:39 +02:00 committed by Nicola Murino
parent e244ba37b2
commit 756b122ab8
No known key found for this signature in database
GPG key ID: 2F1FB59433D5A8CB

View file

@ -299,7 +299,7 @@ func (fs *S3Fs) Rename(source, target string) error {
}
copySource = pathEscape(copySource)
if fi.Size() > 5*1024*1024*1024 {
if fi.Size() > 500*1024*1024 {
fsLog(fs, logger.LevelDebug, "renaming file %#v with size %v, a multipart copy is required, this may take a while",
source, fi.Size())
err = fs.doMultipartCopy(copySource, target, contentType, fi.Size())
@ -945,7 +945,6 @@ func getAWSHTTPClient(timeout int, idleConnectionTimeout time.Duration) *awshttp
}).
WithTransportOptions(func(tr *http.Transport) {
tr.IdleConnTimeout = idleConnectionTimeout
tr.ResponseHeaderTimeout = 5 * time.Second
tr.WriteBufferSize = s3TransferBufferSize
tr.ReadBufferSize = s3TransferBufferSize
})