S3: Fix timeout error when renaming large files (#899)

Remove AWS SDK Transport ResponseHeaderTimeout (finer-grained timeouts are already handled by the callers)
Lower the threshold for MultipartCopy (5GB -> 500MB) to improve copy performance and reduce the chance of hitting the single-part copy timeout

Fixes #898

Signed-off-by: Maxime Thébault <contact@maximethebault.me>
This commit is contained in:
maximethebault 2022-06-30 10:23:39 +02:00 committed by GitHub
parent 719f6077ab
commit bf2dcfe307
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23

View file

@@ -299,7 +299,7 @@ func (fs *S3Fs) Rename(source, target string) error {
 	}
 	copySource = pathEscape(copySource)
-	if fi.Size() > 5*1024*1024*1024 {
+	if fi.Size() > 500*1024*1024 {
 		fsLog(fs, logger.LevelDebug, "renaming file %#v with size %v, a multipart copy is required, this may take a while",
 			source, fi.Size())
 		err = fs.doMultipartCopy(copySource, target, contentType, fi.Size())
@@ -945,7 +945,6 @@ func getAWSHTTPClient(timeout int, idleConnectionTimeout time.Duration) *awshttp
 	}).
 	WithTransportOptions(func(tr *http.Transport) {
 		tr.IdleConnTimeout = idleConnectionTimeout
-		tr.ResponseHeaderTimeout = 5 * time.Second
 		tr.WriteBufferSize = s3TransferBufferSize
 		tr.ReadBufferSize = s3TransferBufferSize
 	})