S3: Fix timeout error when renaming large files (#899)
Remove AWS SDK Transport ResponseHeaderTimeout (finer-grained timeouts are already handled by the callers). Lower the threshold for MultipartCopy (5GB -> 500MB) to improve copy performance and reduce the chance of hitting the single-part copy timeout. Fixes #898 Signed-off-by: Maxime Thébault <contact@maximethebault.me>
This commit is contained in:
parent
719f6077ab
commit
bf2dcfe307
1 changed file with 1 addition and 2 deletions
|
@ -299,7 +299,7 @@ func (fs *S3Fs) Rename(source, target string) error {
|
||||||
}
|
}
|
||||||
copySource = pathEscape(copySource)
|
copySource = pathEscape(copySource)
|
||||||
|
|
||||||
if fi.Size() > 5*1024*1024*1024 {
|
if fi.Size() > 500*1024*1024 {
|
||||||
fsLog(fs, logger.LevelDebug, "renaming file %#v with size %v, a multipart copy is required, this may take a while",
|
fsLog(fs, logger.LevelDebug, "renaming file %#v with size %v, a multipart copy is required, this may take a while",
|
||||||
source, fi.Size())
|
source, fi.Size())
|
||||||
err = fs.doMultipartCopy(copySource, target, contentType, fi.Size())
|
err = fs.doMultipartCopy(copySource, target, contentType, fi.Size())
|
||||||
|
@ -945,7 +945,6 @@ func getAWSHTTPClient(timeout int, idleConnectionTimeout time.Duration) *awshttp
|
||||||
}).
|
}).
|
||||||
WithTransportOptions(func(tr *http.Transport) {
|
WithTransportOptions(func(tr *http.Transport) {
|
||||||
tr.IdleConnTimeout = idleConnectionTimeout
|
tr.IdleConnTimeout = idleConnectionTimeout
|
||||||
tr.ResponseHeaderTimeout = 5 * time.Second
|
|
||||||
tr.WriteBufferSize = s3TransferBufferSize
|
tr.WriteBufferSize = s3TransferBufferSize
|
||||||
tr.ReadBufferSize = s3TransferBufferSize
|
tr.ReadBufferSize = s3TransferBufferSize
|
||||||
})
|
})
|
||||||
|
|
Loading…
Reference in a new issue