S3: expose more properties, possibly backward-incompatible change

Before these changes, we implicitly set S3ForcePathStyle whenever a custom
endpoint was provided.

Implicitly forcing path-style addressing can cause issues with some
S3-compatible object storage services, so the option must now be set explicitly.

AWS is also deprecating path-style addressing:

https://aws.amazon.com/it/blogs/aws/amazon-s3-path-deprecation-plan-the-rest-of-the-story/
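In practice, setups that point at an S3-compatible service which only supports path-style URLs now need to enable the option themselves. A minimal sketch of the new behaviour, assuming the aws-sdk-go v1 "aws" package this commit already uses; the helper name is illustrative and not code from the repository:

package s3sketch

import "github.com/aws/aws-sdk-go/aws"

// newS3AWSConfig mirrors, in simplified form, what this commit does when
// building the AWS client configuration: path-style addressing is no longer
// inferred from the presence of a custom endpoint, it is only enabled when
// ForcePathStyle is explicitly set.
func newS3AWSConfig(endpoint string, forcePathStyle bool) *aws.Config {
	awsConfig := aws.NewConfig()
	if endpoint != "" {
		awsConfig.Endpoint = aws.String(endpoint)
	}
	if forcePathStyle {
		// before this change the flag was set implicitly whenever an
		// endpoint was configured
		awsConfig.S3ForcePathStyle = aws.Bool(true)
	}
	return awsConfig
}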
Nicola Murino 2021-07-23 16:56:48 +02:00
parent c997ef876c
commit 85a47810ff
9 changed files with 197 additions and 30 deletions

@@ -960,6 +960,28 @@ func TestAddUserInvalidFsConfig(t *testing.T) {
u.FsConfig.S3Config.UploadConcurrency = -1
_, _, err = httpdtest.AddUser(u, http.StatusBadRequest)
assert.NoError(t, err)
u.FsConfig.S3Config.UploadConcurrency = 0
u.FsConfig.S3Config.DownloadPartSize = -1
_, resp, err := httpdtest.AddUser(u, http.StatusBadRequest)
if assert.NoError(t, err) {
assert.Contains(t, string(resp), "download_part_size cannot be")
}
u.FsConfig.S3Config.DownloadPartSize = 5001
_, resp, err = httpdtest.AddUser(u, http.StatusBadRequest)
if assert.NoError(t, err) {
assert.Contains(t, string(resp), "download_part_size cannot be")
}
u.FsConfig.S3Config.DownloadPartSize = 0
u.FsConfig.S3Config.DownloadConcurrency = 100
_, resp, err = httpdtest.AddUser(u, http.StatusBadRequest)
if assert.NoError(t, err) {
assert.Contains(t, string(resp), "invalid download concurrency")
}
u.FsConfig.S3Config.DownloadConcurrency = -1
_, resp, err = httpdtest.AddUser(u, http.StatusBadRequest)
if assert.NoError(t, err) {
assert.Contains(t, string(resp), "invalid download concurrency")
}
u = getTestUser()
u.FsConfig.Provider = sdk.GCSFilesystemProvider
u.FsConfig.GCSConfig.Bucket = ""
@@ -1035,7 +1057,7 @@ func TestAddUserInvalidFsConfig(t *testing.T) {
u.FsConfig.SFTPConfig.Endpoint = "127.1.1.1:22"
u.FsConfig.SFTPConfig.Username = defaultUsername
u.FsConfig.SFTPConfig.BufferSize = -1
_, resp, err := httpdtest.AddUser(u, http.StatusBadRequest)
_, resp, err = httpdtest.AddUser(u, http.StatusBadRequest)
if assert.NoError(t, err) {
assert.Contains(t, string(resp), "invalid buffer_size")
}
@@ -1591,6 +1613,8 @@ func TestUserS3Config(t *testing.T) {
user.FsConfig.S3Config.Endpoint = "http://127.0.0.1:9000"
user.FsConfig.S3Config.UploadPartSize = 8
user.FsConfig.S3Config.DownloadPartMaxTime = 60
user.FsConfig.S3Config.ForcePathStyle = true
user.FsConfig.S3Config.DownloadPartSize = 6
folderName := "vfolderName"
user.VirtualFolders = append(user.VirtualFolders, vfs.VirtualFolder{
BaseVirtualFolder: vfs.BaseVirtualFolder{
@@ -1653,6 +1677,7 @@ func TestUserS3Config(t *testing.T) {
user.FsConfig.S3Config.Endpoint = "http://localhost:9000"
user.FsConfig.S3Config.KeyPrefix = "somedir/subdir" //nolint:goconst
user.FsConfig.S3Config.UploadConcurrency = 5
user.FsConfig.S3Config.DownloadConcurrency = 4
user, bb, err := httpdtest.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err, string(bb))
assert.Equal(t, kms.SecretStatusSecretBox, user.FsConfig.S3Config.AccessSecret.GetStatus())
@@ -7992,6 +8017,8 @@ func TestUserTemplateMock(t *testing.T) {
user.FsConfig.S3Config.KeyPrefix = "somedir/subdir/"
user.FsConfig.S3Config.UploadPartSize = 5
user.FsConfig.S3Config.UploadConcurrency = 4
user.FsConfig.S3Config.DownloadPartSize = 6
user.FsConfig.S3Config.DownloadConcurrency = 3
csrfToken, err := getCSRFToken(httpBaseURL + webLoginPath)
assert.NoError(t, err)
form := make(url.Values)
@@ -8033,6 +8060,8 @@ func TestUserTemplateMock(t *testing.T) {
checkResponseCode(t, http.StatusBadRequest, rr)
form.Set("s3_upload_part_size", strconv.FormatInt(user.FsConfig.S3Config.UploadPartSize, 10))
form.Set("s3_upload_concurrency", strconv.Itoa(user.FsConfig.S3Config.UploadConcurrency))
form.Set("s3_download_part_size", strconv.FormatInt(user.FsConfig.S3Config.DownloadPartSize, 10))
form.Set("s3_download_concurrency", strconv.Itoa(user.FsConfig.S3Config.DownloadConcurrency))
b, contentType, _ = getMultipartFormData(form, "", "")
req, _ = http.NewRequest(http.MethodPost, webTemplateUser, &b)
@@ -8193,6 +8222,8 @@ func TestFolderTemplateMock(t *testing.T) {
form.Set("s3_upload_part_size", "5")
form.Set("s3_upload_concurrency", "4")
form.Set("s3_download_part_max_time", "0")
form.Set("s3_download_part_size", "6")
form.Set("s3_download_concurrency", "2")
b, contentType, _ = getMultipartFormData(form, "", "")
req, _ = http.NewRequest(http.MethodPost, webTemplateFolder, &b)
setJWTCookieForReq(req, token)
@@ -8271,6 +8302,9 @@ func TestWebUserS3Mock(t *testing.T) {
user.FsConfig.S3Config.UploadPartSize = 5
user.FsConfig.S3Config.UploadConcurrency = 4
user.FsConfig.S3Config.DownloadPartMaxTime = 60
user.FsConfig.S3Config.DownloadPartSize = 6
user.FsConfig.S3Config.DownloadConcurrency = 3
user.FsConfig.S3Config.ForcePathStyle = true
user.Description = "s3 tèst user"
form := make(url.Values)
form.Set(csrfFormToken, csrfToken)
@@ -8304,6 +8338,7 @@ func TestWebUserS3Mock(t *testing.T) {
form.Set("patterns1", "*.zip")
form.Set("pattern_type1", "denied")
form.Set("max_upload_file_size", "0")
form.Set("s3_force_path_style", "checked")
form.Set("description", user.Description)
form.Add("hooks", "pre_login_disabled")
// test invalid s3_upload_part_size
@@ -8314,17 +8349,8 @@ func TestWebUserS3Mock(t *testing.T) {
req.Header.Set("Content-Type", contentType)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr)
// test invalid download max part time
// test invalid s3_upload_concurrency
form.Set("s3_upload_part_size", strconv.FormatInt(user.FsConfig.S3Config.UploadPartSize, 10))
form.Set("s3_download_part_max_time", "a")
b, contentType, _ = getMultipartFormData(form, "", "")
req, _ = http.NewRequest(http.MethodPost, path.Join(webUserPath, user.Username), &b)
setJWTCookieForReq(req, webToken)
req.Header.Set("Content-Type", contentType)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr)
// test invalid s3_concurrency
form.Set("s3_download_part_max_time", strconv.Itoa(user.FsConfig.S3Config.DownloadPartMaxTime))
form.Set("s3_upload_concurrency", "a")
b, contentType, _ = getMultipartFormData(form, "", "")
req, _ = http.NewRequest(http.MethodPost, path.Join(webUserPath, user.Username), &b)
@@ -8332,8 +8358,35 @@ func TestWebUserS3Mock(t *testing.T) {
req.Header.Set("Content-Type", contentType)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr)
// now add the user
// test invalid s3_download_part_size
form.Set("s3_upload_concurrency", strconv.Itoa(user.FsConfig.S3Config.UploadConcurrency))
form.Set("s3_download_part_size", "a")
b, contentType, _ = getMultipartFormData(form, "", "")
req, _ = http.NewRequest(http.MethodPost, path.Join(webUserPath, user.Username), &b)
setJWTCookieForReq(req, webToken)
req.Header.Set("Content-Type", contentType)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr)
// test invalid s3_download_concurrency
form.Set("s3_download_part_size", strconv.FormatInt(user.FsConfig.S3Config.DownloadPartSize, 10))
form.Set("s3_download_concurrency", "a")
b, contentType, _ = getMultipartFormData(form, "", "")
req, _ = http.NewRequest(http.MethodPost, path.Join(webUserPath, user.Username), &b)
setJWTCookieForReq(req, webToken)
req.Header.Set("Content-Type", contentType)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr)
// test invalid s3_download_part_max_time
form.Set("s3_download_concurrency", strconv.Itoa(user.FsConfig.S3Config.DownloadConcurrency))
form.Set("s3_download_part_max_time", "a")
b, contentType, _ = getMultipartFormData(form, "", "")
req, _ = http.NewRequest(http.MethodPost, path.Join(webUserPath, user.Username), &b)
setJWTCookieForReq(req, webToken)
req.Header.Set("Content-Type", contentType)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr)
// now add the user
form.Set("s3_download_part_max_time", strconv.Itoa(user.FsConfig.S3Config.DownloadPartMaxTime))
b, contentType, _ = getMultipartFormData(form, "", "")
req, _ = http.NewRequest(http.MethodPost, path.Join(webUserPath, user.Username), &b)
setJWTCookieForReq(req, webToken)
@@ -8357,6 +8410,9 @@ func TestWebUserS3Mock(t *testing.T) {
assert.Equal(t, updateUser.FsConfig.S3Config.UploadPartSize, user.FsConfig.S3Config.UploadPartSize)
assert.Equal(t, updateUser.FsConfig.S3Config.UploadConcurrency, user.FsConfig.S3Config.UploadConcurrency)
assert.Equal(t, updateUser.FsConfig.S3Config.DownloadPartMaxTime, user.FsConfig.S3Config.DownloadPartMaxTime)
assert.Equal(t, updateUser.FsConfig.S3Config.DownloadPartSize, user.FsConfig.S3Config.DownloadPartSize)
assert.Equal(t, updateUser.FsConfig.S3Config.DownloadConcurrency, user.FsConfig.S3Config.DownloadConcurrency)
assert.True(t, updateUser.FsConfig.S3Config.ForcePathStyle)
assert.Equal(t, 2, len(updateUser.Filters.FilePatterns))
assert.Equal(t, kms.SecretStatusSecretBox, updateUser.FsConfig.S3Config.AccessSecret.GetStatus())
assert.NotEmpty(t, updateUser.FsConfig.S3Config.AccessSecret.GetPayload())
@@ -9001,6 +9057,8 @@ func TestS3WebFolderMock(t *testing.T) {
S3UploadPartSize := 5
S3UploadConcurrency := 4
S3MaxPartDownloadTime := 120
S3DownloadPartSize := 6
S3DownloadConcurrency := 3
form := make(url.Values)
form.Set("mapped_path", mappedPath)
form.Set("name", folderName)
@@ -9015,6 +9073,8 @@ func TestS3WebFolderMock(t *testing.T) {
form.Set("s3_key_prefix", S3KeyPrefix)
form.Set("s3_upload_part_size", strconv.Itoa(S3UploadPartSize))
form.Set("s3_download_part_max_time", strconv.Itoa(S3MaxPartDownloadTime))
form.Set("s3_download_part_size", strconv.Itoa(S3DownloadPartSize))
form.Set("s3_download_concurrency", strconv.Itoa(S3DownloadConcurrency))
form.Set("s3_upload_concurrency", "a")
form.Set(csrfFormToken, csrfToken)
b, contentType, err := getMultipartFormData(form, "", "")
@@ -9057,6 +9117,9 @@ func TestS3WebFolderMock(t *testing.T) {
assert.Equal(t, S3UploadConcurrency, folder.FsConfig.S3Config.UploadConcurrency)
assert.Equal(t, int64(S3UploadPartSize), folder.FsConfig.S3Config.UploadPartSize)
assert.Equal(t, S3MaxPartDownloadTime, folder.FsConfig.S3Config.DownloadPartMaxTime)
assert.Equal(t, S3DownloadConcurrency, folder.FsConfig.S3Config.DownloadConcurrency)
assert.Equal(t, int64(S3DownloadPartSize), folder.FsConfig.S3Config.DownloadPartSize)
assert.False(t, folder.FsConfig.S3Config.ForcePathStyle)
// update
S3UploadConcurrency = 10
form.Set("s3_upload_concurrency", "b")

@@ -2370,10 +2370,19 @@ components:
description: 'the buffer size (in MB) to use for multipart uploads. The minimum allowed part size is 5MB, and if this value is set to zero, the default value (5MB) for the AWS SDK will be used. The minimum allowed value is 5.'
upload_concurrency:
type: integer
description: 'the number of parts to upload in parallel. If this value is set to zero, the default value (2) will be used'
description: 'the number of parts to upload in parallel. If this value is set to zero, the default value (5) will be used'
download_part_size:
type: integer
description: 'the buffer size (in MB) to use for multipart downloads. The minimum allowed part size is 5MB, and if this value is set to zero, the default value (5MB) for the AWS SDK will be used. The minimum allowed value is 5. Ignored for partial downloads'
download_concurrency:
type: integer
description: 'the number of parts to download in parallel. If this value is set to zero, the default value (5) will be used. Ignored for partial downloads'
download_part_max_time:
type: integer
description: 'the maximum time allowed, in seconds, to download a single chunk (5MB). 0 means no timeout. Ignored for non-multipart downloads.'
description: 'the maximum time allowed, in seconds, to download a single chunk (the chunk is defined via "download_part_size"). 0 means no timeout. Ignored for partial downloads.'
force_path_style:
type: boolean
description: 'Set this to "true" to force the request to use path-style addressing, i.e., "http://s3.amazonaws.com/BUCKET/KEY". By default, the S3 client will use virtual hosted bucket addressing when possible ("http://BUCKET.s3.amazonaws.com/KEY")'
key_prefix:
type: string
description: 'key_prefix is similar to a chroot directory for a local filesystem. If specified the user will only see contents that starts with this prefix and so you can restrict access to a specific virtual folder. The prefix, if not empty, must not start with "/" and must end with "/". If empty the whole bucket contents will be available'
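Taken together, the new properties map onto the S3FsConfig fields added further below in this diff. A hedged configuration example: the field names, json semantics, and defaults come from this commit (Bucket and Endpoint are pre-existing fields not visible in the struct hunk below), while the concrete values and the unqualified type usage are illustrative only:

// illustrative values; zero keeps the AWS SDK defaults mentioned above
s3Config := S3FsConfig{
	Bucket:              "mybucket",              // example bucket
	Endpoint:            "http://127.0.0.1:9000", // custom S3-compatible endpoint
	UploadPartSize:      5,                       // MB, 0 = SDK default (5 MB)
	UploadConcurrency:   4,                       // 0 = SDK default (5)
	DownloadPartSize:    6,                       // MB, ignored for partial downloads
	DownloadConcurrency: 3,                       // ignored for partial downloads
	DownloadPartMaxTime: 60,                      // seconds per chunk, 0 = no timeout
	ForcePathStyle:      true,                    // http://ENDPOINT/BUCKET/KEY instead of virtual-hosted style
}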

@@ -686,11 +686,20 @@ func getS3Config(r *http.Request) (vfs.S3FsConfig, error) {
if err != nil {
return config, err
}
config.DownloadPartMaxTime, err = strconv.Atoi(r.Form.Get("s3_download_part_max_time"))
config.UploadConcurrency, err = strconv.Atoi(r.Form.Get("s3_upload_concurrency"))
if err != nil {
return config, err
}
config.UploadConcurrency, err = strconv.Atoi(r.Form.Get("s3_upload_concurrency"))
config.DownloadPartSize, err = strconv.ParseInt(r.Form.Get("s3_download_part_size"), 10, 64)
if err != nil {
return config, err
}
config.DownloadConcurrency, err = strconv.Atoi(r.Form.Get("s3_download_concurrency"))
if err != nil {
return config, err
}
config.ForcePathStyle = r.Form.Get("s3_force_path_style") != ""
config.DownloadPartMaxTime, err = strconv.Atoi(r.Form.Get("s3_download_part_max_time"))
return config, err
}

@@ -1057,12 +1057,21 @@ func compareS3Config(expected *vfs.Filesystem, actual *vfs.Filesystem) error {
if expected.S3Config.UploadPartSize != actual.S3Config.UploadPartSize {
return errors.New("fs S3 upload part size mismatch")
}
if expected.S3Config.DownloadPartMaxTime != actual.S3Config.DownloadPartMaxTime {
return errors.New("fs S3 download part max time mismatch")
}
if expected.S3Config.UploadConcurrency != actual.S3Config.UploadConcurrency {
return errors.New("fs S3 upload concurrency mismatch")
}
if expected.S3Config.DownloadPartSize != actual.S3Config.DownloadPartSize {
return errors.New("fs S3 download part size mismatch")
}
if expected.S3Config.DownloadConcurrency != actual.S3Config.DownloadConcurrency {
return errors.New("fs S3 download concurrency mismatch")
}
if expected.S3Config.ForcePathStyle != actual.S3Config.ForcePathStyle {
return errors.New("fs S3 force path style mismatch")
}
if expected.S3Config.DownloadPartMaxTime != actual.S3Config.DownloadPartMaxTime {
return errors.New("fs S3 download part max time mismatch")
}
if expected.S3Config.KeyPrefix != actual.S3Config.KeyPrefix &&
expected.S3Config.KeyPrefix+"/" != actual.S3Config.KeyPrefix {
return errors.New("fs S3 key prefix mismatch")

@@ -109,9 +109,20 @@ type S3FsConfig struct {
UploadPartSize int64 `json:"upload_part_size,omitempty"`
// How many parts are uploaded in parallel
UploadConcurrency int `json:"upload_concurrency,omitempty"`
// The buffer size (in MB) to use for multipart downloads. The minimum allowed part size is 5MB,
// and if this value is set to zero, the default value (5MB) for the AWS SDK will be used.
// The minimum allowed value is 5. Ignored for partial downloads.
DownloadPartSize int64 `json:"download_part_size,omitempty"`
// How many parts are downloaded in parallel. Ignored for partial downloads.
DownloadConcurrency int `json:"download_concurrency,omitempty"`
// DownloadPartMaxTime defines the maximum time allowed, in seconds, to download a single chunk (5MB).
// 0 means no timeout. Ignored for non-multipart downloads.
// 0 means no timeout. Ignored for partial downloads.
DownloadPartMaxTime int `json:"download_part_max_time,omitempty"`
// Set this to `true` to force the request to use path-style addressing,
// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
// will use virtual hosted bucket addressing when possible
// (`http://BUCKET.s3.amazonaws.com/KEY`)
ForcePathStyle bool `json:"force_path_style,omitempty"`
}
// GCSFsConfig defines the configuration for Google Cloud Storage based filesystem

@@ -72,7 +72,28 @@
placeholder="" value="{{.S3Config.UploadConcurrency}}" min="0"
aria-describedby="S3ConcurrencyHelpBlock">
<small id="S3ConcurrencyHelpBlock" class="form-text text-muted">
How many parts are uploaded in parallel. Zero means the default (2)
How many parts are uploaded in parallel. Zero means the default (5)
</small>
</div>
</div>
<div class="form-group row fsconfig fsconfig-s3fs">
<label for="idS3DLPartSize" class="col-sm-2 col-form-label">DL Part Size (MB)</label>
<div class="col-sm-3">
<input type="number" class="form-control" id="idS3DLPartSize" name="s3_download_part_size" placeholder=""
value="{{.S3Config.DownloadPartSize}}" aria-describedby="S3DLPartSizeHelpBlock">
<small id="S3DLPartSizeHelpBlock" class="form-text text-muted">
The buffer size for multipart downloads. Zero means the default (5 MB). Minimum is 5
</small>
</div>
<div class="col-sm-2"></div>
<label for="idS3DownloadConcurrency" class="col-sm-2 col-form-label">DL Concurrency</label>
<div class="col-sm-3">
<input type="number" class="form-control" id="idS3DownloadConcurrency" name="s3_download_concurrency"
placeholder="" value="{{.S3Config.DownloadConcurrency}}" min="0"
aria-describedby="S3DLConcurrencyHelpBlock">
<small id="S3DLConcurrencyHelpBlock" class="form-text text-muted">
How many parts are downloaded in parallel. Zero means the default (5)
</small>
</div>
</div>
@@ -98,6 +119,14 @@
</div>
</div>
<div class="form-group fsconfig fsconfig-s3fs">
<div class="form-check">
<input type="checkbox" class="form-check-input" id="idS3ForcePathStyle" name="s3_force_path_style"
{{if .S3Config.ForcePathStyle}}checked{{end}}>
<label for="idS3ForcePathStyle" class="form-check-label">Use path-style addressing, i.e., "`endpoint`/BUCKET/KEY"</label>
</div>
</div>
<div class="form-group row fsconfig fsconfig-gcsfs">
<label for="idGCSBucket" class="col-sm-2 col-form-label">Bucket</label>
<div class="col-sm-10">

@@ -237,7 +237,10 @@ func (f *Filesystem) GetACopy() Filesystem {
KeyPrefix: f.S3Config.KeyPrefix,
UploadPartSize: f.S3Config.UploadPartSize,
UploadConcurrency: f.S3Config.UploadConcurrency,
DownloadPartSize: f.S3Config.DownloadPartSize,
DownloadConcurrency: f.S3Config.DownloadConcurrency,
DownloadPartMaxTime: f.S3Config.DownloadPartMaxTime,
ForcePathStyle: f.S3Config.ForcePathStyle,
},
},
GCSConfig: GCSFsConfig{

@@ -84,16 +84,25 @@ func NewS3Fs(connectionID, localTempDir, mountPath string, config S3FsConfig) (F
if fs.config.Endpoint != "" {
awsConfig.Endpoint = aws.String(fs.config.Endpoint)
}
if fs.config.ForcePathStyle {
awsConfig.S3ForcePathStyle = aws.Bool(true)
}
if fs.config.UploadPartSize == 0 {
fs.config.UploadPartSize = s3manager.DefaultUploadPartSize
} else {
fs.config.UploadPartSize *= 1024 * 1024
}
if fs.config.UploadConcurrency == 0 {
fs.config.UploadConcurrency = 2
fs.config.UploadConcurrency = s3manager.DefaultUploadConcurrency
}
if fs.config.DownloadPartSize == 0 {
fs.config.DownloadPartSize = s3manager.DefaultDownloadPartSize
} else {
fs.config.DownloadPartSize *= 1024 * 1024
}
if fs.config.DownloadConcurrency == 0 {
fs.config.DownloadConcurrency = s3manager.DefaultDownloadConcurrency
}
sessOpts := session.Options{
@@ -201,6 +210,9 @@ func (fs *S3Fs) Open(name string, offset int64) (File, *pipeat.PipeReaderAt, fun
Bucket: aws.String(fs.config.Bucket),
Key: aws.String(name),
Range: streamRange,
}, func(d *s3manager.Downloader) {
d.Concurrency = fs.config.DownloadConcurrency
d.PartSize = fs.config.DownloadPartSize
})
w.CloseWithError(err) //nolint:errcheck
fsLog(fs, logger.LevelDebug, "download completed, path: %#v size: %v, err: %v", name, n, err)

@@ -168,6 +168,18 @@ func (c *S3FsConfig) isEqual(other *S3FsConfig) bool {
if c.UploadConcurrency != other.UploadConcurrency {
return false
}
if c.DownloadConcurrency != other.DownloadConcurrency {
return false
}
if c.DownloadPartSize != other.DownloadPartSize {
return false
}
if c.DownloadPartMaxTime != other.DownloadPartMaxTime {
return false
}
if c.ForcePathStyle != other.ForcePathStyle {
return false
}
if c.AccessSecret == nil {
c.AccessSecret = kms.NewEmptySecret()
}
@@ -205,6 +217,22 @@ func (c *S3FsConfig) EncryptCredentials(additionalData string) error {
return nil
}
func (c *S3FsConfig) checkPartSizeAndConcurrency() error {
if c.UploadPartSize != 0 && (c.UploadPartSize < 5 || c.UploadPartSize > 5000) {
return errors.New("upload_part_size cannot be != 0, lower than 5 (MB) or greater than 5000 (MB)")
}
if c.UploadConcurrency < 0 || c.UploadConcurrency > 64 {
return fmt.Errorf("invalid upload concurrency: %v", c.UploadConcurrency)
}
if c.DownloadPartSize != 0 && (c.DownloadPartSize < 5 || c.DownloadPartSize > 5000) {
return errors.New("download_part_size cannot be != 0, lower than 5 (MB) or greater than 5000 (MB)")
}
if c.DownloadConcurrency < 0 || c.DownloadConcurrency > 64 {
return fmt.Errorf("invalid download concurrency: %v", c.DownloadConcurrency)
}
return nil
}
// Validate returns an error if the configuration is not valid
func (c *S3FsConfig) Validate() error {
if c.AccessSecret == nil {
@@ -228,13 +256,7 @@ func (c *S3FsConfig) Validate() error {
c.KeyPrefix += "/"
}
}
if c.UploadPartSize != 0 && (c.UploadPartSize < 5 || c.UploadPartSize > 5000) {
return errors.New("upload_part_size cannot be != 0, lower than 5 (MB) or greater than 5000 (MB)")
}
if c.UploadConcurrency < 0 || c.UploadConcurrency > 64 {
return fmt.Errorf("invalid upload concurrency: %v", c.UploadConcurrency)
}
return nil
return c.checkPartSizeAndConcurrency()
}
// GCSFsConfig defines the configuration for Google Cloud Storage based filesystem