s3fs: migrate to AWS SDK V2

Signed-off-by: Nicola Murino <nicola.murino@gmail.com>

parent 7e7f662a23
commit 6f8b71b89f
5 changed files with 440 additions and 295 deletions
go.mod (26 changes)

@@ -8,7 +8,12 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0
github.com/GehirnInc/crypt v0.0.0-20200316065508-bb7000b8a962
github.com/alexedwards/argon2id v0.0.0-20211130144151-3585854a6387
github.com/aws/aws-sdk-go v1.43.17
github.com/aws/aws-sdk-go-v2 v1.15.0
github.com/aws/aws-sdk-go-v2/config v1.15.0
github.com/aws/aws-sdk-go-v2/credentials v1.10.0
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.0
github.com/aws/aws-sdk-go-v2/service/s3 v1.26.0
github.com/aws/aws-sdk-go-v2/service/sts v1.16.0
github.com/cockroachdb/cockroach-go/v2 v2.2.8
github.com/coreos/go-oidc/v3 v3.1.0
github.com/eikenb/pipeat v0.0.0-20210730190139-06b3e6902001

@@ -61,7 +66,7 @@ require (
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65
google.golang.org/api v0.71.0
google.golang.org/api v0.72.0
gopkg.in/natefinch/lumberjack.v2 v2.0.0
)

@@ -70,6 +75,17 @@ require (
cloud.google.com/go/compute v1.5.0 // indirect
cloud.google.com/go/iam v0.3.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.1 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.0 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.6 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.11.0 // indirect
github.com/aws/smithy-go v1.11.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/boombuler/barcode v1.0.1 // indirect
github.com/cenkalti/backoff v2.2.1+incompatible // indirect

@@ -126,12 +142,12 @@ require (
github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
go.opencensus.io v0.23.0 // indirect
golang.org/x/mod v0.5.1 // indirect
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/tools v0.1.9 // indirect
golang.org/x/tools v0.1.10 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6 // indirect
google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106 // indirect
google.golang.org/grpc v1.45.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/ini.v1 v1.66.4 // indirect
go.sum (53 changes)

@@ -144,20 +144,52 @@ github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgI
github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.40.34/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/aws/aws-sdk-go v1.43.17 h1:jDPBz1UuTxmyRo0eLgaRiro0fiI1zL7lkscqYxoEDLM=
github.com/aws/aws-sdk-go v1.43.17/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
github.com/aws/aws-sdk-go-v2 v1.15.0 h1:f9kWLNfyCzCB43eupDAk3/XgJ2EpgktiySD6leqs0js=
github.com/aws/aws-sdk-go-v2 v1.15.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.0 h1:J/tiyHbl07LL4/1i0rFrW5pbLMvo7M6JrekBUNpLeT4=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.0/go.mod h1:ohZjRmiToJ4NybwWTGOCbzlUQU8dxSHxYKzuX7k5l6Y=
github.com/aws/aws-sdk-go-v2/config v1.7.0/go.mod h1:w9+nMZ7soXCe5nT46Ri354SNhXDQ6v+V5wqDjnZE+GY=
github.com/aws/aws-sdk-go-v2/config v1.15.0 h1:cibCYF2c2uq0lsbu0Ggbg8RuGeiHCmXwUlTMS77CiK4=
github.com/aws/aws-sdk-go-v2/config v1.15.0/go.mod h1:NccaLq2Z9doMmeQXHQRrt2rm+2FbkrcPvfdbCaQn5hY=
github.com/aws/aws-sdk-go-v2/credentials v1.4.0/go.mod h1:dgGR+Qq7Wjcd4AOAW5Rf5Tnv3+x7ed6kETXyS9WCuAY=
github.com/aws/aws-sdk-go-v2/credentials v1.10.0 h1:M/FFpf2w31F7xqJqJLgiM0mFpLOtBvwZggORr6QCpo8=
github.com/aws/aws-sdk-go-v2/credentials v1.10.0/go.mod h1:HWJMr4ut5X+Lt/7epc7I6Llg5QIcoFHKAeIzw32t6EE=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.0/go.mod h1:CpNzHK9VEFUCknu50kkB8z58AH2B5DvPP7ea1LHve/Y=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.0 h1:gUlb+I7NwDtqJUIRcFYDiheYa97PdVHG/5Iz+SwdoHE=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.0/go.mod h1:prX26x9rmLwkEE1VVCelQOQgRN9sOVIssgowIJ270SE=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.0 h1:G/5sApTwgC9qCw1TTtrVsZyZjgNIvo0rl9jjGEICcoY=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.0/go.mod h1:1vV+vjdjBD9ZzATKf7rlze/RwvjvluywiMzY12sNGo4=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.6 h1:xiGjGVQsem2cxoIX61uRGy+Jux2s9C/kKbTrWLdrU54=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.6/go.mod h1:SSPEdf9spsFgJyhjrXvawfpyzrXHBCUe+2eQ1CjC1Ak=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.0 h1:bt3zw79tm209glISdMRCIVRCwvSDXxgAxh5KWe2qHkY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.0/go.mod h1:viTrxhAuejD+LszDahzAE2x40YjYWhMqzHxv2ZiWaME=
github.com/aws/aws-sdk-go-v2/internal/ini v1.2.2/go.mod h1:BQV0agm+JEhqR+2RT5e1XTFIDcAAV0eW6z2trp+iduw=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.7 h1:QOMEP8jnO8sm0SX/4G7dbaIq2eEP2wcWEsF0jzrXLJc=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.7/go.mod h1:P5sjYYf2nc5dE6cZIzEMsVtq6XeLD7c4rM+kQJPrByA=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.0 h1:uhb7moM7VjqIEpWzTpCvceLDSwrWpaleXm39OnVjuLE=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.0/go.mod h1:pA2St3Pu2Ldy6fBPY45Azoh1WBG4oS7eIKOd4XN7Meg=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.0 h1:IhiVUezzcKlszx6wXSDQYDjEn/bIO6Mc73uNQ1YfTmA=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.0/go.mod h1:kLKc4lo+XKlMhENIpKbp7dCePpyUqUG1PqGIAXoxwNE=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.0/go.mod h1:R1KK+vY8AfalhG1AOu5e35pOD2SdoPKQCFLTvnxiohk=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0 h1:YQ3fTXACo7xeAqg0NiqcCmBOXJruUfh+4+O2qxF2EjQ=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0/go.mod h1:R31ot6BgESRCIoxwfKtIHzZMo/vsZn2un81g9BJ4nmo=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.0 h1:i+7ve93k5G0S2xWBu60CKtmzU5RjBj9g7fcSypQNLR0=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.0/go.mod h1:L8EoTDLnnN2zL7MQPhyfCbmiZqEs8Cw7+1d9RlLXT5s=
github.com/aws/aws-sdk-go-v2/service/kms v1.5.0/go.mod h1:w7JuP9Oq1IKMFQPkNe3V6s9rOssXzOVEMNEqK1L1bao=
github.com/aws/aws-sdk-go-v2/service/s3 v1.26.0 h1:6IdBZVY8zod9umkwWrtbH2opcM00eKEmIfZKGUg5ywI=
github.com/aws/aws-sdk-go-v2/service/s3 v1.26.0/go.mod h1:WJzrjAFxq82Hl42oh8HuvwpugTgxmoiJBBX8SLwVs74=
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.6.0/go.mod h1:B+7C5UKdVq1ylkI/A6O8wcurFtaux0R1njePNPtKwoA=
github.com/aws/aws-sdk-go-v2/service/ssm v1.10.0/go.mod h1:4dXS5YNqI3SNbetQ7X7vfsMlX6ZnboJA2dulBwJx7+g=
github.com/aws/aws-sdk-go-v2/service/sso v1.4.0/go.mod h1:+1fpWnL96DL23aXPpMGbsmKe8jLTEfbjuQoA4WS1VaA=
github.com/aws/aws-sdk-go-v2/service/sso v1.11.0 h1:gZLEXLH6NiU8Y52nRhK1jA+9oz7LZzBK242fi/ziXa4=
github.com/aws/aws-sdk-go-v2/service/sso v1.11.0/go.mod h1:d1WcT0OjggjQCAdOkph8ijkr5sUwk1IH/VenOn7W1PU=
github.com/aws/aws-sdk-go-v2/service/sts v1.7.0/go.mod h1:0qcSMCyASQPN2sk/1KQLQ2Fh6yq8wm0HSDAimPhzCoM=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.0 h1:0+X/rJ2+DTBKWbUsn7WtF0JvNk/fRf928vkFsXkbbZs=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.0/go.mod h1:+8k4H2ASUZZXmjx/s3DFLo9tGBb44lkz3XcgfypJY7s=
github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
github.com/aws/smithy-go v1.11.1 h1:IQ+lPZVkSM3FRtyaDox41R8YS6iwPMYIreejOgPW49g=
github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=

@@ -840,8 +872,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=

@@ -861,7 +893,6 @@ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a h1:qfl7ob3DIEs3Ml9oLuPwY2N04gymzAW04WsUQHIClgM=
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

@@ -1047,8 +1078,8 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.9 h1:j9KsMiaP1c3B0OTQGth0/k+miLGTgLsAFUCrF2vLcF8=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

@@ -1096,8 +1127,8 @@ google.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oY
google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
google.golang.org/api v0.69.0/go.mod h1:boanBiw+h5c3s+tBPgEzLDRHfFLWV0qXxRHz3ws7C80=
google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
google.golang.org/api v0.71.0 h1:SgWof18M8V2NylsX7bL4fM28j+nFdRopHZbdipaaw20=
google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
google.golang.org/api v0.72.0 h1:rPZI0IqY9chaZ4Wq1bDz8YVIPT58pCnO6KnkIPq8xe0=
google.golang.org/api v0.72.0/go.mod h1:lbd/q6BRFJbdpV6OUCXstVeiI5mL/d3/WifG7iNKnjI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=

@@ -1187,9 +1218,9 @@ google.golang.org/genproto v0.0.0-20220211171837-173942840c17/go.mod h1:kGP+zUP2
google.golang.org/genproto v0.0.0-20220216160803-4663080d8bc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6 h1:FglFEfyj61zP3c6LgjmVHxYxZWXYul9oiS1EZqD5gLc=
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106 h1:ErU+UA6wxadoU8nWrsy5MZUVBs75K17zUCsUCIfrXCE=
google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
vfs/azblobfs.go

@@ -199,7 +199,7 @@ func (fs *AzureBlobFs) Open(name string, offset int64) (File, *pipeat.PipeReader
err := fs.handleMultipartDownload(ctx, blockBlob, offset, w)
w.CloseWithError(err) //nolint:errcheck
fsLog(fs, logger.LevelDebug, "download completed, path: %#v size: %v, err: %v", name, w.GetWrittenBytes(), err)
fsLog(fs, logger.LevelDebug, "download completed, path: %#v size: %v, err: %+v", name, w.GetWrittenBytes(), err)
metric.AZTransferCompleted(w.GetWrittenBytes(), 1, err)
}()

@@ -233,7 +233,7 @@ func (fs *AzureBlobFs) Create(name string, flag int) (File, *PipeWriter, func(),
err := fs.handleMultipartUpload(ctx, r, blockBlob, &headers)
r.CloseWithError(err) //nolint:errcheck
p.Done(err)
fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, readed bytes: %v, err: %v", name, r.GetReadedBytes(), err)
fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, readed bytes: %v, err: %+v", name, r.GetReadedBytes(), err)
metric.AZTransferCompleted(r.GetReadedBytes(), 0, err)
}()

@@ -305,7 +305,7 @@ func (fs *AzureBlobFs) Rename(source, target string) error {
err = plugin.Handler.SetModificationTime(fs.getStorageID(), ensureAbsPath(target),
util.GetTimeAsMsSinceEpoch(fi.ModTime()))
if err != nil {
fsLog(fs, logger.LevelWarn, "unable to preserve modification time after renaming %#v -> %#v: %v",
fsLog(fs, logger.LevelWarn, "unable to preserve modification time after renaming %#v -> %#v: %+v",
source, target, err)
}
}

@@ -334,7 +334,7 @@ func (fs *AzureBlobFs) Remove(name string, isDir bool) error {
metric.AZDeleteObjectCompleted(err)
if plugin.Handler.HasMetadater() && err == nil && !isDir {
if errMetadata := plugin.Handler.RemoveMetadata(fs.getStorageID(), ensureAbsPath(name)); errMetadata != nil {
fsLog(fs, logger.LevelWarn, "unable to remove metadata for path %#v: %v", name, errMetadata)
fsLog(fs, logger.LevelWarn, "unable to remove metadata for path %#v: %+v", name, errMetadata)
}
}
return err

@@ -499,7 +499,7 @@ func (*AzureBlobFs) IsNotExist(err error) bool {
return errResp.StatusCode() == http.StatusNotFound
}
return strings.Contains(err.Error(), "404")
return false
}

// IsPermission returns a boolean indicating whether the error is known to

@@ -510,10 +510,11 @@ func (*AzureBlobFs) IsPermission(err error) bool {
}
var errResp *azblob.StorageError
if errors.As(err, &errResp) {
return errResp.StatusCode() == http.StatusForbidden
statusCode := errResp.StatusCode()
return statusCode == http.StatusForbidden || statusCode == http.StatusUnauthorized
}
return strings.Contains(err.Error(), "403")
return false
}

// IsNotSupported returns true if the error indicate an unsupported operation

@@ -655,14 +656,7 @@ func (fs *AzureBlobFs) GetRelativePath(name string) string {
// Walk walks the file tree rooted at root, calling walkFn for each file or
// directory in the tree, including root
func (fs *AzureBlobFs) Walk(root string, walkFn filepath.WalkFunc) error {
prefix := ""
if root != "" && root != "." {
prefix = strings.TrimPrefix(root, "/")
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
}
prefix := fs.getPrefix(root)
timeout := int32(fs.ctxTimeout / time.Second)
pager := fs.containerClient.ListBlobsFlat(&azblob.ContainerListBlobFlatSegmentOptions{
Prefix: &prefix,

@@ -819,13 +813,7 @@ func (fs *AzureBlobFs) checkIfBucketExists() error {
func (fs *AzureBlobFs) hasContents(name string) (bool, error) {
result := false
prefix := ""
if name != "" && name != "." {
prefix = strings.TrimPrefix(name, "/")
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
}
prefix := fs.getPrefix(name)

maxResults := int32(1)
timeout := int32(fs.ctxTimeout / time.Second)
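Note: the Walk and hasContents hunks above replace the inline prefix normalization with a new fs.getPrefix helper whose AzureBlobFs implementation is not shown in this diff. Reconstructed from the removed inline code, it presumably looks like this minimal sketch (not part of the commit):

// Sketch only: reconstructed from the inline code removed above; the real helper may differ.
func (fs *AzureBlobFs) getPrefix(name string) string {
	prefix := ""
	if name != "" && name != "." {
		prefix = strings.TrimPrefix(name, "/")
		if !strings.HasSuffix(prefix, "/") {
			prefix += "/"
		}
	}
	return prefix
}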
@@ -939,7 +927,7 @@ func (fs *AzureBlobFs) handleMultipartDownload(ctx context.Context, blockBlob az
if err != nil {
errOnce.Do(func() {
poolError = err
fsLog(fs, logger.LevelError, "multipart download error: %v", poolError)
fsLog(fs, logger.LevelError, "multipart download error: %+v", poolError)
poolCancel()
})
}

@@ -1019,7 +1007,7 @@ func (fs *AzureBlobFs) handleMultipartUpload(ctx context.Context, reader io.Read
if err != nil {
errOnce.Do(func() {
poolError = err
fsLog(fs, logger.LevelDebug, "multipart upload error: %v", poolError)
fsLog(fs, logger.LevelDebug, "multipart upload error: %+v", poolError)
poolCancel()
})
}
vfs/gcsfs.go (38 changes)

@@ -32,7 +32,7 @@ import (
)

const (
defaultPageSize = 5000
defaultGCSPageSize = 5000
)

var (

@@ -163,7 +163,7 @@ func (fs *GCSFs) Open(name string, offset int64) (File, *pipeat.PipeReaderAt, fu
n, err := io.Copy(w, objectReader)
w.CloseWithError(err) //nolint:errcheck
fsLog(fs, logger.LevelDebug, "download completed, path: %#v size: %v, err: %v", name, n, err)
fsLog(fs, logger.LevelDebug, "download completed, path: %#v size: %v, err: %+v", name, n, err)
metric.GCSTransferCompleted(n, 1, err)
}()
return nil, r, cancelFn, nil

@@ -205,7 +205,7 @@ func (fs *GCSFs) Create(name string, flag int) (File, *PipeWriter, func(), error
}
r.CloseWithError(err) //nolint:errcheck
p.Done(err)
fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, acl: %#v, readed bytes: %v, err: %v",
fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, acl: %#v, readed bytes: %v, err: %+v",
name, fs.config.ACL, n, err)
metric.GCSTransferCompleted(n, 0, err)
}()

@@ -268,7 +268,7 @@ func (fs *GCSFs) Rename(source, target string) error {
err = plugin.Handler.SetModificationTime(fs.getStorageID(), ensureAbsPath(target),
util.GetTimeAsMsSinceEpoch(fi.ModTime()))
if err != nil {
fsLog(fs, logger.LevelWarn, "unable to preserve modification time after renaming %#v -> %#v: %v",
fsLog(fs, logger.LevelWarn, "unable to preserve modification time after renaming %#v -> %#v: %+v",
source, target, err)
}
}

@@ -301,7 +301,7 @@ func (fs *GCSFs) Remove(name string, isDir bool) error {
metric.GCSDeleteObjectCompleted(err)
if plugin.Handler.HasMetadater() && err == nil && !isDir {
if errMetadata := plugin.Handler.RemoveMetadata(fs.getStorageID(), ensureAbsPath(name)); errMetadata != nil {
fsLog(fs, logger.LevelWarn, "unable to remove metadata for path %#v: %v", name, errMetadata)
fsLog(fs, logger.LevelWarn, "unable to remove metadata for path %#v: %+v", name, errMetadata)
}
}
return err

@@ -393,7 +393,7 @@ func (fs *GCSFs) ReadDir(dirname string) ([]os.FileInfo, error) {
bkt := fs.svc.Bucket(fs.config.Bucket)
it := bkt.Objects(ctx, query)
pager := iterator.NewPager(it, defaultPageSize, "")
pager := iterator.NewPager(it, defaultGCSPageSize, "")

for {
var objects []*storage.ObjectAttrs

@@ -474,7 +474,7 @@ func (*GCSFs) IsNotExist(err error) bool {
return true
}
}
return strings.Contains(err.Error(), "404")
return false
}

// IsPermission returns a boolean indicating whether the error is known to

@@ -488,7 +488,7 @@ func (*GCSFs) IsPermission(err error) bool {
return true
}
}
return strings.Contains(err.Error(), "403")
return false
}

// IsNotSupported returns true if the error indicate an unsupported operation

@@ -521,7 +521,7 @@ func (fs *GCSFs) ScanRootDirContents() (int, int64, error) {
bkt := fs.svc.Bucket(fs.config.Bucket)
it := bkt.Objects(ctx, query)
pager := iterator.NewPager(it, defaultPageSize, "")
pager := iterator.NewPager(it, defaultGCSPageSize, "")

for {
var objects []*storage.ObjectAttrs

@@ -573,7 +573,7 @@ func (fs *GCSFs) getFileNamesInPrefix(fsPrefix string) (map[string]bool, error)
bkt := fs.svc.Bucket(fs.config.Bucket)
it := bkt.Objects(ctx, query)
pager := iterator.NewPager(it, defaultPageSize, "")
pager := iterator.NewPager(it, defaultGCSPageSize, "")

for {
var objects []*storage.ObjectAttrs

@@ -651,13 +651,7 @@ func (fs *GCSFs) GetRelativePath(name string) string {
// Walk walks the file tree rooted at root, calling walkFn for each file or
// directory in the tree, including root
func (fs *GCSFs) Walk(root string, walkFn filepath.WalkFunc) error {
prefix := ""
if root != "" && root != "." {
prefix = strings.TrimPrefix(root, "/")
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
}
prefix := fs.getPrefix(root)

query := &storage.Query{Prefix: prefix}
err := query.SetAttrSelection(gcsDefaultFieldsSelection)

@@ -671,7 +665,7 @@ func (fs *GCSFs) Walk(root string, walkFn filepath.WalkFunc) error {
bkt := fs.svc.Bucket(fs.config.Bucket)
it := bkt.Objects(ctx, query)
pager := iterator.NewPager(it, defaultPageSize, "")
pager := iterator.NewPager(it, defaultGCSPageSize, "")

for {
var objects []*storage.ObjectAttrs

@@ -784,13 +778,7 @@ func (fs *GCSFs) checkIfBucketExists() error {
func (fs *GCSFs) hasContents(name string) (bool, error) {
result := false
prefix := ""
if name != "" && name != "." {
prefix = strings.TrimPrefix(name, "/")
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
}
prefix := fs.getPrefix(name)
query := &storage.Query{Prefix: prefix}
err := query.SetAttrSelection(gcsDefaultFieldsSelection)
if err != nil {
vfs/s3fs.go (582 changes)

@@ -5,8 +5,11 @@ package vfs
import (
"context"
"errors"
"fmt"
"mime"
"net"
"net/http"
"net/url"
"os"
"path"

@@ -14,14 +17,15 @@ import (
"strings"
"time"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/aws/aws-sdk-go-v2/aws"
awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/aws-sdk-go-v2/service/sts"
"github.com/eikenb/pipeat"
"github.com/pkg/sftp"

@@ -32,19 +36,21 @@ import (
"github.com/drakkan/sftpgo/v2/version"
)

// using this mime type for directories improves compatibility with s3fs-fuse
const s3DirMimeType = "application/x-directory"
const (
// using this mime type for directories improves compatibility with s3fs-fuse
s3DirMimeType = "application/x-directory"
s3TransferBufferSize = 256 * 1024
)

// S3Fs is a Fs implementation for AWS S3 compatible object storages
type S3Fs struct {
connectionID string
localTempDir string
// if not empty this fs is mouted as virtual folder in the specified path
mountPath string
config *S3FsConfig
svc *s3.S3
ctxTimeout time.Duration
ctxLongTimeout time.Duration
mountPath string
config *S3FsConfig
svc *s3.Client
ctxTimeout time.Duration
}

func init() {

@@ -53,7 +59,7 @@ func init() {
// NewS3Fs returns an S3Fs object that allows to interact with an s3 compatible
// object storage
func NewS3Fs(connectionID, localTempDir, mountPath string, config S3FsConfig) (Fs, error) {
func NewS3Fs(connectionID, localTempDir, mountPath string, s3Config S3FsConfig) (Fs, error) {
if localTempDir == "" {
if tempPath != "" {
localTempDir = tempPath

@@ -62,51 +68,56 @@ func NewS3Fs(connectionID, localTempDir, mountPath string, config S3FsConfig) (F
}
}
fs := &S3Fs{
connectionID: connectionID,
localTempDir: localTempDir,
mountPath: mountPath,
config: &config,
ctxTimeout: 30 * time.Second,
ctxLongTimeout: 300 * time.Second,
connectionID: connectionID,
localTempDir: localTempDir,
mountPath: mountPath,
config: &s3Config,
ctxTimeout: 30 * time.Second,
}
if err := fs.config.Validate(); err != nil {
return fs, err
}
awsConfig := aws.NewConfig()
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()

if fs.config.Region != "" {
awsConfig.WithRegion(fs.config.Region)
awsConfig, err := config.LoadDefaultConfig(ctx, config.WithHTTPClient(getAWSHTTPClient(0, 30*time.Second)))
if err != nil {
return fs, fmt.Errorf("unable to get AWS config: %w", err)
}
if fs.config.Region != "" {
awsConfig.Region = fs.config.Region
}

if !fs.config.AccessSecret.IsEmpty() {
if err := fs.config.AccessSecret.TryDecrypt(); err != nil {
return fs, err
}
awsConfig.Credentials = credentials.NewStaticCredentials(fs.config.AccessKey,
fs.config.AccessSecret.GetPayload(), fs.config.SessionToken)
awsConfig.Credentials = aws.NewCredentialsCache(
credentials.NewStaticCredentialsProvider(fs.config.AccessKey, fs.config.AccessSecret.GetPayload(),
fs.config.SessionToken))
}
if fs.config.Endpoint != "" {
endpointResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) {
return aws.Endpoint{
URL: fs.config.Endpoint,
HostnameImmutable: fs.config.ForcePathStyle,
PartitionID: "aws",
SigningRegion: fs.config.Region,
Source: aws.EndpointSourceCustom,
}, nil
})
awsConfig.EndpointResolverWithOptions = endpointResolver
}

if fs.config.Endpoint != "" {
awsConfig.Endpoint = aws.String(fs.config.Endpoint)
}
if fs.config.ForcePathStyle {
awsConfig.S3ForcePathStyle = aws.Bool(true)
}
fs.setConfigDefaults()

sessOpts := session.Options{
Config: *awsConfig,
SharedConfigState: session.SharedConfigEnable,
}
sess, err := session.NewSessionWithOptions(sessOpts)
if err != nil {
return fs, err
}
if fs.config.RoleARN != "" {
creds := stscreds.NewCredentials(sess, fs.config.RoleARN)
sess.Config.Credentials = creds
client := sts.NewFromConfig(awsConfig)
creds := stscreds.NewAssumeRoleProvider(client, fs.config.RoleARN)
awsConfig.Credentials = creds
}
fs.svc = s3.New(sess)
fs.svc = s3.NewFromConfig(awsConfig, func(o *s3.Options) {
o.UsePathStyle = fs.config.ForcePathStyle
})
return fs, nil
}
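For reference, the v2 construction pattern used in NewS3Fs above can be exercised on its own. The following minimal, stand-alone sketch (region, endpoint and credentials are placeholders, not values from this commit) shows how config.LoadDefaultConfig, a static credentials provider, a custom endpoint resolver and s3.NewFromConfig fit together:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	// load shared config and environment first, then override the pieces we care about
	cfg, err := config.LoadDefaultConfig(ctx,
		config.WithRegion("us-east-1"), // placeholder region
		config.WithCredentialsProvider(aws.NewCredentialsCache(
			credentials.NewStaticCredentialsProvider("KEY", "SECRET", ""))), // placeholder credentials
	)
	if err != nil {
		log.Fatal(err)
	}
	// point the client at a custom S3-compatible endpoint (for example MinIO)
	cfg.EndpointResolverWithOptions = aws.EndpointResolverWithOptionsFunc(
		func(service, region string, options ...interface{}) (aws.Endpoint, error) {
			return aws.Endpoint{URL: "http://127.0.0.1:9000", HostnameImmutable: true}, nil
		})
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.UsePathStyle = true // path-style addressing, the v2 replacement for S3ForcePathStyle
	})
	_ = client
}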
@@ -123,22 +134,21 @@ func (fs *S3Fs) ConnectionID() string {
// Stat returns a FileInfo describing the named file
func (fs *S3Fs) Stat(name string) (os.FileInfo, error) {
var result *FileInfo
if name == "/" || name == "." {
if name == "" || name == "/" || name == "." {
err := fs.checkIfBucketExists()
if err != nil {
return result, err
}
return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, true, 0, time.Now(), false))
}
if "/"+fs.config.KeyPrefix == name+"/" {
if fs.config.KeyPrefix == name+"/" {
return NewFileInfo(name, true, 0, time.Now(), false), nil
}
obj, err := fs.headObject(name)
if err == nil {
// a "dir" has a trailing "/" so we cannot have a directory here
objSize := *obj.ContentLength
objectModTime := *obj.LastModified
return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, false, objSize, objectModTime, false))
return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, false, obj.ContentLength,
util.GetTimeFromPointer(obj.LastModified), false))
}
if !fs.IsNotExist(err) {
return result, err

@@ -163,9 +173,8 @@ func (fs *S3Fs) getStatForDir(name string) (os.FileInfo, error) {
if err != nil {
return result, err
}
objSize := *obj.ContentLength
objectModTime := *obj.LastModified
return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, true, objSize, objectModTime, false))
return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, true, obj.ContentLength,
util.GetTimeFromPointer(obj.LastModified), false))
}

// Lstat returns a FileInfo describing the named file
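The Stat hunks above stop dereferencing obj.LastModified directly and go through SFTPGo's util.GetTimeFromPointer (and later util.GetStringFromPointer), which avoids a panic when the SDK returns a nil pointer. Their implementation is not part of this diff; a plausible nil-safe shape, shown only as a hypothetical sketch, would be:

// Hypothetical sketch of the util helpers referenced above; the real ones live in SFTPGo's util package.
func GetStringFromPointer(val *string) string {
	if val == nil {
		return ""
	}
	return *val
}

func GetTimeFromPointer(val *time.Time) time.Time {
	if val == nil {
		return time.Time{} // assumed zero value; the real helper may choose a different fallback
	}
	return *val
}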
@@ -180,18 +189,16 @@ func (fs *S3Fs) Open(name string, offset int64) (File, *pipeat.PipeReaderAt, fun
return nil, nil, nil, err
}
ctx, cancelFn := context.WithCancel(context.Background())
downloader := s3manager.NewDownloaderWithClient(fs.svc)
if offset == 0 && fs.config.DownloadPartMaxTime > 0 {
downloader.RequestOptions = append(downloader.RequestOptions, func(r *request.Request) {
chunkCtx, cancel := context.WithTimeout(r.Context(), time.Duration(fs.config.DownloadPartMaxTime)*time.Second)
r.SetContext(chunkCtx)
downloader := manager.NewDownloader(fs.svc, func(d *manager.Downloader) {
d.Concurrency = fs.config.DownloadConcurrency
d.PartSize = fs.config.DownloadPartSize
if offset == 0 && fs.config.DownloadPartMaxTime > 0 {
d.ClientOptions = append(d.ClientOptions, func(o *s3.Options) {
o.HTTPClient = getAWSHTTPClient(fs.config.DownloadPartMaxTime, 100*time.Millisecond)
})
}
})

go func() {
<-ctx.Done()
cancel()
}()
})
}
var streamRange *string
if offset > 0 {
streamRange = aws.String(fmt.Sprintf("bytes=%v-", offset))

@@ -200,16 +207,13 @@ func (fs *S3Fs) Open(name string, offset int64) (File, *pipeat.PipeReaderAt, fun
go func() {
defer cancelFn()

n, err := downloader.DownloadWithContext(ctx, w, &s3.GetObjectInput{
n, err := downloader.Download(ctx, w, &s3.GetObjectInput{
Bucket: aws.String(fs.config.Bucket),
Key: aws.String(name),
Range: streamRange,
}, func(d *s3manager.Downloader) {
d.Concurrency = fs.config.DownloadConcurrency
d.PartSize = fs.config.DownloadPartSize
})
w.CloseWithError(err) //nolint:errcheck
fsLog(fs, logger.LevelDebug, "download completed, path: %#v size: %v, err: %v", name, n, err)
fsLog(fs, logger.LevelDebug, "download completed, path: %#v size: %v, err: %+v", name, n, err)
metric.S3TransferCompleted(n, 1, err)
}()
return nil, r, cancelFn, nil
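The transfer manager moves from s3manager to the v2 feature/s3/manager package, and per-part timeouts become client options instead of request handlers. A short, hedged fragment of the new download path in isolation (bucket, key and the destination file are placeholders; it assumes the same imports as s3fs.go and an *s3.Client called client):

// Fragment: downloads an object with the v2 transfer manager.
downloader := manager.NewDownloader(client, func(d *manager.Downloader) {
	d.Concurrency = 4            // parallel part downloads
	d.PartSize = 5 * 1024 * 1024 // 5 MiB parts
})
f, err := os.Create("/tmp/object-copy") // any io.WriterAt works; SFTPGo passes the pipeat writer instead
if err != nil {
	return err
}
defer f.Close()
n, err := downloader.Download(context.Background(), f, &s3.GetObjectInput{
	Bucket: aws.String("my-bucket"),   // placeholder
	Key:    aws.String("my-key"),      // placeholder
	Range:  aws.String("bytes=1024-"), // optional resume offset, as used in Open above
})
// n is the number of bytes written to f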
@@ -223,43 +227,37 @@ func (fs *S3Fs) Create(name string, flag int) (File, *PipeWriter, func(), error)
}
p := NewPipeWriter(w)
ctx, cancelFn := context.WithCancel(context.Background())
uploader := s3manager.NewUploaderWithClient(fs.svc)
if fs.config.UploadPartMaxTime > 0 {
uploader.RequestOptions = append(uploader.RequestOptions, func(r *request.Request) {
chunkCtx, cancel := context.WithTimeout(r.Context(), time.Duration(fs.config.UploadPartMaxTime)*time.Second)
r.SetContext(chunkCtx)
uploader := manager.NewUploader(fs.svc, func(u *manager.Uploader) {
u.Concurrency = fs.config.UploadConcurrency
u.PartSize = fs.config.UploadPartSize
if fs.config.UploadPartMaxTime > 0 {
u.ClientOptions = append(u.ClientOptions, func(o *s3.Options) {
o.HTTPClient = getAWSHTTPClient(fs.config.UploadPartMaxTime, 100*time.Millisecond)
})
}
})

go func() {
<-ctx.Done()
cancel()
}()
})
}
go func() {
defer cancelFn()

key := name
var contentType string
if flag == -1 {
contentType = s3DirMimeType
} else {
contentType = mime.TypeByExtension(path.Ext(name))
}
response, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{
_, err := uploader.Upload(ctx, &s3.PutObjectInput{
Bucket: aws.String(fs.config.Bucket),
Key: aws.String(key),
Key: aws.String(name),
Body: r,
ACL: util.NilIfEmpty(fs.config.ACL),
StorageClass: util.NilIfEmpty(fs.config.StorageClass),
ACL: types.ObjectCannedACL(fs.config.ACL),
StorageClass: types.StorageClass(fs.config.StorageClass),
ContentType: util.NilIfEmpty(contentType),
}, func(u *s3manager.Uploader) {
u.Concurrency = fs.config.UploadConcurrency
u.PartSize = fs.config.UploadPartSize
})
r.CloseWithError(err) //nolint:errcheck
p.Done(err)
fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, acl: %#v, response: %v, readed bytes: %v, err: %+v",
name, fs.config.ACL, response, r.GetReadedBytes(), err)
fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, acl: %#v, readed bytes: %v, err: %+v",
name, fs.config.ACL, r.GetReadedBytes(), err)
metric.S3TransferCompleted(r.GetReadedBytes(), 0, err)
}()
return nil, p, cancelFn, nil
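Uploads follow the same pattern: manager.NewUploader wraps the *s3.Client, Upload takes a *s3.PutObjectInput directly (there is no separate s3manager.UploadInput in v2), and ACL/storage class are typed string enums, so an empty configuration value is simply left out of the request. A hedged fragment with placeholder values, assuming the s3fs.go imports above:

// Fragment: uploads a small object with the v2 transfer manager.
uploader := manager.NewUploader(client, func(u *manager.Uploader) {
	u.Concurrency = 2            // parallel part uploads
	u.PartSize = 5 * 1024 * 1024 // minimum multipart part size
})
_, err := uploader.Upload(context.Background(), &s3.PutObjectInput{
	Bucket:       aws.String("my-bucket"),          // placeholder
	Key:          aws.String("my-key"),             // placeholder
	Body:         strings.NewReader("hello"),       // any io.Reader; SFTPGo passes the pipe reader
	ACL:          types.ObjectCannedACL("private"), // empty string would omit the header
	StorageClass: types.StorageClass("STANDARD"),
	ContentType:  aws.String("text/plain"),
})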
@@ -270,11 +268,6 @@ func (fs *S3Fs) Create(name string, flag int) (File, *PipeWriter, func(), error)
// rename all the contents too and this could take long time: think
// about directories with thousands of files, for each file we should
// execute a CopyObject call.
// TODO: rename does not work for files bigger than 5GB, implement
// multipart copy or wait for this pull request to be merged:
//
// https://github.com/aws/aws-sdk-go/pull/2653
//
func (fs *S3Fs) Rename(source, target string) error {
if source == target {
return nil

@@ -305,25 +298,33 @@ func (fs *S3Fs) Rename(source, target string) error {
} else {
contentType = mime.TypeByExtension(path.Ext(source))
}
ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
defer cancelFn()
copySource = pathEscape(copySource)

_, err = fs.svc.CopyObjectWithContext(ctx, &s3.CopyObjectInput{
Bucket: aws.String(fs.config.Bucket),
CopySource: aws.String(pathEscape(copySource)),
Key: aws.String(target),
StorageClass: util.NilIfEmpty(fs.config.StorageClass),
ACL: util.NilIfEmpty(fs.config.ACL),
ContentType: util.NilIfEmpty(contentType),
})
if fi.Size() > 5*1024*1024*1024 {
err = fs.doMultipartCopy(copySource, target, contentType, fi.Size())
} else {
ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
defer cancelFn()

_, err = fs.svc.CopyObject(ctx, &s3.CopyObjectInput{
Bucket: aws.String(fs.config.Bucket),
CopySource: aws.String(copySource),
Key: aws.String(target),
StorageClass: types.StorageClass(fs.config.StorageClass),
ACL: types.ObjectCannedACL(fs.config.ACL),
ContentType: util.NilIfEmpty(contentType),
})
}
if err != nil {
metric.S3CopyObjectCompleted(err)
return err
}
err = fs.svc.WaitUntilObjectExistsWithContext(ctx, &s3.HeadObjectInput{

waiter := s3.NewObjectExistsWaiter(fs.svc)
err = waiter.Wait(context.Background(), &s3.HeadObjectInput{
Bucket: aws.String(fs.config.Bucket),
Key: aws.String(target),
})
}, 10*time.Second)
metric.S3CopyObjectCompleted(err)
if err != nil {
return err
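WaitUntilObjectExistsWithContext is replaced by the generated waiter type; the third argument to Wait is the maximum time to wait for the object to become visible. The pattern in isolation, as a fragment with placeholder names:

// Fragment: wait for a freshly copied key to be visible via HeadObject.
waiter := s3.NewObjectExistsWaiter(client)
err := waiter.Wait(context.Background(), &s3.HeadObjectInput{
	Bucket: aws.String("my-bucket"), // placeholder
	Key:    aws.String("my-key"),    // placeholder
}, 10*time.Second) // overall wait budget, as used above
if err != nil {
	fmt.Println("object not visible within the wait budget:", err)
}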
@@ -333,7 +334,7 @@ func (fs *S3Fs) Rename(source, target string) error {
err = plugin.Handler.SetModificationTime(fs.getStorageID(), ensureAbsPath(target),
util.GetTimeAsMsSinceEpoch(fi.ModTime()))
if err != nil {
fsLog(fs, logger.LevelWarn, "unable to preserve modification time after renaming %#v -> %#v: %v",
fsLog(fs, logger.LevelWarn, "unable to preserve modification time after renaming %#v -> %#v: %+v",
source, target, err)
}
}

@@ -358,14 +359,14 @@ func (fs *S3Fs) Remove(name string, isDir bool) error {
ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
defer cancelFn()

_, err := fs.svc.DeleteObjectWithContext(ctx, &s3.DeleteObjectInput{
_, err := fs.svc.DeleteObject(ctx, &s3.DeleteObjectInput{
Bucket: aws.String(fs.config.Bucket),
Key: aws.String(name),
})
metric.S3DeleteObjectCompleted(err)
if plugin.Handler.HasMetadater() && err == nil && !isDir {
if errMetadata := plugin.Handler.RemoveMetadata(fs.getStorageID(), ensureAbsPath(name)); errMetadata != nil {
fsLog(fs, logger.LevelWarn, "unable to remove metadata for path %#v: %v", name, errMetadata)
fsLog(fs, logger.LevelWarn, "unable to remove metadata for path %#v: %+v", name, errMetadata)
}
}
return err

@@ -437,27 +438,29 @@ func (*S3Fs) Truncate(name string, size int64) error {
func (fs *S3Fs) ReadDir(dirname string) ([]os.FileInfo, error) {
var result []os.FileInfo
// dirname must be already cleaned
prefix := ""
if dirname != "/" && dirname != "." {
prefix = strings.TrimPrefix(dirname, "/")
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
}
prefix := fs.getPrefix(dirname)

modTimes, err := getFolderModTimes(fs.getStorageID(), dirname)
if err != nil {
return result, err
}
prefixes := make(map[string]bool)
ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxLongTimeout))
defer cancelFn()

err = fs.svc.ListObjectsV2PagesWithContext(ctx, &s3.ListObjectsV2Input{
paginator := s3.NewListObjectsV2Paginator(fs.svc, &s3.ListObjectsV2Input{
Bucket: aws.String(fs.config.Bucket),
Prefix: aws.String(prefix),
Delimiter: aws.String("/"),
}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
})

for paginator.HasMorePages() {
ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
defer cancelFn()

page, err := paginator.NextPage(ctx)
if err != nil {
metric.S3ListObjectsCompleted(err)
return result, err
}
for _, p := range page.CommonPrefixes {
// prefixes have a trailing slash
name, _ := fs.resolve(p.Prefix, prefix)

@@ -471,10 +474,9 @@ func (fs *S3Fs) ReadDir(dirname string) ([]os.FileInfo, error) {
prefixes[name] = true
}
for _, fileObject := range page.Contents {
objectSize := *fileObject.Size
objectModTime := *fileObject.LastModified
objectModTime := util.GetTimeFromPointer(fileObject.LastModified)
name, isDir := fs.resolve(fileObject.Key, prefix)
if name == "" {
if name == "" || name == "/" {
continue
}
if isDir {

@@ -486,12 +488,13 @@ func (fs *S3Fs) ReadDir(dirname string) ([]os.FileInfo, error) {
if t, ok := modTimes[name]; ok {
objectModTime = util.GetTimeFromMsecSinceEpoch(t)
}
result = append(result, NewFileInfo(name, (isDir && objectSize == 0), objectSize, objectModTime, false))
result = append(result, NewFileInfo(name, (isDir && fileObject.Size == 0), fileObject.Size,
objectModTime, false))
}
return true
})
metric.S3ListObjectsCompleted(err)
return result, err
}

metric.S3ListObjectsCompleted(nil)
return result, nil
}

// IsUploadResumeSupported returns true if resuming uploads is supported.
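The callback-based ListObjectsV2PagesWithContext is gone in SDK v2; listing is driven by a paginator object, which is why ReadDir above becomes a HasMorePages/NextPage loop with a short-lived context per page. The pattern in isolation, as a fragment inside a function returning error (bucket and prefix are placeholders):

// Fragment: iterate an S3 listing page by page with the v2 paginator.
paginator := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
	Bucket:    aws.String("my-bucket"), // placeholder
	Prefix:    aws.String("folder/"),   // placeholder
	Delimiter: aws.String("/"),
})
for paginator.HasMorePages() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	page, err := paginator.NextPage(ctx)
	cancel()
	if err != nil {
		return err
	}
	for _, obj := range page.Contents {
		fmt.Println(aws.ToString(obj.Key), obj.Size) // Size is a plain int64 in this SDK version
	}
}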
@@ -513,23 +516,14 @@ func (*S3Fs) IsNotExist(err error) bool {
if err == nil {
return false
}
if aerr, ok := err.(awserr.Error); ok {
if aerr.Code() == s3.ErrCodeNoSuchKey {
return true
}
if aerr.Code() == s3.ErrCodeNoSuchBucket {
return true

var re *awshttp.ResponseError
if errors.As(err, &re) {
if re.Response != nil {
return re.Response.StatusCode == http.StatusNotFound
}
}
if multierr, ok := err.(s3manager.MultiUploadFailure); ok {
if multierr.Code() == s3.ErrCodeNoSuchKey {
return true
}
if multierr.Code() == s3.ErrCodeNoSuchBucket {
return true
}
}
return strings.Contains(err.Error(), "404")
return false
}

// IsPermission returns a boolean indicating whether the error is known to

@@ -538,7 +532,15 @@ func (*S3Fs) IsPermission(err error) bool {
if err == nil {
return false
}
return strings.Contains(err.Error(), "403")

var re *awshttp.ResponseError
if errors.As(err, &re) {
if re.Response != nil {
return re.Response.StatusCode == http.StatusForbidden ||
re.Response.StatusCode == http.StatusUnauthorized
}
}
return false
}

// IsNotSupported returns true if the error indicate an unsupported operation
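Error inspection changes from awserr.Error type assertions to errors.As against the v2 transport error type, which is what the new IsNotExist/IsPermission above rely on. A hedged sketch of a standalone classifier showing both the HTTP-status route used by this commit and the typed service errors that the v2 s3/types package also exposes (the function name is illustrative, not from the commit):

// Fragment; assumes the s3fs.go imports above.
func isNotFound(err error) bool {
	if err == nil {
		return false
	}
	// HTTP status, as done in IsNotExist above
	var re *awshttp.ResponseError
	if errors.As(err, &re) && re.Response != nil {
		if re.Response.StatusCode == http.StatusNotFound {
			return true
		}
	}
	// typed service errors are also available in aws-sdk-go-v2/service/s3/types
	var noKey *types.NoSuchKey
	var noBucket *types.NoSuchBucket
	return errors.As(err, &noKey) || errors.As(err, &noBucket)
}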
@@ -561,25 +563,33 @@ func (fs *S3Fs) CheckRootPath(username string, uid int, gid int) bool {
func (fs *S3Fs) ScanRootDirContents() (int, int64, error) {
numFiles := 0
size := int64(0)
ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxLongTimeout))
defer cancelFn()

err := fs.svc.ListObjectsV2PagesWithContext(ctx, &s3.ListObjectsV2Input{
paginator := s3.NewListObjectsV2Paginator(fs.svc, &s3.ListObjectsV2Input{
Bucket: aws.String(fs.config.Bucket),
Prefix: aws.String(fs.config.KeyPrefix),
}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
})

for paginator.HasMorePages() {
ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
defer cancelFn()

page, err := paginator.NextPage(ctx)
if err != nil {
metric.S3ListObjectsCompleted(err)
return numFiles, size, err
}
for _, fileObject := range page.Contents {
isDir := strings.HasSuffix(*fileObject.Key, "/")
if isDir && *fileObject.Size == 0 {
isDir := strings.HasSuffix(util.GetStringFromPointer(fileObject.Key), "/")
if isDir && fileObject.Size == 0 {
continue
}
numFiles++
size += *fileObject.Size
size += fileObject.Size
}
return true
})
metric.S3ListObjectsCompleted(err)
return numFiles, size, err
}

metric.S3ListObjectsCompleted(nil)
return numFiles, size, nil
}

func (fs *S3Fs) getFileNamesInPrefix(fsPrefix string) (map[string]bool, error) {

@@ -588,28 +598,36 @@ func (fs *S3Fs) getFileNamesInPrefix(fsPrefix string) (map[string]bool, error) {
if fsPrefix != "/" {
prefix = strings.TrimPrefix(fsPrefix, "/")
}
ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxLongTimeout))
defer cancelFn()

err := fs.svc.ListObjectsV2PagesWithContext(ctx, &s3.ListObjectsV2Input{
paginator := s3.NewListObjectsV2Paginator(fs.svc, &s3.ListObjectsV2Input{
Bucket: aws.String(fs.config.Bucket),
Prefix: aws.String(prefix),
Delimiter: aws.String("/"),
}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
})

for paginator.HasMorePages() {
ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
defer cancelFn()

page, err := paginator.NextPage(ctx)
if err != nil {
metric.S3ListObjectsCompleted(err)
if err != nil {
fsLog(fs, logger.LevelError, "unable to get content for prefix %#v: %+v", prefix, err)
return nil, err
}
return fileNames, err
}
for _, fileObject := range page.Contents {
name, isDir := fs.resolve(fileObject.Key, prefix)
if name != "" && !isDir {
fileNames[name] = true
}
}
return true
})
metric.S3ListObjectsCompleted(err)
if err != nil {
fsLog(fs, logger.LevelError, "unable to get content for prefix %#v: %v", prefix, err)
return nil, err
}
return fileNames, err

metric.S3ListObjectsCompleted(nil)
return fileNames, nil
}

// CheckMetadata checks the metadata consistency

@@ -637,7 +655,7 @@ func (fs *S3Fs) GetRelativePath(name string) string {
rel = ""
}
if !path.IsAbs(rel) {
return "/" + rel
rel = "/" + rel
}
if fs.config.KeyPrefix != "" {
if !strings.HasPrefix(rel, "/"+fs.config.KeyPrefix) {

@@ -654,44 +672,44 @@ func (fs *S3Fs) GetRelativePath(name string) string {
// Walk walks the file tree rooted at root, calling walkFn for each file or
// directory in the tree, including root. The result are unordered
func (fs *S3Fs) Walk(root string, walkFn filepath.WalkFunc) error {
prefix := ""
if root != "/" && root != "." {
prefix = strings.TrimPrefix(root, "/")
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
}
ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxLongTimeout))
defer cancelFn()
prefix := fs.getPrefix(root)

err := fs.svc.ListObjectsV2PagesWithContext(ctx, &s3.ListObjectsV2Input{
paginator := s3.NewListObjectsV2Paginator(fs.svc, &s3.ListObjectsV2Input{
Bucket: aws.String(fs.config.Bucket),
Prefix: aws.String(prefix),
}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
})

for paginator.HasMorePages() {
ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
defer cancelFn()

page, err := paginator.NextPage(ctx)
if err != nil {
metric.S3ListObjectsCompleted(err)
walkFn(root, NewFileInfo(root, true, 0, time.Now(), false), err) //nolint:errcheck
return err
}
for _, fileObject := range page.Contents {
objectSize := *fileObject.Size
objectModTime := *fileObject.LastModified
isDir := strings.HasSuffix(*fileObject.Key, "/")
name := path.Clean(*fileObject.Key)
if name == "/" || name == "." {
name, isDir := fs.resolve(fileObject.Key, prefix)
if name == "" {
continue
}
err := walkFn(fs.Join("/", *fileObject.Key), NewFileInfo(name, isDir, objectSize, objectModTime, false), nil)
err := walkFn(util.GetStringFromPointer(fileObject.Key),
NewFileInfo(name, isDir, fileObject.Size, util.GetTimeFromPointer(fileObject.LastModified), false), nil)
if err != nil {
return false
return err
}
}
return true
})
metric.S3ListObjectsCompleted(err)
walkFn(root, NewFileInfo(root, true, 0, time.Now(), false), err) //nolint:errcheck
}

return err
metric.S3ListObjectsCompleted(nil)
walkFn(root, NewFileInfo(root, true, 0, time.Now(), false), nil) //nolint:errcheck
return nil
}

// Join joins any number of path elements into a single path
func (*S3Fs) Join(elem ...string) string {
return path.Join(elem...)
return strings.TrimPrefix(path.Join(elem...), "/")
}

// HasVirtualFolders returns true if folders are emulated

@@ -707,20 +725,15 @@ func (fs *S3Fs) ResolvePath(virtualPath string) (string, error) {
if !path.IsAbs(virtualPath) {
virtualPath = path.Clean("/" + virtualPath)
}
return fs.Join("/", fs.config.KeyPrefix, virtualPath), nil
return fs.Join(fs.config.KeyPrefix, strings.TrimPrefix(virtualPath, "/")), nil
}

func (fs *S3Fs) resolve(name *string, prefix string) (string, bool) {
result := strings.TrimPrefix(*name, prefix)
result := strings.TrimPrefix(util.GetStringFromPointer(name), prefix)
isDir := strings.HasSuffix(result, "/")
if isDir {
result = strings.TrimSuffix(result, "/")
}
if strings.Contains(result, "/") {
i := strings.Index(result, "/")
isDir = true
result = result[:i]
}
return result, isDir
}

@@ -728,7 +741,7 @@ func (fs *S3Fs) checkIfBucketExists() error {
ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
defer cancelFn()

_, err := fs.svc.HeadBucketWithContext(ctx, &s3.HeadBucketInput{
_, err := fs.svc.HeadBucket(ctx, &s3.HeadBucketInput{
Bucket: aws.String(fs.config.Bucket),
})
metric.S3HeadBucketCompleted(err)
@@ -737,65 +750,148 @@ func (fs *S3Fs) checkIfBucketExists() error {
func (fs *S3Fs) setConfigDefaults() {
if fs.config.UploadPartSize == 0 {
fs.config.UploadPartSize = s3manager.DefaultUploadPartSize
fs.config.UploadPartSize = manager.DefaultUploadPartSize
} else {
if fs.config.UploadPartSize < 1024*1024 {
fs.config.UploadPartSize *= 1024 * 1024
}
}
if fs.config.UploadConcurrency == 0 {
fs.config.UploadConcurrency = s3manager.DefaultUploadConcurrency
fs.config.UploadConcurrency = manager.DefaultUploadConcurrency
}
if fs.config.DownloadPartSize == 0 {
fs.config.DownloadPartSize = s3manager.DefaultDownloadPartSize
fs.config.DownloadPartSize = manager.DefaultDownloadPartSize
} else {
if fs.config.DownloadPartSize < 1024*1024 {
fs.config.DownloadPartSize *= 1024 * 1024
}
}
if fs.config.DownloadConcurrency == 0 {
fs.config.DownloadConcurrency = s3manager.DefaultDownloadConcurrency
fs.config.DownloadConcurrency = manager.DefaultDownloadConcurrency
}
}

func (fs *S3Fs) hasContents(name string) (bool, error) {
prefix := fs.getPrefix(name)
paginator := s3.NewListObjectsV2Paginator(fs.svc, &s3.ListObjectsV2Input{
Bucket: aws.String(fs.config.Bucket),
Prefix: aws.String(prefix),
MaxKeys: 2,
})

if paginator.HasMorePages() {
ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
defer cancelFn()

page, err := paginator.NextPage(ctx)
metric.S3ListObjectsCompleted(err)
if err != nil {
return false, err
}

for _, obj := range page.Contents {
name, _ := fs.resolve(obj.Key, prefix)
if name == "" || name == "/" {
continue
}
return true, nil
}
return false, nil
}

metric.S3ListObjectsCompleted(nil)
return false, nil
}

func (fs *S3Fs) doMultipartCopy(source, target, contentType string, fileSize int64) error {
ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
defer cancelFn()

res, err := fs.svc.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
Bucket: aws.String(fs.config.Bucket),
Key: aws.String(target),
StorageClass: types.StorageClass(fs.config.StorageClass),
ACL: types.ObjectCannedACL(fs.config.ACL),
ContentType: util.NilIfEmpty(contentType),
})
if err != nil {
return fmt.Errorf("unable to create multipart copy request: %w", err)
}
uploadID := util.GetStringFromPointer(res.UploadId)
if uploadID == "" {
return errors.New("unable to get multipart copy upload ID")
}
maxPartSize := int64(500 * 1024 * 1024)
completedParts := make([]types.CompletedPart, 0)
partNumber := int32(1)

for copied := int64(0); copied < fileSize; copied += maxPartSize {
innerCtx, innerCancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
defer innerCancelFn()

partResp, err := fs.svc.UploadPartCopy(innerCtx, &s3.UploadPartCopyInput{
Bucket: aws.String(fs.config.Bucket),
CopySource: aws.String(source),
Key: aws.String(target),
PartNumber: partNumber,
UploadId: aws.String(uploadID),
CopySourceRange: aws.String(getMultipartCopyRange(copied, maxPartSize, fileSize)),
})
if err != nil {
fsLog(fs, logger.LevelError, "unable to copy part number %v: %+v", partNumber, err)
abortCtx, abortCancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
defer abortCancelFn()

_, errAbort := fs.svc.AbortMultipartUpload(abortCtx, &s3.AbortMultipartUploadInput{
Bucket: aws.String(fs.config.Bucket),
Key: aws.String(target),
UploadId: aws.String(uploadID),
})
if errAbort != nil {
fsLog(fs, logger.LevelError, "unable to abort multipart copy: %+v", errAbort)
}
return fmt.Errorf("error copying part number %v: %w", partNumber, err)
}
completedParts = append(completedParts, types.CompletedPart{
ETag: partResp.CopyPartResult.ETag,
PartNumber: partNumber,
})
partNumber++
}

completeCtx, completeCancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
defer completeCancelFn()

_, err = fs.svc.CompleteMultipartUpload(completeCtx, &s3.CompleteMultipartUploadInput{
Bucket: aws.String(fs.config.Bucket),
Key: aws.String(target),
UploadId: aws.String(uploadID),
MultipartUpload: &types.CompletedMultipartUpload{
Parts: completedParts,
},
})
if err != nil {
return fmt.Errorf("unable to complete multipart upload: %w", err)
}
return nil
}

func (fs *S3Fs) getPrefix(name string) string {
prefix := ""
if name != "/" && name != "." {
if name != "" && name != "." && name != "/" {
prefix = strings.TrimPrefix(name, "/")
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
}
maxResults := int64(2)
ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
defer cancelFn()

results, err := fs.svc.ListObjectsV2WithContext(ctx, &s3.ListObjectsV2Input{
Bucket: aws.String(fs.config.Bucket),
Prefix: aws.String(prefix),
MaxKeys: &maxResults,
})
metric.S3ListObjectsCompleted(err)
if err != nil {
return false, err
}
// MinIO returns no contents while S3 returns 1 object
// with the key equal to the prefix for empty directories
for _, obj := range results.Contents {
name, _ := fs.resolve(obj.Key, prefix)
if name == "" || name == "/" {
continue
}
return true, nil
}
return false, nil
return prefix
}

func (fs *S3Fs) headObject(name string) (*s3.HeadObjectOutput, error) {
ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
defer cancelFn()

obj, err := fs.svc.HeadObjectWithContext(ctx, &s3.HeadObjectInput{
obj, err := fs.svc.HeadObject(ctx, &s3.HeadObjectInput{
Bucket: aws.String(fs.config.Bucket),
Key: aws.String(name),
})
@@ -809,7 +905,7 @@ func (fs *S3Fs) GetMimeType(name string) (string, error) {
if err != nil {
return "", err
}
return *obj.ContentType, err
return util.GetStringFromPointer(obj.ContentType), nil
}

// Close closes the fs
@@ -832,6 +928,32 @@ func (fs *S3Fs) getStorageID() string {
return fmt.Sprintf("s3://%v", fs.config.Bucket)
}

func getMultipartCopyRange(start, maxPartSize, fileSize int64) string {
end := start + maxPartSize - 1
if end > fileSize {
end = fileSize - 1
}

return fmt.Sprintf("bytes=%v-%v", start, end)
}

func getAWSHTTPClient(timeout int, idleConnectionTimeout time.Duration) *awshttp.BuildableClient {
c := awshttp.NewBuildableClient().
WithDialerOptions(func(d *net.Dialer) {
d.Timeout = 8 * time.Second
}).
WithTransportOptions(func(tr *http.Transport) {
tr.IdleConnTimeout = idleConnectionTimeout
tr.ResponseHeaderTimeout = 5 * time.Second
tr.WriteBufferSize = s3TransferBufferSize
tr.ReadBufferSize = s3TransferBufferSize
})
if timeout > 0 {
c = c.WithTimeout(time.Duration(timeout) * time.Second)
}
return c
}

// ideally we should simply use url.PathEscape:
//
// https://github.com/awsdocs/aws-doc-sdk-examples/blob/master/go/example_code/s3/s3_copy_object.go#L65
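As a quick sanity check of getMultipartCopyRange above: for a 1200 MiB object copied with the 500 MiB maximum part size used by doMultipartCopy, the loop issues three UploadPartCopy calls with the ranges printed by this small, self-contained program (the helper is copied here only for illustration):

package main

import "fmt"

// same logic as getMultipartCopyRange in the hunk above
func getMultipartCopyRange(start, maxPartSize, fileSize int64) string {
	end := start + maxPartSize - 1
	if end > fileSize {
		end = fileSize - 1
	}
	return fmt.Sprintf("bytes=%v-%v", start, end)
}

func main() {
	const mib = int64(1024 * 1024)
	fileSize := 1200 * mib
	maxPartSize := 500 * mib
	for copied := int64(0); copied < fileSize; copied += maxPartSize {
		fmt.Println(getMultipartCopyRange(copied, maxPartSize, fileSize))
	}
	// Output:
	// bytes=0-524287999
	// bytes=524288000-1048575999
	// bytes=1048576000-1258291199
}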