cloud storage providers: remove head bucket requests

Let's just assume the bucket exists on "stat" requests for the "/" path.

Signed-off-by: Nicola Murino <nicola.murino@gmail.com>
parent a61211d32c
commit 526f6e0f6b

6 changed files with 11 additions and 121 deletions
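Before the diffs, a short illustration of the behavioral change described in the commit message. This is a minimal, self-contained sketch, not the project's code: fakeDirInfo and statRoot are hypothetical stand-ins for the NewFileInfo/updateFileInfoModTime helpers that appear in the Stat hunks below. The point it shows is that for the root path ("", "/", ".") Stat now returns a synthetic directory entry directly, so no head bucket, bucket Attrs, or container GetProperties request is issued.

package main

import (
	"fmt"
	"os"
	"time"
)

// fakeDirInfo is a hypothetical stand-in for the synthetic directory entry the
// real code builds via NewFileInfo and updateFileInfoModTime.
type fakeDirInfo struct {
	name    string
	modTime time.Time
}

func (f fakeDirInfo) Name() string       { return f.name }
func (f fakeDirInfo) Size() int64        { return 0 }
func (f fakeDirInfo) Mode() os.FileMode  { return os.ModeDir | 0o755 }
func (f fakeDirInfo) ModTime() time.Time { return f.modTime }
func (f fakeDirInfo) IsDir() bool        { return true }
func (f fakeDirInfo) Sys() any           { return nil }

// statRoot mirrors the short-circuit introduced by this commit: for "", "/" and
// "." the bucket/container is assumed to exist and a synthetic directory is
// returned without contacting the storage provider.
func statRoot(name string) (os.FileInfo, error) {
	if name == "" || name == "/" || name == "." {
		return fakeDirInfo{name: name, modTime: time.Now()}, nil
	}
	return nil, fmt.Errorf("this sketch only covers the root path, got %q", name)
}

func main() {
	fi, err := statRoot("/")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("name=%q dir=%v mode=%v\n", fi.Name(), fi.IsDir(), fi.Mode())
}

One practical consequence, consistent with the removed checkIfBucketExists helpers below: a missing or inaccessible bucket is no longer detected on the root "stat"; it surfaces on the first real object operation instead.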
go.mod (2 changes)

@@ -70,7 +70,7 @@ require (
 	golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7
 	golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664
 	golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9
-	google.golang.org/api v0.91.0
+	google.golang.org/api v0.92.0
 	gopkg.in/natefinch/lumberjack.v2 v2.0.0
 )

go.sum (4 changes)

@@ -1118,8 +1118,8 @@ google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3
 google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
 google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
 google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
-google.golang.org/api v0.91.0 h1:731+JzuwaJoZXRQGmPoBiV+SrsAfUaIkdMCWTcQNPyA=
-google.golang.org/api v0.91.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.92.0 h1:8JHk7q/+rJla+iRsWj9FQ9/wjv2M1SKtpKSdmLhxPT0=
+google.golang.org/api v0.92.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=

@@ -399,18 +399,6 @@ var (
 		Help: "The total number of S3 head object errors",
 	})

-	// totalS3HeadBucket is the metric that reports the total successful S3 head bucket requests
-	totalS3HeadBucket = promauto.NewCounter(prometheus.CounterOpts{
-		Name: "sftpgo_s3_head_bucket",
-		Help: "The total number of successful S3 head bucket requests",
-	})
-
-	// totalS3HeadBucketErrors is the metric that reports the total S3 head bucket errors
-	totalS3HeadBucketErrors = promauto.NewCounter(prometheus.CounterOpts{
-		Name: "sftpgo_s3_head_bucket_errors",
-		Help: "The total number of S3 head bucket errors",
-	})
-
 	// totalGCSUploads is the metric that reports the total number of successful GCS uploads
 	totalGCSUploads = promauto.NewCounter(prometheus.CounterOpts{
 		Name: "sftpgo_gcs_uploads_total",

@@ -495,18 +483,6 @@ var (
 		Help: "The total number of GCS head object errors",
 	})

-	// totalGCSHeadBucket is the metric that reports the total successful GCS head bucket requests
-	totalGCSHeadBucket = promauto.NewCounter(prometheus.CounterOpts{
-		Name: "sftpgo_gcs_head_bucket",
-		Help: "The total number of successful GCS head bucket requests",
-	})
-
-	// totalGCSHeadBucketErrors is the metric that reports the total GCS head bucket errors
-	totalGCSHeadBucketErrors = promauto.NewCounter(prometheus.CounterOpts{
-		Name: "sftpgo_gcs_head_bucket_errors",
-		Help: "The total number of GCS head bucket errors",
-	})
-
 	// totalAZUploads is the metric that reports the total number of successful Azure uploads
 	totalAZUploads = promauto.NewCounter(prometheus.CounterOpts{
 		Name: "sftpgo_az_uploads_total",

@@ -591,18 +567,6 @@ var (
 		Help: "The total number of Azure head object errors",
 	})

-	// totalAZHeadContainer is the metric that reports the total successful Azure head container requests
-	totalAZHeadContainer = promauto.NewCounter(prometheus.CounterOpts{
-		Name: "sftpgo_az_head_container",
-		Help: "The total number of successful Azure head container requests",
-	})
-
-	// totalAZHeadContainerErrors is the metric that reports the total Azure head container errors
-	totalAZHeadContainerErrors = promauto.NewCounter(prometheus.CounterOpts{
-		Name: "sftpgo_az_head_container_errors",
-		Help: "The total number of Azure head container errors",
-	})
-
 	// totalSFTPFsUploads is the metric that reports the total number of successful SFTPFs uploads
 	totalSFTPFsUploads = promauto.NewCounter(prometheus.CounterOpts{
 		Name: "sftpgo_sftpfs_uploads_total",

@@ -766,15 +730,6 @@ func S3HeadObjectCompleted(err error) {
 	}
 }

-// S3HeadBucketCompleted updates metrics after a S3 head bucket request terminates
-func S3HeadBucketCompleted(err error) {
-	if err == nil {
-		totalS3HeadBucket.Inc()
-	} else {
-		totalS3HeadBucketErrors.Inc()
-	}
-}
-
 // GCSTransferCompleted updates metrics after a GCS upload or a download
 func GCSTransferCompleted(bytes int64, transferKind int, err error) {
 	if transferKind == 0 {

@@ -832,15 +787,6 @@ func GCSHeadObjectCompleted(err error) {
 	}
 }

-// GCSHeadBucketCompleted updates metrics after a GCS head bucket request terminates
-func GCSHeadBucketCompleted(err error) {
-	if err == nil {
-		totalGCSHeadBucket.Inc()
-	} else {
-		totalGCSHeadBucketErrors.Inc()
-	}
-}
-
 // AZTransferCompleted updates metrics after a Azure upload or a download
 func AZTransferCompleted(bytes int64, transferKind int, err error) {
 	if transferKind == 0 {

@@ -898,15 +844,6 @@ func AZHeadObjectCompleted(err error) {
 	}
 }

-// AZHeadContainerCompleted updates metrics after a Azure head container request terminates
-func AZHeadContainerCompleted(err error) {
-	if err == nil {
-		totalAZHeadContainer.Inc()
-	} else {
-		totalAZHeadContainerErrors.Inc()
-	}
-}
-
 // sftpFsTransferCompleted updates metrics after an SFTPFs upload or a download
 func sftpFsTransferCompleted(bytesSent, bytesReceived int64, transferKind int, err error) {
 	if transferKind == 0 {

@@ -56,12 +56,11 @@ type AzureBlobFs struct {
 	connectionID string
 	localTempDir string
 	// if not empty this fs is mouted as virtual folder in the specified path
-	mountPath          string
-	config             *AzBlobFsConfig
-	hasContainerAccess bool
-	containerClient    *azblob.ContainerClient
-	ctxTimeout         time.Duration
-	ctxLongTimeout     time.Duration
+	mountPath       string
+	config          *AzBlobFsConfig
+	containerClient *azblob.ContainerClient
+	ctxTimeout      time.Duration
+	ctxLongTimeout  time.Duration
 }

 func init() {

@@ -124,7 +123,6 @@ func NewAzBlobFs(connectionID, localTempDir, mountPath string, config AzBlobFsCo
 		}
 		fs.containerClient, err = svc.NewContainerClient(fs.config.Container)
 	}
-	fs.hasContainerAccess = false
 	return fs, err
 }

@@ -142,7 +140,6 @@ func NewAzBlobFs(connectionID, localTempDir, mountPath string, config AzBlobFsCo
 	if err != nil {
 		return fs, fmt.Errorf("invalid credentials: %v", err)
 	}
-	fs.hasContainerAccess = true
 	fs.containerClient, err = svc.NewContainerClient(fs.config.Container)
 	return fs, err
 }

@@ -162,13 +159,7 @@ func (fs *AzureBlobFs) ConnectionID() string {

 // Stat returns a FileInfo describing the named file
 func (fs *AzureBlobFs) Stat(name string) (os.FileInfo, error) {
-	if name == "" || name == "." {
-		if fs.hasContainerAccess {
-			err := fs.checkIfBucketExists()
-			if err != nil {
-				return nil, err
-			}
-		}
+	if name == "" || name == "/" || name == "." {
 		return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, true, 0, time.Now(), false))
 	}
 	if fs.config.KeyPrefix == name+"/" {

@@ -835,15 +826,6 @@ func (fs *AzureBlobFs) setConfigDefaults() {
 	}
 }

-func (fs *AzureBlobFs) checkIfBucketExists() error {
-	ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
-	defer cancelFn()
-
-	_, err := fs.containerClient.GetProperties(ctx, &azblob.ContainerGetPropertiesOptions{})
-	metric.AZHeadContainerCompleted(err)
-	return err
-}
-
 func (fs *AzureBlobFs) hasContents(name string) (bool, error) {
 	result := false
 	prefix := fs.getPrefix(name)

@@ -114,11 +114,7 @@ func (fs *GCSFs) ConnectionID() string {

 // Stat returns a FileInfo describing the named file
 func (fs *GCSFs) Stat(name string) (os.FileInfo, error) {
-	if name == "" || name == "." {
-		err := fs.checkIfBucketExists()
-		if err != nil {
-			return nil, err
-		}
+	if name == "" || name == "/" || name == "." {
 		return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, true, 0, time.Now(), false))
 	}
 	if fs.config.KeyPrefix == name+"/" {

@@ -762,16 +758,6 @@ func (fs *GCSFs) getObjectStat(name string) (string, os.FileInfo, error) {
 	return name + "/", info, err
 }

-func (fs *GCSFs) checkIfBucketExists() error {
-	ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
-	defer cancelFn()
-
-	bkt := fs.svc.Bucket(fs.config.Bucket)
-	_, err := bkt.Attrs(ctx)
-	metric.GCSHeadBucketCompleted(err)
-	return err
-}
-
 func (fs *GCSFs) hasContents(name string) (bool, error) {
 	result := false
 	prefix := fs.getPrefix(name)

@@ -151,14 +151,10 @@ func (fs *S3Fs) ConnectionID() string {
 func (fs *S3Fs) Stat(name string) (os.FileInfo, error) {
 	var result *FileInfo
 	if name == "" || name == "/" || name == "." {
-		err := fs.checkIfBucketExists()
-		if err != nil {
-			return result, err
-		}
 		return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, true, 0, time.Now(), false))
 	}
 	if fs.config.KeyPrefix == name+"/" {
-		return NewFileInfo(name, true, 0, time.Now(), false), nil
+		return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, true, 0, time.Now(), false))
 	}
 	obj, err := fs.headObject(name)
 	if err == nil {

@@ -755,17 +751,6 @@ func (fs *S3Fs) resolve(name *string, prefix string) (string, bool) {
 	return result, isDir
 }

-func (fs *S3Fs) checkIfBucketExists() error {
-	ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
-	defer cancelFn()
-
-	_, err := fs.svc.HeadBucket(ctx, &s3.HeadBucketInput{
-		Bucket: aws.String(fs.config.Bucket),
-	})
-	metric.S3HeadBucketCompleted(err)
-	return err
-}
-
 func (fs *S3Fs) setConfigDefaults() {
 	if fs.config.UploadPartSize == 0 {
 		fs.config.UploadPartSize = manager.DefaultUploadPartSize