2020-01-19 12:58:55 +00:00
|
|
|
// Package vfs provides local and remote filesystems support
|
2020-01-19 06:41:05 +00:00
|
|
|
package vfs
|
|
|
|
|
|
|
|
import (
|
|
|
|
"errors"
|
2020-01-31 18:04:00 +00:00
|
|
|
"fmt"
|
2020-11-17 18:36:39 +00:00
|
|
|
"io"
|
2020-10-25 07:18:48 +00:00
|
|
|
"net/url"
|
2020-01-19 06:41:05 +00:00
|
|
|
"os"
|
2020-01-19 22:23:09 +00:00
|
|
|
"path"
|
2020-06-26 21:38:29 +00:00
|
|
|
"path/filepath"
|
2020-01-19 06:41:05 +00:00
|
|
|
"runtime"
|
2020-01-19 22:23:09 +00:00
|
|
|
"strings"
|
2020-01-19 06:41:05 +00:00
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/eikenb/pipeat"
|
2021-02-11 18:45:52 +00:00
|
|
|
"github.com/pkg/sftp"
|
2022-01-06 10:54:43 +00:00
|
|
|
"github.com/sftpgo/sdk"
|
|
|
|
"github.com/sftpgo/sdk/plugin/metadata"
|
2020-05-06 17:36:34 +00:00
|
|
|
|
2022-01-06 09:11:47 +00:00
|
|
|
"github.com/drakkan/sftpgo/v2/kms"
|
2021-06-26 05:31:41 +00:00
|
|
|
"github.com/drakkan/sftpgo/v2/logger"
|
2022-01-05 10:37:45 +00:00
|
|
|
"github.com/drakkan/sftpgo/v2/plugin"
|
2021-07-11 13:26:51 +00:00
|
|
|
"github.com/drakkan/sftpgo/v2/util"
|
2020-01-19 06:41:05 +00:00
|
|
|
)
|
|
|
|
|
2020-10-25 07:18:48 +00:00
|
|
|
// dirMimeType is the MIME type used to report directory entries.
const dirMimeType = "inode/directory"
|
|
|
|
|
2020-12-25 10:14:08 +00:00
|
|
|
var (
	// validAzAccessTier lists the accepted values for the Azure Blob access tier,
	// the empty string means "use the account default"
	validAzAccessTier = []string{"", "Archive", "Hot", "Cool"}
	// ErrStorageSizeUnavailable is returned if the storage backend does not support getting the size
	ErrStorageSizeUnavailable = errors.New("unable to get available size for this storage backend")
	// ErrVfsUnsupported defines the error for an unsupported VFS operation
	ErrVfsUnsupported = errors.New("not supported")
	// tempPath is the configured path for temporary files, see SetTempPath/GetTempPath
	tempPath string
	// sftpFingerprints holds the SFTP host key fingerprints, see SetSFTPFingerprints
	sftpFingerprints []string
)
|
2020-10-30 21:17:17 +00:00
|
|
|
|
2021-05-27 13:38:27 +00:00
|
|
|
// SetTempPath sets the package-level path used for temporary files.
func SetTempPath(fsPath string) {
	tempPath = fsPath
}
|
|
|
|
|
|
|
|
// GetTempPath returns the path for temporary files.
func GetTempPath() string {
	return tempPath
}
|
|
|
|
|
2021-04-01 16:53:48 +00:00
|
|
|
// SetSFTPFingerprints sets the SFTP host key fingerprints.
func SetSFTPFingerprints(fp []string) {
	sftpFingerprints = fp
}
|
|
|
|
|
2020-01-31 18:04:00 +00:00
|
|
|
// Fs defines the interface for filesystem backends
type Fs interface {
	// Name returns the name of the backend implementation
	Name() string
	// ConnectionID returns the ID of the connection using this filesystem
	ConnectionID() string
	// Stat returns the FileInfo for the named file
	Stat(name string) (os.FileInfo, error)
	// Lstat is like Stat but, by convention, should not follow symlinks
	Lstat(name string) (os.FileInfo, error)
	// Open opens the named file for reading from the given offset.
	// Depending on the backend either the File or the PipeReaderAt is used;
	// the returned func, if not nil, must be invoked to release resources
	Open(name string, offset int64) (File, *pipeat.PipeReaderAt, func(), error)
	// Create opens the named file for writing using the given flag.
	// Same tri-state return convention as Open
	Create(name string, flag int) (File, *PipeWriter, func(), error)
	// Rename renames source to target
	Rename(source, target string) error
	// Remove deletes the named file or directory
	Remove(name string, isDir bool) error
	// Mkdir creates the named directory
	Mkdir(name string) error
	// Symlink creates target as a symbolic link to source
	Symlink(source, target string) error
	// Chown changes the owner/group of the named file
	Chown(name string, uid int, gid int) error
	// Chmod changes the mode of the named file
	Chmod(name string, mode os.FileMode) error
	// Chtimes changes the access and modification times of the named file
	Chtimes(name string, atime, mtime time.Time, isUploading bool) error
	// Truncate resizes the named file
	Truncate(name string, size int64) error
	// ReadDir reads the named directory and returns its entries
	ReadDir(dirname string) ([]os.FileInfo, error)
	// Readlink returns the destination of the named symbolic link
	Readlink(name string) (string, error)
	// IsUploadResumeSupported reports whether resuming uploads is supported
	IsUploadResumeSupported() bool
	// IsAtomicUploadSupported reports whether atomic uploads are supported
	IsAtomicUploadSupported() bool
	// CheckRootPath checks the root directory for the given user
	CheckRootPath(username string, uid int, gid int) bool
	// ResolvePath converts a virtual path to a backend-specific path
	ResolvePath(virtualPath string) (string, error)
	// IsNotExist reports whether err indicates a missing file or directory
	IsNotExist(err error) bool
	// IsPermission reports whether err indicates a permission error
	IsPermission(err error) bool
	// IsNotSupported reports whether err indicates an unsupported operation
	IsNotSupported(err error) bool
	// ScanRootDirContents returns the number of files and their total size below the root
	ScanRootDirContents() (int, int64, error)
	// GetDirSize returns the number of files and their total size inside dirname
	GetDirSize(dirname string) (int, int64, error)
	// GetAtomicUploadPath returns the temporary path to use for an atomic upload
	GetAtomicUploadPath(name string) string
	// GetRelativePath returns the path for a file relative to the user's root
	GetRelativePath(name string) string
	// Walk walks the file tree rooted at root, calling walkFn for each entry
	Walk(root string, walkFn filepath.WalkFunc) error
	// Join joins path elements into a single path
	Join(elem ...string) string
	// HasVirtualFolders reports whether the backend emulates folders
	HasVirtualFolders() bool
	// GetMimeType returns the content type of the named file
	GetMimeType(name string) (string, error)
	// GetAvailableDiskSize returns stat info for the filesystem containing dirName,
	// ErrStorageSizeUnavailable if not supported
	GetAvailableDiskSize(dirName string) (*sftp.StatVFS, error)
	// CheckMetadata checks metadata consistency, see fsMetadataCheck
	CheckMetadata() error
	// Close releases any resources held by the filesystem
	Close() error
}
|
|
|
|
|
2022-07-17 14:02:45 +00:00
|
|
|
// FsRealPather is a Fs that implements the RealPath method.
type FsRealPather interface {
	Fs
	// RealPath returns the canonicalized form of p
	RealPath(p string) (string, error)
}
|
|
|
|
|
2021-12-16 17:18:36 +00:00
|
|
|
// fsMetadataChecker is a Fs that implements the getFileNamesInPrefix method.
// This interface is used to abstract metadata consistency checks
type fsMetadataChecker interface {
	Fs
	// getFileNamesInPrefix returns the set of file names stored under fsPrefix,
	// the map values are unused
	getFileNamesInPrefix(fsPrefix string) (map[string]bool, error)
}
|
|
|
|
|
2020-11-17 18:36:39 +00:00
|
|
|
// File defines an interface representing a SFTPGo file
type File interface {
	io.Reader
	io.Writer
	io.Closer
	io.ReaderAt
	io.WriterAt
	io.Seeker
	// Stat returns the FileInfo describing the file
	Stat() (os.FileInfo, error)
	// Name returns the name of the file
	Name() string
	// Truncate resizes the file
	Truncate(size int64) error
}
|
|
|
|
|
2020-06-16 20:49:18 +00:00
|
|
|
// QuotaCheckResult defines the result for a quota check
type QuotaCheckResult struct {
	// HasSpace reports whether the quota allows more usage
	HasSpace bool
	// AllowedSize is the remaining allowed size in bytes
	AllowedSize int64
	// AllowedFiles is the remaining allowed number of files
	AllowedFiles int
	// UsedSize is the size in bytes currently used
	UsedSize int64
	// UsedFiles is the number of files currently used
	UsedFiles int
	// QuotaSize is the configured size limit, values <= 0 mean unlimited
	QuotaSize int64
	// QuotaFiles is the configured files limit, values <= 0 mean unlimited
	QuotaFiles int
}
|
|
|
|
|
|
|
|
// GetRemainingSize returns the remaining allowed size
|
|
|
|
func (q *QuotaCheckResult) GetRemainingSize() int64 {
|
|
|
|
if q.QuotaSize > 0 {
|
|
|
|
return q.QuotaSize - q.UsedSize
|
|
|
|
}
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2020-06-18 20:38:03 +00:00
|
|
|
// GetRemainingFiles returns the remaining allowed files
|
2020-06-16 20:49:18 +00:00
|
|
|
func (q *QuotaCheckResult) GetRemainingFiles() int {
|
|
|
|
if q.QuotaFiles > 0 {
|
|
|
|
return q.QuotaFiles - q.UsedFiles
|
|
|
|
}
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2020-05-23 09:58:05 +00:00
|
|
|
// S3FsConfig defines the configuration for S3 based filesystem
type S3FsConfig struct {
	sdk.BaseS3FsConfig
	// AccessSecret is the S3 secret key, stored encrypted based on the kms configuration
	AccessSecret *kms.Secret `json:"access_secret,omitempty"`
}
|
|
|
|
|
2021-09-11 12:19:17 +00:00
|
|
|
// HideConfidentialData hides confidential data
func (c *S3FsConfig) HideConfidentialData() {
	if c.AccessSecret != nil {
		c.AccessSecret.Hide()
	}
}
|
|
|
|
|
2021-03-27 18:10:27 +00:00
|
|
|
func (c *S3FsConfig) isEqual(other *S3FsConfig) bool {
|
|
|
|
if c.Bucket != other.Bucket {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if c.KeyPrefix != other.KeyPrefix {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if c.Region != other.Region {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if c.AccessKey != other.AccessKey {
|
|
|
|
return false
|
|
|
|
}
|
2022-02-28 19:19:13 +00:00
|
|
|
if c.RoleARN != other.RoleARN {
|
|
|
|
return false
|
|
|
|
}
|
2021-03-27 18:10:27 +00:00
|
|
|
if c.Endpoint != other.Endpoint {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if c.StorageClass != other.StorageClass {
|
|
|
|
return false
|
|
|
|
}
|
2021-11-13 15:05:40 +00:00
|
|
|
if c.ACL != other.ACL {
|
|
|
|
return false
|
|
|
|
}
|
2022-02-25 14:30:04 +00:00
|
|
|
if !c.areMultipartFieldsEqual(other) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
if c.ForcePathStyle != other.ForcePathStyle {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return c.isSecretEqual(other)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *S3FsConfig) areMultipartFieldsEqual(other *S3FsConfig) bool {
|
2021-03-27 18:10:27 +00:00
|
|
|
if c.UploadPartSize != other.UploadPartSize {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if c.UploadConcurrency != other.UploadConcurrency {
|
|
|
|
return false
|
|
|
|
}
|
2021-07-23 14:56:48 +00:00
|
|
|
if c.DownloadConcurrency != other.DownloadConcurrency {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if c.DownloadPartSize != other.DownloadPartSize {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if c.DownloadPartMaxTime != other.DownloadPartMaxTime {
|
|
|
|
return false
|
|
|
|
}
|
2022-02-01 11:15:56 +00:00
|
|
|
if c.UploadPartMaxTime != other.UploadPartMaxTime {
|
|
|
|
return false
|
|
|
|
}
|
2022-02-25 14:30:04 +00:00
|
|
|
return true
|
2021-11-13 15:05:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (c *S3FsConfig) isSecretEqual(other *S3FsConfig) bool {
|
2021-03-27 18:10:27 +00:00
|
|
|
if c.AccessSecret == nil {
|
|
|
|
c.AccessSecret = kms.NewEmptySecret()
|
|
|
|
}
|
|
|
|
if other.AccessSecret == nil {
|
|
|
|
other.AccessSecret = kms.NewEmptySecret()
|
|
|
|
}
|
|
|
|
return c.AccessSecret.IsEqual(other.AccessSecret)
|
|
|
|
}
|
|
|
|
|
2020-12-12 09:31:09 +00:00
|
|
|
func (c *S3FsConfig) checkCredentials() error {
|
|
|
|
if c.AccessKey == "" && !c.AccessSecret.IsEmpty() {
|
|
|
|
return errors.New("access_key cannot be empty with access_secret not empty")
|
|
|
|
}
|
|
|
|
if c.AccessSecret.IsEmpty() && c.AccessKey != "" {
|
|
|
|
return errors.New("access_secret cannot be empty with access_key not empty")
|
|
|
|
}
|
|
|
|
if c.AccessSecret.IsEncrypted() && !c.AccessSecret.IsValid() {
|
|
|
|
return errors.New("invalid encrypted access_secret")
|
|
|
|
}
|
|
|
|
if !c.AccessSecret.IsEmpty() && !c.AccessSecret.IsValidInput() {
|
|
|
|
return errors.New("invalid access_secret")
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-04-28 10:55:01 +00:00
|
|
|
// ValidateAndEncryptCredentials validates the configuration and encrypts access secret if it is in plain text
|
|
|
|
func (c *S3FsConfig) ValidateAndEncryptCredentials(additionalData string) error {
|
|
|
|
if err := c.validate(); err != nil {
|
|
|
|
return util.NewValidationError(fmt.Sprintf("could not validate s3config: %v", err))
|
|
|
|
}
|
2020-12-12 09:31:09 +00:00
|
|
|
if c.AccessSecret.IsPlain() {
|
|
|
|
c.AccessSecret.SetAdditionalData(additionalData)
|
|
|
|
err := c.AccessSecret.Encrypt()
|
|
|
|
if err != nil {
|
2022-04-28 10:55:01 +00:00
|
|
|
return util.NewValidationError(fmt.Sprintf("could not encrypt s3 access secret: %v", err))
|
2020-12-12 09:31:09 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-07-23 14:56:48 +00:00
|
|
|
func (c *S3FsConfig) checkPartSizeAndConcurrency() error {
|
|
|
|
if c.UploadPartSize != 0 && (c.UploadPartSize < 5 || c.UploadPartSize > 5000) {
|
|
|
|
return errors.New("upload_part_size cannot be != 0, lower than 5 (MB) or greater than 5000 (MB)")
|
|
|
|
}
|
|
|
|
if c.UploadConcurrency < 0 || c.UploadConcurrency > 64 {
|
|
|
|
return fmt.Errorf("invalid upload concurrency: %v", c.UploadConcurrency)
|
|
|
|
}
|
|
|
|
if c.DownloadPartSize != 0 && (c.DownloadPartSize < 5 || c.DownloadPartSize > 5000) {
|
|
|
|
return errors.New("download_part_size cannot be != 0, lower than 5 (MB) or greater than 5000 (MB)")
|
|
|
|
}
|
|
|
|
if c.DownloadConcurrency < 0 || c.DownloadConcurrency > 64 {
|
|
|
|
return fmt.Errorf("invalid download concurrency: %v", c.DownloadConcurrency)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-04-28 10:55:01 +00:00
|
|
|
// validate returns an error if the configuration is not valid
|
|
|
|
func (c *S3FsConfig) validate() error {
|
2020-12-12 09:31:09 +00:00
|
|
|
if c.AccessSecret == nil {
|
|
|
|
c.AccessSecret = kms.NewEmptySecret()
|
|
|
|
}
|
|
|
|
if c.Bucket == "" {
|
|
|
|
return errors.New("bucket cannot be empty")
|
|
|
|
}
|
2022-06-20 17:55:01 +00:00
|
|
|
// the region may be embedded within the endpoint for some S3 compatible
|
|
|
|
// object storage, for example B2
|
|
|
|
if c.Endpoint == "" && c.Region == "" {
|
2020-12-12 09:31:09 +00:00
|
|
|
return errors.New("region cannot be empty")
|
|
|
|
}
|
|
|
|
if err := c.checkCredentials(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if c.KeyPrefix != "" {
|
|
|
|
if strings.HasPrefix(c.KeyPrefix, "/") {
|
|
|
|
return errors.New("key_prefix cannot start with /")
|
|
|
|
}
|
|
|
|
c.KeyPrefix = path.Clean(c.KeyPrefix)
|
|
|
|
if !strings.HasSuffix(c.KeyPrefix, "/") {
|
|
|
|
c.KeyPrefix += "/"
|
|
|
|
}
|
|
|
|
}
|
2021-11-13 15:05:40 +00:00
|
|
|
c.StorageClass = strings.TrimSpace(c.StorageClass)
|
|
|
|
c.ACL = strings.TrimSpace(c.ACL)
|
2021-07-23 14:56:48 +00:00
|
|
|
return c.checkPartSizeAndConcurrency()
|
2020-12-12 09:31:09 +00:00
|
|
|
}
|
|
|
|
|
2020-05-23 09:58:05 +00:00
|
|
|
// GCSFsConfig defines the configuration for Google Cloud Storage based filesystem
type GCSFsConfig struct {
	sdk.BaseGCSFsConfig
	// Credentials is the GCS JSON credentials blob, stored encrypted based on the kms configuration
	Credentials *kms.Secret `json:"credentials,omitempty"`
}
|
|
|
|
|
2021-09-11 12:19:17 +00:00
|
|
|
// HideConfidentialData hides confidential data
func (c *GCSFsConfig) HideConfidentialData() {
	if c.Credentials != nil {
		c.Credentials.Hide()
	}
}
|
|
|
|
|
2022-04-28 10:55:01 +00:00
|
|
|
// ValidateAndEncryptCredentials validates the configuration and encrypts credentials if they are in plain text
|
|
|
|
func (c *GCSFsConfig) ValidateAndEncryptCredentials(additionalData string) error {
|
|
|
|
if err := c.validate(); err != nil {
|
|
|
|
return util.NewValidationError(fmt.Sprintf("could not validate GCS config: %v", err))
|
|
|
|
}
|
|
|
|
if c.Credentials.IsPlain() {
|
|
|
|
c.Credentials.SetAdditionalData(additionalData)
|
|
|
|
err := c.Credentials.Encrypt()
|
|
|
|
if err != nil {
|
|
|
|
return util.NewValidationError(fmt.Sprintf("could not encrypt GCS credentials: %v", err))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-03-27 18:10:27 +00:00
|
|
|
func (c *GCSFsConfig) isEqual(other *GCSFsConfig) bool {
|
|
|
|
if c.Bucket != other.Bucket {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if c.KeyPrefix != other.KeyPrefix {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if c.AutomaticCredentials != other.AutomaticCredentials {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if c.StorageClass != other.StorageClass {
|
|
|
|
return false
|
|
|
|
}
|
2021-11-15 20:57:41 +00:00
|
|
|
if c.ACL != other.ACL {
|
|
|
|
return false
|
|
|
|
}
|
2021-03-27 18:10:27 +00:00
|
|
|
if c.Credentials == nil {
|
|
|
|
c.Credentials = kms.NewEmptySecret()
|
|
|
|
}
|
|
|
|
if other.Credentials == nil {
|
|
|
|
other.Credentials = kms.NewEmptySecret()
|
|
|
|
}
|
|
|
|
return c.Credentials.IsEqual(other.Credentials)
|
|
|
|
}
|
|
|
|
|
2022-04-28 10:55:01 +00:00
|
|
|
// validate returns an error if the configuration is not valid
|
|
|
|
func (c *GCSFsConfig) validate() error {
|
2021-11-15 18:12:58 +00:00
|
|
|
if c.Credentials == nil || c.AutomaticCredentials == 1 {
|
2020-12-12 09:31:09 +00:00
|
|
|
c.Credentials = kms.NewEmptySecret()
|
|
|
|
}
|
|
|
|
if c.Bucket == "" {
|
|
|
|
return errors.New("bucket cannot be empty")
|
|
|
|
}
|
|
|
|
if c.KeyPrefix != "" {
|
|
|
|
if strings.HasPrefix(c.KeyPrefix, "/") {
|
|
|
|
return errors.New("key_prefix cannot start with /")
|
|
|
|
}
|
|
|
|
c.KeyPrefix = path.Clean(c.KeyPrefix)
|
|
|
|
if !strings.HasSuffix(c.KeyPrefix, "/") {
|
|
|
|
c.KeyPrefix += "/"
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if c.Credentials.IsEncrypted() && !c.Credentials.IsValid() {
|
|
|
|
return errors.New("invalid encrypted credentials")
|
|
|
|
}
|
2021-11-15 18:12:58 +00:00
|
|
|
if c.AutomaticCredentials == 0 && !c.Credentials.IsValidInput() {
|
2022-04-28 10:55:01 +00:00
|
|
|
return errors.New("invalid credentials")
|
2020-12-12 09:31:09 +00:00
|
|
|
}
|
2021-11-13 15:05:40 +00:00
|
|
|
c.StorageClass = strings.TrimSpace(c.StorageClass)
|
2021-11-15 20:57:41 +00:00
|
|
|
c.ACL = strings.TrimSpace(c.ACL)
|
2020-12-12 09:31:09 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-10-25 07:18:48 +00:00
|
|
|
// AzBlobFsConfig defines the configuration for Azure Blob Storage based filesystem
type AzBlobFsConfig struct {
	sdk.BaseAzBlobFsConfig
	// Storage Account Key leave blank to use SAS URL.
	// The access key is stored encrypted based on the kms configuration
	AccountKey *kms.Secret `json:"account_key,omitempty"`
	// Shared access signature URL, leave blank if using account/key
	SASURL *kms.Secret `json:"sas_url,omitempty"`
}
|
|
|
|
|
2021-09-11 12:19:17 +00:00
|
|
|
// HideConfidentialData hides confidential data
|
|
|
|
func (c *AzBlobFsConfig) HideConfidentialData() {
|
|
|
|
if c.AccountKey != nil {
|
|
|
|
c.AccountKey.Hide()
|
|
|
|
}
|
|
|
|
if c.SASURL != nil {
|
|
|
|
c.SASURL.Hide()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-27 18:10:27 +00:00
|
|
|
func (c *AzBlobFsConfig) isEqual(other *AzBlobFsConfig) bool {
|
|
|
|
if c.Container != other.Container {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if c.AccountName != other.AccountName {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if c.Endpoint != other.Endpoint {
|
|
|
|
return false
|
|
|
|
}
|
2021-06-11 20:27:36 +00:00
|
|
|
if c.SASURL.IsEmpty() {
|
|
|
|
c.SASURL = kms.NewEmptySecret()
|
|
|
|
}
|
|
|
|
if other.SASURL.IsEmpty() {
|
|
|
|
other.SASURL = kms.NewEmptySecret()
|
|
|
|
}
|
|
|
|
if !c.SASURL.IsEqual(other.SASURL) {
|
2021-03-27 18:10:27 +00:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
if c.KeyPrefix != other.KeyPrefix {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if c.UploadPartSize != other.UploadPartSize {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if c.UploadConcurrency != other.UploadConcurrency {
|
|
|
|
return false
|
|
|
|
}
|
2022-02-21 18:01:31 +00:00
|
|
|
if c.DownloadPartSize != other.DownloadPartSize {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if c.DownloadConcurrency != other.DownloadConcurrency {
|
|
|
|
return false
|
|
|
|
}
|
2021-03-27 18:10:27 +00:00
|
|
|
if c.UseEmulator != other.UseEmulator {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if c.AccessTier != other.AccessTier {
|
|
|
|
return false
|
|
|
|
}
|
2022-02-21 18:01:31 +00:00
|
|
|
return c.isSecretEqual(other)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *AzBlobFsConfig) isSecretEqual(other *AzBlobFsConfig) bool {
|
2021-03-27 18:10:27 +00:00
|
|
|
if c.AccountKey == nil {
|
|
|
|
c.AccountKey = kms.NewEmptySecret()
|
|
|
|
}
|
|
|
|
if other.AccountKey == nil {
|
|
|
|
other.AccountKey = kms.NewEmptySecret()
|
|
|
|
}
|
|
|
|
return c.AccountKey.IsEqual(other.AccountKey)
|
|
|
|
}
|
|
|
|
|
2022-04-28 10:55:01 +00:00
|
|
|
// ValidateAndEncryptCredentials validates the configuration and encrypts access secret if it is in plain text
|
|
|
|
func (c *AzBlobFsConfig) ValidateAndEncryptCredentials(additionalData string) error {
|
|
|
|
if err := c.validate(); err != nil {
|
|
|
|
return util.NewValidationError(fmt.Sprintf("could not validate Azure Blob config: %v", err))
|
|
|
|
}
|
2020-12-12 09:31:09 +00:00
|
|
|
if c.AccountKey.IsPlain() {
|
|
|
|
c.AccountKey.SetAdditionalData(additionalData)
|
|
|
|
if err := c.AccountKey.Encrypt(); err != nil {
|
2022-04-28 10:55:01 +00:00
|
|
|
return util.NewValidationError(fmt.Sprintf("could not encrypt Azure blob account key: %v", err))
|
2020-12-12 09:31:09 +00:00
|
|
|
}
|
|
|
|
}
|
2021-06-11 20:27:36 +00:00
|
|
|
if c.SASURL.IsPlain() {
|
|
|
|
c.SASURL.SetAdditionalData(additionalData)
|
|
|
|
if err := c.SASURL.Encrypt(); err != nil {
|
2022-04-28 10:55:01 +00:00
|
|
|
return util.NewValidationError(fmt.Sprintf("could not encrypt Azure blob SAS URL: %v", err))
|
2021-06-11 20:27:36 +00:00
|
|
|
}
|
|
|
|
}
|
2020-12-12 09:31:09 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *AzBlobFsConfig) checkCredentials() error {
|
2021-06-11 20:27:36 +00:00
|
|
|
if c.SASURL.IsPlain() {
|
|
|
|
_, err := url.Parse(c.SASURL.GetPayload())
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if c.SASURL.IsEncrypted() && !c.SASURL.IsValid() {
|
|
|
|
return errors.New("invalid encrypted sas_url")
|
|
|
|
}
|
|
|
|
if !c.SASURL.IsEmpty() {
|
|
|
|
return nil
|
|
|
|
}
|
2020-12-12 09:31:09 +00:00
|
|
|
if c.AccountName == "" || !c.AccountKey.IsValidInput() {
|
|
|
|
return errors.New("credentials cannot be empty or invalid")
|
|
|
|
}
|
|
|
|
if c.AccountKey.IsEncrypted() && !c.AccountKey.IsValid() {
|
|
|
|
return errors.New("invalid encrypted account_key")
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-02-21 18:01:31 +00:00
|
|
|
func (c *AzBlobFsConfig) checkPartSizeAndConcurrency() error {
|
|
|
|
if c.UploadPartSize < 0 || c.UploadPartSize > 100 {
|
|
|
|
return fmt.Errorf("invalid upload part size: %v", c.UploadPartSize)
|
|
|
|
}
|
|
|
|
if c.UploadConcurrency < 0 || c.UploadConcurrency > 64 {
|
|
|
|
return fmt.Errorf("invalid upload concurrency: %v", c.UploadConcurrency)
|
|
|
|
}
|
|
|
|
if c.DownloadPartSize < 0 || c.DownloadPartSize > 100 {
|
|
|
|
return fmt.Errorf("invalid download part size: %v", c.DownloadPartSize)
|
|
|
|
}
|
|
|
|
if c.DownloadConcurrency < 0 || c.DownloadConcurrency > 64 {
|
|
|
|
return fmt.Errorf("invalid upload concurrency: %v", c.DownloadConcurrency)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-04-02 16:32:46 +00:00
|
|
|
func (c *AzBlobFsConfig) tryDecrypt() error {
|
|
|
|
if err := c.AccountKey.TryDecrypt(); err != nil {
|
|
|
|
return fmt.Errorf("unable to decrypt account key: %w", err)
|
|
|
|
}
|
|
|
|
if err := c.SASURL.TryDecrypt(); err != nil {
|
|
|
|
return fmt.Errorf("unable to decrypt SAS URL: %w", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-04-28 10:55:01 +00:00
|
|
|
// validate returns an error if the configuration is not valid
|
|
|
|
func (c *AzBlobFsConfig) validate() error {
|
2020-12-12 09:31:09 +00:00
|
|
|
if c.AccountKey == nil {
|
|
|
|
c.AccountKey = kms.NewEmptySecret()
|
|
|
|
}
|
2021-06-11 20:27:36 +00:00
|
|
|
if c.SASURL == nil {
|
|
|
|
c.SASURL = kms.NewEmptySecret()
|
2020-12-12 09:31:09 +00:00
|
|
|
}
|
2021-06-11 20:27:36 +00:00
|
|
|
// container could be embedded within SAS URL we check this at runtime
|
|
|
|
if c.SASURL.IsEmpty() && c.Container == "" {
|
2020-12-12 09:31:09 +00:00
|
|
|
return errors.New("container cannot be empty")
|
|
|
|
}
|
|
|
|
if err := c.checkCredentials(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if c.KeyPrefix != "" {
|
|
|
|
if strings.HasPrefix(c.KeyPrefix, "/") {
|
|
|
|
return errors.New("key_prefix cannot start with /")
|
|
|
|
}
|
|
|
|
c.KeyPrefix = path.Clean(c.KeyPrefix)
|
|
|
|
if !strings.HasSuffix(c.KeyPrefix, "/") {
|
|
|
|
c.KeyPrefix += "/"
|
|
|
|
}
|
|
|
|
}
|
2022-02-21 18:01:31 +00:00
|
|
|
if err := c.checkPartSizeAndConcurrency(); err != nil {
|
|
|
|
return err
|
2020-12-12 09:31:09 +00:00
|
|
|
}
|
2022-05-19 17:49:51 +00:00
|
|
|
if !util.Contains(validAzAccessTier, c.AccessTier) {
|
2020-12-12 09:31:09 +00:00
|
|
|
return fmt.Errorf("invalid access tier %#v, valid values: \"''%v\"", c.AccessTier, strings.Join(validAzAccessTier, ", "))
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-12-05 12:48:13 +00:00
|
|
|
// CryptFsConfig defines the configuration to store local files as encrypted
type CryptFsConfig struct {
	// Passphrase used to derive the encryption key, stored encrypted based on the kms configuration
	Passphrase *kms.Secret `json:"passphrase,omitempty"`
}
|
|
|
|
|
2021-09-11 12:19:17 +00:00
|
|
|
// HideConfidentialData hides confidential data
func (c *CryptFsConfig) HideConfidentialData() {
	if c.Passphrase != nil {
		c.Passphrase.Hide()
	}
}
|
|
|
|
|
2021-03-27 18:10:27 +00:00
|
|
|
func (c *CryptFsConfig) isEqual(other *CryptFsConfig) bool {
|
|
|
|
if c.Passphrase == nil {
|
|
|
|
c.Passphrase = kms.NewEmptySecret()
|
|
|
|
}
|
|
|
|
if other.Passphrase == nil {
|
|
|
|
other.Passphrase = kms.NewEmptySecret()
|
|
|
|
}
|
|
|
|
return c.Passphrase.IsEqual(other.Passphrase)
|
|
|
|
}
|
|
|
|
|
2022-04-28 10:55:01 +00:00
|
|
|
// ValidateAndEncryptCredentials validates the configuration and encrypts the passphrase if it is in plain text
|
|
|
|
func (c *CryptFsConfig) ValidateAndEncryptCredentials(additionalData string) error {
|
|
|
|
if err := c.validate(); err != nil {
|
|
|
|
return util.NewValidationError(fmt.Sprintf("could not validate Crypt fs config: %v", err))
|
|
|
|
}
|
2020-12-12 09:31:09 +00:00
|
|
|
if c.Passphrase.IsPlain() {
|
|
|
|
c.Passphrase.SetAdditionalData(additionalData)
|
|
|
|
if err := c.Passphrase.Encrypt(); err != nil {
|
2022-04-28 10:55:01 +00:00
|
|
|
return util.NewValidationError(fmt.Sprintf("could not encrypt Crypt fs passphrase: %v", err))
|
2020-12-12 09:31:09 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-04-28 10:55:01 +00:00
|
|
|
// validate returns an error if the configuration is not valid
|
|
|
|
func (c *CryptFsConfig) validate() error {
|
2020-12-12 09:31:09 +00:00
|
|
|
if c.Passphrase == nil || c.Passphrase.IsEmpty() {
|
|
|
|
return errors.New("invalid passphrase")
|
|
|
|
}
|
|
|
|
if !c.Passphrase.IsValidInput() {
|
|
|
|
return errors.New("passphrase cannot be empty or invalid")
|
|
|
|
}
|
|
|
|
if c.Passphrase.IsEncrypted() && !c.Passphrase.IsValid() {
|
|
|
|
return errors.New("invalid encrypted passphrase")
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-05-19 17:17:43 +00:00
|
|
|
// PipeWriter defines a wrapper for pipeat.PipeWriterAt.
type PipeWriter struct {
	// wrapped writer
	writer *pipeat.PipeWriterAt
	// final upload outcome, set by Done and returned by Close
	err error
	// signaled by Done when the upload ends, Close blocks on it
	done chan bool
}
|
|
|
|
|
|
|
|
// NewPipeWriter initializes a new PipeWriter
|
|
|
|
func NewPipeWriter(w *pipeat.PipeWriterAt) *PipeWriter {
|
|
|
|
return &PipeWriter{
|
|
|
|
writer: w,
|
|
|
|
err: nil,
|
|
|
|
done: make(chan bool),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Close waits for the upload to end, closes the pipeat.PipeWriterAt and returns an error if any.
func (p *PipeWriter) Close() error {
	p.writer.Close() //nolint:errcheck // the returned error is always null
	// block until Done is called, it stores the upload outcome in p.err
	<-p.done
	return p.err
}
|
|
|
|
|
|
|
|
// Done unlocks other goroutines waiting on Close().
// It must be called when the upload ends
func (p *PipeWriter) Done(err error) {
	// store the outcome before signaling so Close observes it
	p.err = err
	p.done <- true
}
|
|
|
|
|
|
|
|
// WriteAt is a wrapper for pipeat WriteAt
func (p *PipeWriter) WriteAt(data []byte, off int64) (int, error) {
	return p.writer.WriteAt(data, off)
}
|
|
|
|
|
2020-07-24 21:39:38 +00:00
|
|
|
// Write is a wrapper for pipeat Write
func (p *PipeWriter) Write(data []byte) (int, error) {
	return p.writer.Write(data)
}
|
|
|
|
|
2020-01-19 06:41:05 +00:00
|
|
|
// IsDirectory checks if a path exists and is a directory
|
|
|
|
func IsDirectory(fs Fs, path string) (bool, error) {
|
|
|
|
fileInfo, err := fs.Stat(path)
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
return fileInfo.IsDir(), err
|
|
|
|
}
|
|
|
|
|
2020-12-05 12:48:13 +00:00
|
|
|
// IsLocalOsFs returns true if fs is a local filesystem implementation
func IsLocalOsFs(fs Fs) bool {
	return fs.Name() == osFsName
}
|
|
|
|
|
2020-12-05 12:48:13 +00:00
|
|
|
// IsCryptOsFs returns true if fs is an encrypted local filesystem implementation
func IsCryptOsFs(fs Fs) bool {
	return fs.Name() == cryptFsName
}
|
|
|
|
|
2021-02-14 21:08:08 +00:00
|
|
|
// IsSFTPFs returns true if fs is an SFTP filesystem.
// SFTP filesystem names start with sftpFsName, hence the prefix match.
func IsSFTPFs(fs Fs) bool {
	return strings.HasPrefix(fs.Name(), sftpFsName)
}
|
|
|
|
|
2022-06-11 08:41:34 +00:00
|
|
|
// IsHTTPFs returns true if fs is an HTTP filesystem
func IsHTTPFs(fs Fs) bool {
	return strings.HasPrefix(fs.Name(), httpFsName)
}
|
|
|
|
|
2021-04-03 14:00:55 +00:00
|
|
|
// IsBufferedSFTPFs returns true if this is a buffered SFTP filesystem
|
|
|
|
func IsBufferedSFTPFs(fs Fs) bool {
|
|
|
|
if !IsSFTPFs(fs) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return !fs.IsUploadResumeSupported()
|
|
|
|
}
|
|
|
|
|
|
|
|
// IsLocalOrUnbufferedSFTPFs returns true if fs is local or SFTP with no buffer
|
|
|
|
func IsLocalOrUnbufferedSFTPFs(fs Fs) bool {
|
|
|
|
if IsLocalOsFs(fs) {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if IsSFTPFs(fs) {
|
|
|
|
return fs.IsUploadResumeSupported()
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2020-12-12 09:31:09 +00:00
|
|
|
// IsLocalOrSFTPFs returns true if fs is local or SFTP
func IsLocalOrSFTPFs(fs Fs) bool {
	return IsLocalOsFs(fs) || IsSFTPFs(fs)
}
|
|
|
|
|
2022-06-11 08:41:34 +00:00
|
|
|
// HasTruncateSupport returns true if the fs supports truncate files
func HasTruncateSupport(fs Fs) bool {
	return IsLocalOsFs(fs) || IsSFTPFs(fs) || IsHTTPFs(fs)
}
|
|
|
|
|
2021-04-03 14:00:55 +00:00
|
|
|
// HasOpenRWSupport returns true if the fs can open a file
|
|
|
|
// for reading and writing at the same time
|
|
|
|
func HasOpenRWSupport(fs Fs) bool {
|
|
|
|
if IsLocalOsFs(fs) {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if IsSFTPFs(fs) && fs.IsUploadResumeSupported() {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2021-03-21 18:15:47 +00:00
|
|
|
// IsLocalOrCryptoFs returns true if fs is local or local encrypted
func IsLocalOrCryptoFs(fs Fs) bool {
	return IsLocalOsFs(fs) || IsCryptOsFs(fs)
}
|
|
|
|
|
2020-01-19 06:41:05 +00:00
|
|
|
// SetPathPermissions calls fs.Chown.
|
|
|
|
// It does nothing for local filesystem on windows
|
|
|
|
func SetPathPermissions(fs Fs, path string, uid int, gid int) {
|
2020-12-12 09:31:09 +00:00
|
|
|
if uid == -1 && gid == -1 {
|
|
|
|
return
|
|
|
|
}
|
2020-01-19 06:41:05 +00:00
|
|
|
if IsLocalOsFs(fs) {
|
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if err := fs.Chown(path, uid, gid); err != nil {
|
|
|
|
fsLog(fs, logger.LevelWarn, "error chowning path %v: %v", path, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-12-16 17:18:36 +00:00
|
|
|
func updateFileInfoModTime(storageID, objectPath string, info *FileInfo) (*FileInfo, error) {
|
|
|
|
if !plugin.Handler.HasMetadater() {
|
|
|
|
return info, nil
|
|
|
|
}
|
|
|
|
if info.IsDir() {
|
|
|
|
return info, nil
|
|
|
|
}
|
|
|
|
mTime, err := plugin.Handler.GetModificationTime(storageID, ensureAbsPath(objectPath), info.IsDir())
|
|
|
|
if errors.Is(err, metadata.ErrNoSuchObject) {
|
|
|
|
return info, nil
|
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return info, err
|
|
|
|
}
|
|
|
|
info.modTime = util.GetTimeFromMsecSinceEpoch(mTime)
|
|
|
|
return info, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func getFolderModTimes(storageID, dirName string) (map[string]int64, error) {
|
|
|
|
var err error
|
|
|
|
modTimes := make(map[string]int64)
|
|
|
|
if plugin.Handler.HasMetadater() {
|
|
|
|
modTimes, err = plugin.Handler.GetModificationTimes(storageID, ensureAbsPath(dirName))
|
|
|
|
if err != nil && !errors.Is(err, metadata.ErrNoSuchObject) {
|
|
|
|
return modTimes, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return modTimes, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// ensureAbsPath returns name unchanged when it is already absolute,
// otherwise it prepends "/" (cleaning the result via path.Join).
func ensureAbsPath(name string) string {
	if !path.IsAbs(name) {
		return path.Join("/", name)
	}
	return name
}
|
|
|
|
|
|
|
|
// fsMetadataCheck removes stale metadata entries for the given storage.
// It pages through the folders known to the metadata plugin (100 at a time)
// and deletes modification time entries that refer to objects no longer
// present in the backend. It is a no-op if no metadater plugin is configured.
// keyPrefix, if not empty, restricts the check to folders below "/"+keyPrefix.
func fsMetadataCheck(fs fsMetadataChecker, storageID, keyPrefix string) error {
	if !plugin.Handler.HasMetadater() {
		return nil
	}
	limit := 100
	from := ""
	for {
		metadataFolders, err := plugin.Handler.GetMetadataFolders(storageID, from, limit)
		if err != nil {
			fsLog(fs, logger.LevelError, "unable to get folders: %v", err)
			return err
		}
		for _, folder := range metadataFolders {
			// the last folder processed becomes the pagination cursor for the next page
			from = folder
			fsPrefix := folder
			if !strings.HasSuffix(folder, "/") {
				fsPrefix += "/"
			}
			if keyPrefix != "" {
				if !strings.HasPrefix(fsPrefix, "/"+keyPrefix) {
					fsLog(fs, logger.LevelDebug, "skip metadata check for folder %#v outside prefix %#v",
						folder, keyPrefix)
					continue
				}
			}
			fsLog(fs, logger.LevelDebug, "check metadata for folder %#v", folder)
			metadataValues, err := plugin.Handler.GetModificationTimes(storageID, folder)
			if err != nil {
				fsLog(fs, logger.LevelError, "unable to get modification times for folder %#v: %v", folder, err)
				return err
			}
			if len(metadataValues) == 0 {
				fsLog(fs, logger.LevelDebug, "no metadata for folder %#v", folder)
				continue
			}
			fileNames, err := fs.getFileNamesInPrefix(fsPrefix)
			if err != nil {
				fsLog(fs, logger.LevelError, "unable to get content for prefix %#v: %v", fsPrefix, err)
				return err
			}
			// now check if we have metadata for a missing object
			for k := range metadataValues {
				if _, ok := fileNames[k]; !ok {
					filePath := ensureAbsPath(path.Join(folder, k))
					// removal failures are logged but do not abort the scan
					if err = plugin.Handler.RemoveMetadata(storageID, filePath); err != nil {
						fsLog(fs, logger.LevelError, "unable to remove metadata for missing file %#v: %v", filePath, err)
					} else {
						fsLog(fs, logger.LevelDebug, "metadata removed for missing file %#v", filePath)
					}
				}
			}
		}

		// a short page means there are no more folders to process
		if len(metadataFolders) < limit {
			return nil
		}
	}
}
|
|
|
|
|
2022-04-02 16:32:46 +00:00
|
|
|
// getMountPath normalizes a virtual folder mount path: the root mount "/"
// maps to the empty string, any other value is returned unchanged.
func getMountPath(mountPath string) string {
	if mountPath != "/" {
		return mountPath
	}
	return ""
}
|
|
|
|
|
2022-05-19 17:49:51 +00:00
|
|
|
// fsLog logs a formatted message at the given level, tagging it with the
// filesystem name and connection ID.
func fsLog(fs Fs, level logger.LogLevel, format string, v ...any) {
	logger.Log(level, fs.Name(), fs.ConnectionID(), format, v...)
}
|