remove metadata plugin
Signed-off-by: Nicola Murino <nicola.murino@gmail.com>
parent d8339ab967
commit e61fb42cbc
35 changed files with 71 additions and 1128 deletions
@@ -17,7 +17,7 @@ esac
 echo "download plugins for arch ${SUFFIX}"
-for PLUGIN in geoipfilter kms pubsub eventstore eventsearch metadata auth
+for PLUGIN in geoipfilter kms pubsub eventstore eventsearch auth
 do
     echo "download plugin from https://github.com/sftpgo/sftpgo-plugin-${PLUGIN}/releases/latest/download/sftpgo-plugin-${PLUGIN}-linux-${SUFFIX}"
     curl -L "https://github.com/sftpgo/sftpgo-plugin-${PLUGIN}/releases/latest/download/sftpgo-plugin-${PLUGIN}-linux-${SUFFIX}" --output "/usr/local/bin/sftpgo-plugin-${PLUGIN}"
@@ -12,7 +12,6 @@ The following actions are supported:
 - `Folder quota reset`. The quota used by virtual folders will be updated based on current usage.
 - `Transfer quota reset`. The transfer quota values will be reset to `0`.
 - `Data retention check`. You can define per-folder retention policies.
-- `Metadata check`. A metadata check requires a metadata plugin such as [this one](https://github.com/sftpgo/sftpgo-plugin-metadata) and removes the metadata associated to missing items (for example objects deleted outside SFTPGo). A metadata check does nothing is no metadata plugin is installed or external metadata are not supported for a filesystem.
 - `Password expiration check`. You can send an email notification to users whose password is about to expire.
 - `User expiration check`. You can receive notifications with expired users.
 - `Identity Provider account check`. You can create/update accounts for users/admins logging in using an Identity Provider.
@@ -67,7 +67,7 @@ The configuration file contains the following sections:
 - `execute_on`, list of strings. Valid values are `pre-download`, `download`, `first-download`, `pre-upload`, `upload`, `first-upload`, `pre-delete`, `delete`, `rename`, `mkdir`, `rmdir`, `ssh_cmd`, `copy`. Leave empty to disable actions.
 - `execute_sync`, list of strings. Actions, defined in the `execute_on` list above, to be performed synchronously. The `pre-*` actions are always executed synchronously while the other ones are asynchronous. Executing an action synchronously means that SFTPGo will not return a result code to the client (which is waiting for it) until your hook have completed its execution. Leave empty to execute only the defined `pre-*` hook synchronously
 - `hook`, string. Absolute path to the command to execute or HTTP URL to notify.
-- `setstat_mode`, integer. 0 means "normal mode": requests for changing permissions, owner/group and access/modification times are executed. 1 means "ignore mode": requests for changing permissions, owner/group and access/modification times are silently ignored. 2 means "ignore mode if not supported": requests for changing permissions and owner/group are silently ignored for cloud filesystems and executed for local/SFTP filesystem. Requests for changing modification times are always executed for local/SFTP filesystems and are executed for cloud based filesystems if the target is a file and there is a metadata plugin available. A metadata plugin can be found [here](https://github.com/sftpgo/sftpgo-plugin-metadata).
+- `setstat_mode`, integer. 0 means "normal mode": requests for changing permissions, owner/group and access/modification times are executed. 1 means "ignore mode": requests for changing permissions, owner/group and access/modification times are silently ignored. 2 means "ignore mode if not supported": requests for changing permissions and owner/group are silently ignored for cloud filesystems and executed for local/SFTP filesystem.
 - `rename_mode`, integer. By default (`0`), renaming of non-empty directories is not allowed for cloud storage providers (S3, GCS, Azure Blob). Set to `1` to enable recursive renames for these providers, they may be slow, there is no atomic rename API like for local filesystem, so SFTPGo will recursively list the directory contents and do a rename for each entry (partial renaming and incorrect disk quota updates are possible in error cases). Default `0`.
 - `resume_max_size`, integer. defines the maximum size allowed, in bytes, to resume uploads on storage backends with immutable objects. By default, resuming uploads is not allowed for cloud storage providers (S3, GCS, Azure Blob) because SFTPGo must rewrite the entire file. Set to a value greater than 0 to allow resuming uploads of files smaller than or equal to the defined size. Please note that uploads for these backends are still atomic, the client must intentionally upload a portion of the target file and then resume uploading.. Default `0`.
 - `temp_path`, string. Defines the path for temporary files such as those used for atomic uploads or file pipes. If you set this option you must make sure that the defined path exists, is accessible for writing by the user running SFTPGo, and is on the same filesystem as the users home directories otherwise the renaming for atomic uploads will become a copy and therefore may take a long time. The temporary files are not namespaced. The default is generally fine. Leave empty for the default.
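Aside (not part of the commit): the `setstat_mode` values above describe a three-way decision. A minimal Go sketch of that logic; `applyChanges` and `isCloudFs` are hypothetical names for illustration, not SFTPGo APIs:

```go
package example

import "fmt"

// applyChanges stands in for actually applying the requested
// permission/owner/time change; it is a placeholder for this sketch.
func applyChanges() error { return nil }

// handleSetstat illustrates the documented setstat_mode semantics.
func handleSetstat(mode int, isCloudFs bool) error {
	switch mode {
	case 0: // normal mode: execute the request
		return applyChanges()
	case 1: // ignore mode: silently ignore the request
		return nil
	case 2: // ignore only where the backend cannot support the change
		if isCloudFs {
			return nil
		}
		return applyChanges()
	default:
		return fmt.Errorf("unsupported setstat_mode: %d", mode)
	}
}
```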
@ -478,7 +478,7 @@ The configuration file contains the following sections:
|
||||||
<details><summary><font size=4>Plugins</font></summary>
|
<details><summary><font size=4>Plugins</font></summary>
|
||||||
|
|
||||||
- **plugins**, list of external plugins. :warning: Please note that the plugin system is experimental, the configuration parameters and interfaces may change in a backward incompatible way in future. Each plugin is configured using a struct with the following fields:
|
- **plugins**, list of external plugins. :warning: Please note that the plugin system is experimental, the configuration parameters and interfaces may change in a backward incompatible way in future. Each plugin is configured using a struct with the following fields:
|
||||||
- `type`, string. Defines the plugin type. Supported types: `notifier`, `kms`, `auth`, `metadata`, `eventsearcher`, `ipfilter`.
|
- `type`, string. Defines the plugin type. Supported types: `notifier`, `kms`, `auth`, `eventsearcher`, `ipfilter`.
|
||||||
- `notifier_options`, struct. Defines the options for notifier plugins.
|
- `notifier_options`, struct. Defines the options for notifier plugins.
|
||||||
- `fs_events`, list of strings. Defines the filesystem events that will be notified to this plugin.
|
- `fs_events`, list of strings. Defines the filesystem events that will be notified to this plugin.
|
||||||
- `provider_events`, list of strings. Defines the provider events that will be notified to this plugin.
|
- `provider_events`, list of strings. Defines the provider events that will be notified to this plugin.
|
||||||
|
|
|
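For orientation (not from the commit): a minimal Go sketch of the configuration shape described above, as one might model a `plugins` entry when reading the config file. The field names follow the documented keys; the struct names are assumptions:

```go
package example

// NotifierOptions mirrors the documented notifier_options keys.
type NotifierOptions struct {
	FsEvents       []string `json:"fs_events"`
	ProviderEvents []string `json:"provider_events"`
}

// PluginConfig models one entry of the documented plugins list.
// After this commit the valid types are: notifier, kms, auth,
// eventsearcher, ipfilter (the metadata type is gone).
type PluginConfig struct {
	Type            string          `json:"type"`
	NotifierOptions NotifierOptions `json:"notifier_options"`
}
```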
@@ -8,4 +8,4 @@ You can optionally specify a [storage class](https://cloud.google.com/storage/do
 
 The configured bucket must exist.
 
-This backend is very similar to the [S3](./s3.md) backend, and it has the same limitations. As with S3 `chtime` will fail with the default configuration, you can install the [metadata plugin](https://github.com/sftpgo/sftpgo-plugin-metadata) to make it work and thus be able to preserve/change file modification times.
+This backend is very similar to the [S3](./s3.md) backend, and it has the same limitations.
@@ -13,7 +13,6 @@ The following plugin types are supported:
 - `auth`, allows to authenticate users.
 - `notifier`, allows to receive notifications for supported filesystem events such as file uploads, downloads etc. and provider events such as objects add, update, delete.
 - `kms`, allows to support additional KMS providers.
-- `metadata`, allows to store metadata, such as the last modification time, for storage backends that does not support them (S3, Google Cloud Storage, Azure Blob).
 - `ipfilter`, allows to allow/deny access based on client IP.
 
 Full configuration details can be found [here](./full-configuration.md).
@@ -39,7 +39,6 @@ You can create other administrator and assign them the following permissions:
 - manage admins
 - manage groups
 - manage data retention
-- manage metadata
 - view events
 - manage event rules
 
@@ -36,4 +36,4 @@ Other notes:
 - For server side encryption, you have to configure the mapped bucket to automatically encrypt objects.
 - A local home directory is still required to store temporary files.
 - Clients that require advanced filesystem-like features such as `sshfs` are not supported.
-- `chtime` will fail with the default configuration, you can install the [metadata plugin](https://github.com/sftpgo/sftpgo-plugin-metadata) to make it work and thus be able to preserve/change file modification times.
+- `chtime` not supported.
go.mod (10 lines changed)

@@ -13,9 +13,9 @@ require (
 	github.com/aws/aws-sdk-go-v2/config v1.27.0
 	github.com/aws/aws-sdk-go-v2/credentials v1.17.0
 	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.0
-	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.0
+	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.1
 	github.com/aws/aws-sdk-go-v2/service/marketplacemetering v1.20.1
-	github.com/aws/aws-sdk-go-v2/service/s3 v1.49.0
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.50.0
 	github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.27.1
 	github.com/aws/aws-sdk-go-v2/service/sts v1.27.0
 	github.com/bmatcuk/doublestar/v4 v4.6.1
@@ -26,7 +26,7 @@ require (
 	github.com/fclairamb/ftpserverlib v0.22.0
 	github.com/fclairamb/go-log v0.4.1
 	github.com/go-acme/lego/v4 v4.15.0
-	github.com/go-chi/chi/v5 v5.0.11
+	github.com/go-chi/chi/v5 v5.0.12
 	github.com/go-chi/jwtauth/v5 v5.3.0
 	github.com/go-chi/render v1.0.3
 	github.com/go-sql-driver/mysql v1.7.1
@@ -53,7 +53,7 @@ require (
 	github.com/rs/cors v1.10.1
 	github.com/rs/xid v1.5.0
 	github.com/rs/zerolog v1.32.0
-	github.com/sftpgo/sdk v0.1.6-0.20240114195211-3f4916cc829c
+	github.com/sftpgo/sdk v0.1.6-0.20240216180841-c13afec62842
 	github.com/shirou/gopsutil/v3 v3.24.1
 	github.com/spf13/afero v1.11.0
 	github.com/spf13/cobra v1.8.0
@@ -145,7 +145,7 @@ require (
 	github.com/pelletier/go-toml/v2 v2.1.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect
-	github.com/prometheus/client_model v0.5.0 // indirect
+	github.com/prometheus/client_model v0.6.0 // indirect
 	github.com/prometheus/common v0.47.0 // indirect
 	github.com/prometheus/procfs v0.12.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
go.sum (20 lines changed)

@@ -43,8 +43,8 @@ github.com/aws/aws-sdk-go-v2/credentials v1.17.0 h1:lMW2x6sKBsiAJrpi1doOXqWFyEPo
 github.com/aws/aws-sdk-go-v2/credentials v1.17.0/go.mod h1:uT41FIH8cCIxOdUYIL0PYyHlL1NoneDuDSCwg5VE/5o=
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.0 h1:xWCwjjvVz2ojYTP4kBKUuUh9ZrXfcAXpflhOUUeXg1k=
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.0/go.mod h1:j3fACuqXg4oMTQOR2yY7m0NmJY0yBK4L4sLsRXq1Ins=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.0 h1:FHVyVIJpOeQZCnYj9EVKTWahb4WDNFEUOKCx/dOUPcM=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.0/go.mod h1:SL/aJzGL0LsQPQ1y2HMNbJGrm/Xh6aVCGq6ki+DLGEw=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.1 h1:FqtJUSBgT2yfZ8kZhTi9AO131qMLOzb4MiH4riAM8XM=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.1/go.mod h1:G3V4qNUPMHKrXW/l149QXmHjf1vlMWBO4UuGPCK4a/c=
 github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.0 h1:NPs/EqVO+ajwOoq56EfcGKa3L3ruWuazkIw1BqxwOPw=
 github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.0/go.mod h1:D+duLy2ylgatV+yTlQ8JTuLfDD0BnFvnQRc+o6tbZ4M=
 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.0 h1:ks7KGMVUMoDzcxNWUlEdI+/lokMFD136EL6DWmUOV80=
@@ -63,8 +63,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.0 h1:l5puwOHr7IxECu
 github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.0/go.mod h1:Oov79flWa/n7Ni+lQC3z+VM7PoRM47omRqbJU9B5Y7E=
 github.com/aws/aws-sdk-go-v2/service/marketplacemetering v1.20.1 h1:eHNChn4Sp+g1hdz4rkx96n1l/LpJEQLDuFB0V+fA/yg=
 github.com/aws/aws-sdk-go-v2/service/marketplacemetering v1.20.1/go.mod h1:9ev55pJx9xNX3UAOKzZmbmaTbwwuLTCemOJPsd7rUz8=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.49.0 h1:VfU15izXQjz4m9y1DkbY79iylIiuPwWtrram4cSpWEI=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.49.0/go.mod h1:1o/W6JFUuREj2ExoQ21vHJgO7wakvjhol91M9eknFgs=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.50.0 h1:jZAdMD1ioZdqirzzVVRhpHHWJmcGGCn8JqDYBs5nmYA=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.50.0/go.mod h1:1o/W6JFUuREj2ExoQ21vHJgO7wakvjhol91M9eknFgs=
 github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.27.1 h1:ss/HbHbONu0uscM549++4YanT6MnjNN0BGhE5pZRfG4=
 github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.27.1/go.mod h1:JsJDZFHwLGZu6dxhV9EV1gJrMnCeE4GEXubSZA59xdA=
 github.com/aws/aws-sdk-go-v2/service/sso v1.19.0 h1:u6OkVDxtBPnxPkZ9/63ynEe+8kHbtS5IfaC4PzVxzWM=
@@ -142,8 +142,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
 github.com/go-acme/lego/v4 v4.15.0 h1:A7MHEU3b+TDFqhC/HmzMJnzPbyeaYvMZQBbqgvbThhU=
 github.com/go-acme/lego/v4 v4.15.0/go.mod h1:eeGhjW4zWT7Ccqa3sY7ayEqFLCAICx+mXgkMHKIkLxg=
-github.com/go-chi/chi/v5 v5.0.11 h1:BnpYbFZ3T3S1WMpD79r7R5ThWX40TaFB7L31Y8xqSwA=
-github.com/go-chi/chi/v5 v5.0.11/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
+github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s=
+github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
 github.com/go-chi/jwtauth/v5 v5.3.0 h1:X7RKGks1lrVeIe2omGyz47pNaNjG2YmwlRN5UKhN8qg=
 github.com/go-chi/jwtauth/v5 v5.3.0/go.mod h1:2PoGm/KbnzRN9ILY6HFZAI6fTnb1gEZAKogAyqkd6fY=
 github.com/go-chi/render v1.0.3 h1:AsXqd2a1/INaIfUSKq3G5uA8weYx20FOsM7uSoCyyt4=
@@ -331,8 +331,8 @@ github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P
 github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
 github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
-github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
+github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
 github.com/prometheus/common v0.47.0 h1:p5Cz0FNHo7SnWOmWmoRozVcjEp0bIVU8cV7OShpjL1k=
 github.com/prometheus/common v0.47.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
 github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
@@ -355,8 +355,8 @@ github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4 h1:PT+ElG/UUFMfqy5HrxJ
 github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4/go.mod h1:MnkX001NG75g3p8bhFycnyIjeQoOjGL6CEIsdE/nKSY=
 github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
 github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
-github.com/sftpgo/sdk v0.1.6-0.20240114195211-3f4916cc829c h1:07TYPvNbOnmKsBxjNsUr+gsILIUWflw1UYwjn1jognM=
-github.com/sftpgo/sdk v0.1.6-0.20240114195211-3f4916cc829c/go.mod h1:AWoY2YYe/P1ymfTlRER/meERQjCcZZTbgVPGcPQgaqc=
+github.com/sftpgo/sdk v0.1.6-0.20240216180841-c13afec62842 h1:Rqh/TYkMX6UmUWvgXrsOBoG7ee2GH1AJXBFlszIzKT0=
+github.com/sftpgo/sdk v0.1.6-0.20240216180841-c13afec62842/go.mod h1:AWoY2YYe/P1ymfTlRER/meERQjCcZZTbgVPGcPQgaqc=
 github.com/shirou/gopsutil/v3 v3.24.1 h1:R3t6ondCEvmARp3wxODhXMTLC/klMa87h2PHUw5m7QI=
 github.com/shirou/gopsutil/v3 v3.24.1/go.mod h1:UU7a2MSBQa+kW1uuDq8DeEBS8kmrnQwsv2b5O513rwU=
 github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
@@ -153,11 +153,9 @@ var (
 	// Connections is the list of active connections
 	Connections ActiveConnections
 	// QuotaScans is the list of active quota scans
 	QuotaScans ActiveScans
-	// ActiveMetadataChecks holds the active metadata checks
-	ActiveMetadataChecks MetadataChecks
 	transfersChecker TransfersChecker
 	supportedProtocols = []string{ProtocolSFTP, ProtocolSCP, ProtocolSSH, ProtocolFTP, ProtocolWebDAV,
 		ProtocolHTTP, ProtocolHTTPShare, ProtocolOIDC}
 	disconnHookProtocols = []string{ProtocolSFTP, ProtocolSCP, ProtocolSSH, ProtocolFTP}
 	// the map key is the protocol, for each protocol we can have multiple rate limiters
@@ -1397,74 +1395,3 @@ func (s *ActiveScans) RemoveVFolderQuotaScan(folderName string) bool {
 
 	return false
 }
-
-// MetadataCheck defines an active metadata check
-type MetadataCheck struct {
-	// Username to which the metadata check refers
-	Username string `json:"username"`
-	// check start time as unix timestamp in milliseconds
-	StartTime int64 `json:"start_time"`
-	Role      string `json:"-"`
-}
-
-// MetadataChecks holds the active metadata checks
-type MetadataChecks struct {
-	sync.RWMutex
-	checks []MetadataCheck
-}
-
-// Get returns the active metadata checks
-func (c *MetadataChecks) Get(role string) []MetadataCheck {
-	c.RLock()
-	defer c.RUnlock()
-
-	checks := make([]MetadataCheck, 0, len(c.checks))
-	for _, check := range c.checks {
-		if role == "" || role == check.Role {
-			checks = append(checks, MetadataCheck{
-				Username:  check.Username,
-				StartTime: check.StartTime,
-			})
-		}
-	}
-
-	return checks
-}
-
-// Add adds a user to the ones with active metadata checks.
-// Return false if a metadata check is already active for the specified user
-func (c *MetadataChecks) Add(username, role string) bool {
-	c.Lock()
-	defer c.Unlock()
-
-	for idx := range c.checks {
-		if c.checks[idx].Username == username {
-			return false
-		}
-	}
-
-	c.checks = append(c.checks, MetadataCheck{
-		Username:  username,
-		StartTime: util.GetTimeAsMsSinceEpoch(time.Now()),
-		Role:      role,
-	})
-
-	return true
-}
-
-// Remove removes a user from the ones with active metadata checks
-func (c *MetadataChecks) Remove(username string) bool {
-	c.Lock()
-	defer c.Unlock()
-
-	for idx := range c.checks {
-		if c.checks[idx].Username == username {
-			lastIdx := len(c.checks) - 1
-			c.checks[idx] = c.checks[lastIdx]
-			c.checks = c.checks[:lastIdx]
-			return true
-		}
-	}
-
-	return false
-}
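Aside (not part of the commit): the removed `Remove` method deletes from the slice by swapping the matched element with the last one and truncating, which is O(1) per delete but does not preserve order. A standalone illustration of the idiom:

```go
package main

import "fmt"

// removeFirst deletes the first occurrence of target from items using the
// swap-with-last idiom seen in the removed MetadataChecks.Remove: constant
// time per delete, but the remaining slice order is not preserved.
func removeFirst(items []string, target string) []string {
	for i := range items {
		if items[i] == target {
			last := len(items) - 1
			items[i] = items[last]
			return items[:last]
		}
	}
	return items
}

func main() {
	fmt.Println(removeFirst([]string{"a", "b", "c"}, "a")) // prints [c b]
}
```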
@@ -1486,38 +1486,6 @@ func TestUpdateTransferTimestamps(t *testing.T) {
 	assert.NoError(t, err)
 }
 
-func TestMetadataAPI(t *testing.T) {
-	username := "metadatauser"
-	require.False(t, ActiveMetadataChecks.Remove(username))
-	require.True(t, ActiveMetadataChecks.Add(username, ""))
-	require.False(t, ActiveMetadataChecks.Add(username, ""))
-	checks := ActiveMetadataChecks.Get("")
-	require.Len(t, checks, 1)
-	checks[0].Username = username + "a"
-	checks = ActiveMetadataChecks.Get("")
-	require.Len(t, checks, 1)
-	require.Equal(t, username, checks[0].Username)
-	require.True(t, ActiveMetadataChecks.Remove(username))
-	require.Len(t, ActiveMetadataChecks.Get(""), 0)
-}
-
-func TestMetadataAPIRole(t *testing.T) {
-	username := "muser"
-	role1 := "r1"
-	role2 := "r2"
-	require.True(t, ActiveMetadataChecks.Add(username, role2))
-	require.False(t, ActiveMetadataChecks.Add(username, ""))
-	checks := ActiveMetadataChecks.Get("")
-	require.Len(t, checks, 1)
-	assert.Empty(t, checks[0].Role)
-	checks = ActiveMetadataChecks.Get(role1)
-	require.Len(t, checks, 0)
-	checks = ActiveMetadataChecks.Get(role2)
-	require.Len(t, checks, 1)
-	require.True(t, ActiveMetadataChecks.Remove(username))
-	require.Len(t, ActiveMetadataChecks.Get(""), 0)
-}
-
 func TestIPList(t *testing.T) {
 	type test struct {
 		ip string
@@ -607,6 +607,8 @@ func TestErrorResolvePath(t *testing.T) {
 	assert.Error(t, err)
 	err = conn.doRecursiveRemove(nil, "/fspath", "/vpath", vfs.NewFileInfo("vpath", true, 0, time.Now(), false), 2000)
 	assert.Error(t, err, util.ErrRecursionTooDeep)
+	err = conn.doRecursiveCopy("/src", "/dst", vfs.NewFileInfo("src", true, 0, time.Now(), false), false, 2000)
+	assert.Error(t, err, util.ErrRecursionTooDeep)
 	err = conn.checkCopy(vfs.NewFileInfo("name", true, 0, time.Unix(0, 0), false), nil, "/source", "/target")
 	assert.Error(t, err)
 	sourceFile := filepath.Join(os.TempDir(), "f", "source")
@@ -1144,13 +1146,16 @@ func TestListerAt(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, 2, n)
 	assert.Equal(t, "..", files[0].Name())
-	assert.Equal(t, "p3", files[1].Name())
+	vfolders := []string{files[1].Name()}
 	files = make([]os.FileInfo, 200)
 	n, err = lister.ListAt(files, 0)
 	require.NoError(t, err)
 	require.Equal(t, 102, n)
-	assert.Equal(t, "p2", files[0].Name())
-	assert.Equal(t, "p1", files[1].Name())
+	vfolders = append(vfolders, files[0].Name())
+	vfolders = append(vfolders, files[1].Name())
+	assert.Contains(t, vfolders, "p1")
+	assert.Contains(t, vfolders, "p2")
+	assert.Contains(t, vfolders, "p3")
 	err = lister.Close()
 	require.NoError(t, err)
 }
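The `TestListerAt` hunk above replaces exact positional assertions with a collected `vfolders` slice plus `assert.Contains`, making the test independent of listing order. testify also offers `assert.ElementsMatch` for the same purpose; a minimal sketch (test name and data are illustrative, not from the repository):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestOrderIndependent shows an alternative to the Contains calls above:
// ElementsMatch ignores ordering but requires the same multiset of values.
func TestOrderIndependent(t *testing.T) {
	vfolders := []string{"p3", "p1", "p2"} // whatever order the lister returned
	assert.ElementsMatch(t, []string{"p1", "p2", "p3"}, vfolders)
}
```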
@@ -2354,57 +2354,6 @@ func executeUserExpirationCheckRuleAction(conditions dataprovider.ConditionOptio
 	return nil
 }
 
-func executeMetadataCheckForUser(user *dataprovider.User) error {
-	if err := user.LoadAndApplyGroupSettings(); err != nil {
-		eventManagerLog(logger.LevelError, "skipping scheduled quota reset for user %s, cannot apply group settings: %v",
-			user.Username, err)
-		return err
-	}
-	if !ActiveMetadataChecks.Add(user.Username, user.Role) {
-		eventManagerLog(logger.LevelError, "another metadata check is already in progress for user %q", user.Username)
-		return fmt.Errorf("another metadata check is in progress for user %q", user.Username)
-	}
-	defer ActiveMetadataChecks.Remove(user.Username)
-
-	if err := user.CheckMetadataConsistency(); err != nil {
-		eventManagerLog(logger.LevelError, "error checking metadata consistence for user %q: %v", user.Username, err)
-		return fmt.Errorf("error checking metadata consistence for user %q: %w", user.Username, err)
-	}
-	return nil
-}
-
-func executeMetadataCheckRuleAction(conditions dataprovider.ConditionOptions, params *EventParams) error {
-	users, err := params.getUsers()
-	if err != nil {
-		return fmt.Errorf("unable to get users: %w", err)
-	}
-	var failures []string
-	var executed int
-	for _, user := range users {
-		// if sender is set, the conditions have already been evaluated
-		if params.sender == "" {
-			if !checkUserConditionOptions(&user, &conditions) {
-				eventManagerLog(logger.LevelDebug, "skipping metadata check for user %q, condition options don't match",
-					user.Username)
-				continue
-			}
-		}
-		executed++
-		if err = executeMetadataCheckForUser(&user); err != nil {
-			params.AddError(err)
-			failures = append(failures, user.Username)
-		}
-	}
-	if len(failures) > 0 {
-		return fmt.Errorf("metadata check failed for users: %s", strings.Join(failures, ", "))
-	}
-	if executed == 0 {
-		eventManagerLog(logger.LevelError, "no metadata check executed")
-		return errors.New("no metadata check executed")
-	}
-	return nil
-}
-
 func executePwdExpirationCheckForUser(user *dataprovider.User, config dataprovider.EventActionPasswordExpiration) error {
 	if err := user.LoadAndApplyGroupSettings(); err != nil {
 		eventManagerLog(logger.LevelError, "skipping password expiration check for user %q, cannot apply group settings: %v",
@@ -2568,8 +2517,6 @@ func executeRuleAction(action dataprovider.BaseEventAction, params *EventParams,
 		err = executeTransferQuotaResetRuleAction(conditions, params)
 	case dataprovider.ActionTypeDataRetentionCheck:
 		err = executeDataRetentionCheckRuleAction(action.Options.RetentionConfig, conditions, params, action.Name)
-	case dataprovider.ActionTypeMetadataCheck:
-		err = executeMetadataCheckRuleAction(conditions, params)
 	case dataprovider.ActionTypeFilesystem:
 		err = executeFsRuleAction(action.Options.FsConfig, conditions, params)
 	case dataprovider.ActionTypePasswordExpirationCheck:
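Aside (not from the commit): the removed `executeMetadataCheckRuleAction` follows the rule-action pattern used throughout this file, iterate matching users, count executions, collect per-user failures, and fail if nothing ran. A generic standalone sketch of that shape, with hypothetical names:

```go
package example

import (
	"errors"
	"fmt"
	"strings"
)

// runForUsers mirrors the structure of the removed rule action: apply check
// to each user, collect failures, and report an error both when any user
// failed and when no user was processed at all.
func runForUsers(usernames []string, check func(string) error) error {
	var failures []string
	executed := 0
	for _, name := range usernames {
		executed++
		if err := check(name); err != nil {
			failures = append(failures, name)
		}
	}
	if len(failures) > 0 {
		return fmt.Errorf("check failed for users: %s", strings.Join(failures, ", "))
	}
	if executed == 0 {
		return errors.New("no check executed")
	}
	return nil
}
```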
@@ -605,8 +605,6 @@ func TestEventManagerErrors(t *testing.T) {
 	assert.Error(t, err)
 	err = executeTransferQuotaResetRuleAction(dataprovider.ConditionOptions{}, &EventParams{})
 	assert.Error(t, err)
-	err = executeMetadataCheckRuleAction(dataprovider.ConditionOptions{}, &EventParams{})
-	assert.Error(t, err)
 	err = executeUserExpirationCheckRuleAction(dataprovider.ConditionOptions{}, &EventParams{})
 	assert.Error(t, err)
 	err = executeDeleteFsRuleAction(nil, nil, dataprovider.ConditionOptions{}, &EventParams{})
@@ -639,15 +637,6 @@ func TestEventManagerErrors(t *testing.T) {
 		},
 	})
 	assert.Error(t, err)
-	err = executeMetadataCheckForUser(&dataprovider.User{
-		Groups: []sdk.GroupMapping{
-			{
-				Name: groupName,
-				Type: sdk.GroupTypePrimary,
-			},
-		},
-	})
-	assert.Error(t, err)
 	err = executeDataRetentionCheckForUser(dataprovider.User{
 		Groups: []sdk.GroupMapping{
 			{
@@ -987,40 +976,6 @@ func TestEventRuleActions(t *testing.T) {
 	assert.Error(t, err)
 	assert.Contains(t, getErrorString(err), "no user quota reset executed")
 
-	action = dataprovider.BaseEventAction{
-		Type: dataprovider.ActionTypeMetadataCheck,
-	}
-
-	err = executeRuleAction(action, &EventParams{}, dataprovider.ConditionOptions{
-		Names: []dataprovider.ConditionPattern{
-			{
-				Pattern: "don't match",
-			},
-		},
-	})
-	assert.Error(t, err)
-	assert.Contains(t, getErrorString(err), "no metadata check executed")
-
-	err = executeRuleAction(action, &EventParams{}, dataprovider.ConditionOptions{
-		Names: []dataprovider.ConditionPattern{
-			{
-				Pattern: username1,
-			},
-		},
-	})
-	assert.NoError(t, err)
-	// simulate another metadata check in progress
-	assert.True(t, ActiveMetadataChecks.Add(username1, ""))
-	err = executeRuleAction(action, &EventParams{}, dataprovider.ConditionOptions{
-		Names: []dataprovider.ConditionPattern{
-			{
-				Pattern: username1,
-			},
-		},
-	})
-	assert.Error(t, err)
-	assert.True(t, ActiveMetadataChecks.Remove(username1))
-
 	action = dataprovider.BaseEventAction{
 		Type: dataprovider.ActionTypeUserExpirationCheck,
 	}
@@ -1551,10 +1506,6 @@ func TestEventRuleActionsNoGroupMatching(t *testing.T) {
 	if assert.Error(t, err) {
 		assert.Contains(t, err.Error(), "no user quota reset executed")
 	}
-	err = executeMetadataCheckRuleAction(conditions, &EventParams{})
-	if assert.Error(t, err) {
-		assert.Contains(t, err.Error(), "no metadata check executed")
-	}
 	err = executeTransferQuotaResetRuleAction(conditions, &EventParams{})
 	if assert.Error(t, err) {
 		assert.Contains(t, err.Error(), "no transfer quota reset executed")
@@ -1840,6 +1791,14 @@ func TestFilesystemActionErrors(t *testing.T) {
 		assert.Contains(t, getErrorString(err), "is outside base dir")
 	}
 
+	wr := &zipWriterWrapper{
+		Name:    xid.New().String() + ".zip",
+		Writer:  zip.NewWriter(bytes.NewBuffer(nil)),
+		Entries: map[string]bool{},
+	}
+	err = addZipEntry(wr, conn, "/p1", "/", 2000)
+	assert.ErrorIs(t, err, util.ErrRecursionTooDeep)
+
 	err = dataprovider.DeleteUser(username, "", "", "")
 	assert.NoError(t, err)
 	err = os.RemoveAll(user.GetHomeDir())
@@ -8077,10 +8077,9 @@ func TestSFTPLoopError(t *testing.T) {
 	assert.NoError(t, err, string(resp))
 	user2, resp, err = httpdtest.AddUser(user2, http.StatusCreated)
 	assert.NoError(t, err, string(resp))
-	// test metadata check event error
 	a1 := dataprovider.BaseEventAction{
 		Name: "a1",
-		Type: dataprovider.ActionTypeMetadataCheck,
+		Type: dataprovider.ActionTypeUserQuotaReset,
 	}
 	action1, _, err := httpdtest.AddEventAction(a1, http.StatusCreated)
 	assert.NoError(t, err)
@@ -54,7 +54,6 @@ const (
 	PermAdminManageDefender = "manage_defender"
 	PermAdminViewDefender = "view_defender"
 	PermAdminRetentionChecks = "retention_checks"
-	PermAdminMetadataChecks = "metadata_checks"
 	PermAdminViewEvents = "view_events"
 	PermAdminManageEventRules = "manage_event_rules"
 	PermAdminManageRoles = "manage_roles"
@@ -76,7 +75,7 @@ var (
 		PermAdminCloseConnections, PermAdminViewServerStatus, PermAdminManageAdmins, PermAdminManageRoles,
 		PermAdminManageEventRules, PermAdminManageAPIKeys, PermAdminQuotaScans, PermAdminManageSystem,
 		PermAdminManageDefender, PermAdminViewDefender, PermAdminManageIPLists, PermAdminRetentionChecks,
-		PermAdminMetadataChecks, PermAdminViewEvents}
+		PermAdminViewEvents}
 	forbiddenPermsForRoleAdmins = []string{PermAdminAny, PermAdminManageAdmins, PermAdminManageSystem,
 		PermAdminManageEventRules, PermAdminManageIPLists, PermAdminManageRoles}
 )
@@ -44,7 +44,7 @@ const (
 	ActionTypeTransferQuotaReset
 	ActionTypeDataRetentionCheck
 	ActionTypeFilesystem
-	ActionTypeMetadataCheck
+	actionTypeReserved
 	ActionTypePasswordExpirationCheck
 	ActionTypeUserExpirationCheck
 	ActionTypeIDPAccountCheck
@@ -53,7 +53,7 @@ const (
 var (
 	supportedEventActions = []int{ActionTypeHTTP, ActionTypeCommand, ActionTypeEmail, ActionTypeFilesystem,
 		ActionTypeBackup, ActionTypeUserQuotaReset, ActionTypeFolderQuotaReset, ActionTypeTransferQuotaReset,
-		ActionTypeDataRetentionCheck, ActionTypeMetadataCheck, ActionTypePasswordExpirationCheck,
+		ActionTypeDataRetentionCheck, ActionTypePasswordExpirationCheck,
 		ActionTypeUserExpirationCheck, ActionTypeIDPAccountCheck}
 )
 
@@ -77,8 +75,6 @@ func getActionTypeAsString(action int) string {
 		return util.I18nActionTypeTransferQuotaReset
 	case ActionTypeDataRetentionCheck:
 		return util.I18nActionTypeDataRetentionCheck
-	case ActionTypeMetadataCheck:
-		return util.I18nActionTypeMetadataCheck
 	case ActionTypeFilesystem:
 		return util.I18nActionTypeFilesystem
 	case ActionTypePasswordExpirationCheck:
@@ -1652,7 +1650,7 @@ func (r *EventRule) validateMandatorySyncActions() error {
 
 func (r *EventRule) checkIPBlockedAndCertificateActions() error {
 	unavailableActions := []int{ActionTypeUserQuotaReset, ActionTypeFolderQuotaReset, ActionTypeTransferQuotaReset,
-		ActionTypeDataRetentionCheck, ActionTypeMetadataCheck, ActionTypeFilesystem, ActionTypePasswordExpirationCheck,
+		ActionTypeDataRetentionCheck, ActionTypeFilesystem, ActionTypePasswordExpirationCheck,
 		ActionTypeUserExpirationCheck}
 	for _, action := range r.Actions {
 		if util.Contains(unavailableActions, action.Type) {
@@ -1668,7 +1666,7 @@ func (r *EventRule) checkProviderEventActions(providerObjectType string) error {
 	// can be executed only if we modify a user. They will be executed for the
 	// affected user. Folder quota reset can be executed only for folders.
 	userSpecificActions := []int{ActionTypeUserQuotaReset, ActionTypeTransferQuotaReset,
-		ActionTypeDataRetentionCheck, ActionTypeMetadataCheck, ActionTypeFilesystem,
+		ActionTypeDataRetentionCheck, ActionTypeFilesystem,
 		ActionTypePasswordExpirationCheck, ActionTypeUserExpirationCheck}
 	for _, action := range r.Actions {
 		if util.Contains(userSpecificActions, action.Type) && providerObjectType != actionObjectUser {
@@ -620,27 +620,6 @@ func (u *User) GetVirtualFolderForPath(virtualPath string) (vfs.VirtualFolder, e
 	return folder, errNoMatchingVirtualFolder
 }
 
-// CheckMetadataConsistency checks the consistency between the metadata stored
-// in the configured metadata plugin and the filesystem
-func (u *User) CheckMetadataConsistency() error {
-	fs, err := u.getRootFs(xid.New().String())
-	if err != nil {
-		return err
-	}
-	defer fs.Close()
-
-	if err = fs.CheckMetadata(); err != nil {
-		return err
-	}
-	for idx := range u.VirtualFolders {
-		v := &u.VirtualFolders[idx]
-		if err = v.CheckMetadataConsistency(); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 // ScanQuota scans the user home dir and virtual folders, included in its quota,
 // and returns the number of files and their size
 func (u *User) ScanQuota() (int, int64, error) {
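Aside (sketch, not from the commit): the removed method shows a recurring shape in this file, apply an operation to the root filesystem, then to each virtual folder, stopping at the first error. A generic illustration with a hypothetical callback:

```go
package example

// checkAll mirrors the removed CheckMetadataConsistency control flow:
// run the check on the root, then on every virtual folder, returning the
// first error encountered. check is a hypothetical callback.
func checkAll(root string, vfolders []string, check func(string) error) error {
	if err := check(root); err != nil {
		return err
	}
	for _, v := range vfolders {
		if err := check(v); err != nil {
			return err
		}
	}
	return nil
}
```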
@@ -1,71 +0,0 @@
-// Copyright (C) 2019 Nicola Murino
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published
-// by the Free Software Foundation, version 3.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-package httpd
-
-import (
-	"fmt"
-	"net/http"
-
-	"github.com/go-chi/render"
-
-	"github.com/drakkan/sftpgo/v2/internal/common"
-	"github.com/drakkan/sftpgo/v2/internal/dataprovider"
-	"github.com/drakkan/sftpgo/v2/internal/logger"
-)
-
-func getMetadataChecks(w http.ResponseWriter, r *http.Request) {
-	r.Body = http.MaxBytesReader(w, r.Body, maxRequestSize)
-	claims, err := getTokenClaims(r)
-	if err != nil || claims.Username == "" {
-		sendAPIResponse(w, r, err, "Invalid token claims", http.StatusBadRequest)
-		return
-	}
-	render.JSON(w, r, common.ActiveMetadataChecks.Get(claims.Role))
-}
-
-func startMetadataCheck(w http.ResponseWriter, r *http.Request) {
-	r.Body = http.MaxBytesReader(w, r.Body, maxRequestSize)
-	claims, err := getTokenClaims(r)
-	if err != nil || claims.Username == "" {
-		sendAPIResponse(w, r, err, "Invalid token claims", http.StatusBadRequest)
-		return
-	}
-
-	user, err := dataprovider.GetUserWithGroupSettings(getURLParam(r, "username"), claims.Role)
-	if err != nil {
-		sendAPIResponse(w, r, err, "", getRespStatus(err))
-		return
-	}
-	if !common.ActiveMetadataChecks.Add(user.Username, user.Role) {
-		sendAPIResponse(w, r, err, fmt.Sprintf("Another check is already in progress for user %q", user.Username),
-			http.StatusConflict)
-		return
-	}
-	go doMetadataCheck(user) //nolint:errcheck
-
-	sendAPIResponse(w, r, err, "Check started", http.StatusAccepted)
-}
-
-func doMetadataCheck(user dataprovider.User) error {
-	defer common.ActiveMetadataChecks.Remove(user.Username)
-
-	err := user.CheckMetadataConsistency()
-	if err != nil {
-		logger.Warn(logSender, "", "error checking metadata for user %q: %v", user.Username, err)
-		return err
-	}
-	logger.Debug(logSender, "", "metadata check completed for user: %q", user.Username)
-	return nil
-}
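Aside (sketch, not from the commit): the deleted `startMetadataCheck` handler is a compact example of the guard-then-background-work HTTP pattern, register the job atomically, answer `409 Conflict` if one is already running, otherwise start a goroutine and reply `202 Accepted`. A self-contained illustration using only standard library types:

```go
package main

import (
	"net/http"
	"sync"
)

var (
	mu      sync.Mutex
	running = map[string]bool{}
)

// startCheck mirrors the deleted handler's control flow: an atomic
// "already running?" guard, then asynchronous work and a 202 reply.
func startCheck(w http.ResponseWriter, r *http.Request) {
	user := r.URL.Query().Get("username")
	mu.Lock()
	if running[user] {
		mu.Unlock()
		http.Error(w, "another check is already in progress", http.StatusConflict)
		return
	}
	running[user] = true
	mu.Unlock()

	go func() {
		defer func() { mu.Lock(); delete(running, user); mu.Unlock() }()
		// ... the long-running check would go here ...
	}()
	w.WriteHeader(http.StatusAccepted)
}

func main() {
	http.HandleFunc("/check", startCheck)
	_ = http.ListenAndServe(":8080", nil)
}
```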
@@ -86,8 +86,6 @@ const (
 	userSharesPath = "/api/v2/user/shares"
 	retentionBasePath = "/api/v2/retention/users"
 	retentionChecksPath = "/api/v2/retention/users/checks"
-	metadataBasePath = "/api/v2/metadata/users"
-	metadataChecksPath = "/api/v2/metadata/users/checks"
 	fsEventsPath = "/api/v2/events/fs"
 	providerEventsPath = "/api/v2/events/provider"
 	logEventsPath = "/api/v2/events/logs"
@@ -122,7 +122,6 @@ const (
 	userProfilePath = "/api/v2/user/profile"
 	userSharesPath = "/api/v2/user/shares"
 	retentionBasePath = "/api/v2/retention/users"
-	metadataBasePath = "/api/v2/metadata/users"
 	fsEventsPath = "/api/v2/events/fs"
 	providerEventsPath = "/api/v2/events/provider"
 	logEventsPath = "/api/v2/events/logs"
@@ -4624,52 +4623,6 @@ func TestUserType(t *testing.T) {
 	assert.NoError(t, err)
 }
 
-func TestMetadataAPIMock(t *testing.T) {
-	user, _, err := httpdtest.AddUser(getTestUser(), http.StatusCreated)
-	assert.NoError(t, err)
-
-	token, err := getJWTAPITokenFromTestServer(defaultTokenAuthUser, defaultTokenAuthPass)
-	assert.NoError(t, err)
-	req, err := http.NewRequest(http.MethodGet, path.Join(metadataBasePath, "/checks"), nil)
-	assert.NoError(t, err)
-	setBearerForReq(req, token)
-	rr := executeRequest(req)
-	checkResponseCode(t, http.StatusOK, rr)
-	var resp []any
-	err = json.Unmarshal(rr.Body.Bytes(), &resp)
-	assert.NoError(t, err)
-	assert.Len(t, resp, 0)
-
-	req, err = http.NewRequest(http.MethodPost, path.Join(metadataBasePath, user.Username, "/check"), nil)
-	assert.NoError(t, err)
-	setBearerForReq(req, token)
-	rr = executeRequest(req)
-	checkResponseCode(t, http.StatusAccepted, rr)
-
-	assert.Eventually(t, func() bool {
-		req, err := http.NewRequest(http.MethodGet, path.Join(metadataBasePath, "/checks"), nil)
-		assert.NoError(t, err)
-		setBearerForReq(req, token)
-		rr := executeRequest(req)
-		checkResponseCode(t, http.StatusOK, rr)
-		var resp []any
-		err = json.Unmarshal(rr.Body.Bytes(), &resp)
-		assert.NoError(t, err)
-		return len(resp) == 0
-	}, 1000*time.Millisecond, 50*time.Millisecond)
-
-	_, err = httpdtest.RemoveUser(user, http.StatusOK)
-	assert.NoError(t, err)
-	err = os.RemoveAll(user.GetHomeDir())
-	assert.NoError(t, err)
-
-	req, err = http.NewRequest(http.MethodPost, path.Join(metadataBasePath, user.Username, "/check"), nil)
-	assert.NoError(t, err)
-	setBearerForReq(req, token)
-	rr = executeRequest(req)
-	checkResponseCode(t, http.StatusNotFound, rr)
-}
-
 func TestRetentionAPI(t *testing.T) {
 	user, _, err := httpdtest.AddUser(getTestUser(), http.StatusCreated)
 	assert.NoError(t, err)
@@ -731,16 +731,6 @@ func TestInvalidToken(t *testing.T) {
	assert.Equal(t, http.StatusBadRequest, rr.Code)
	assert.Contains(t, rr.Body.String(), "Invalid token claims")
-
-	rr = httptest.NewRecorder()
-	getMetadataChecks(rr, req)
-	assert.Equal(t, http.StatusBadRequest, rr.Code)
-	assert.Contains(t, rr.Body.String(), "Invalid token claims")
-
-	rr = httptest.NewRecorder()
-	startMetadataCheck(rr, req)
-	assert.Equal(t, http.StatusBadRequest, rr.Code)
-	assert.Contains(t, rr.Body.String(), "Invalid token claims")

	rr = httptest.NewRecorder()
	getUsersQuotaScans(rr, req)
	assert.Equal(t, http.StatusBadRequest, rr.Code)
@@ -2852,52 +2842,6 @@ func TestUserCanResetPassword(t *testing.T) {
	assert.False(t, isUserAllowedToResetPassword(req, &u))
}

-func TestMetadataAPI(t *testing.T) {
-	username := "metadatauser"
-	assert.False(t, common.ActiveMetadataChecks.Remove(username))
-
-	user := dataprovider.User{
-		BaseUser: sdk.BaseUser{
-			Username: username,
-			Password: "metadata_pwd",
-			HomeDir:  filepath.Join(os.TempDir(), username),
-			Status:   1,
-		},
-	}
-	user.Permissions = make(map[string][]string)
-	user.Permissions["/"] = []string{dataprovider.PermAny}
-	err := dataprovider.AddUser(&user, "", "", "")
-	assert.NoError(t, err)
-
-	assert.True(t, common.ActiveMetadataChecks.Add(username, ""))
-
-	tokenAuth := jwtauth.New(jwa.HS256.String(), util.GenerateRandomBytes(32), nil)
-	claims := make(map[string]any)
-	claims["username"] = defaultAdminUsername
-	claims[jwt.ExpirationKey] = time.Now().UTC().Add(1 * time.Hour)
-	token, _, err := tokenAuth.Encode(claims)
-	assert.NoError(t, err)
-	req, err := http.NewRequest(http.MethodPost, path.Join(metadataBasePath, username, "check"), nil)
-	assert.NoError(t, err)
-	rctx := chi.NewRouteContext()
-	rctx.URLParams.Add("username", username)
-	req = req.WithContext(context.WithValue(req.Context(), chi.RouteCtxKey, rctx))
-	req = req.WithContext(context.WithValue(req.Context(), jwtauth.TokenCtxKey, token))
-
-	rr := httptest.NewRecorder()
-	startMetadataCheck(rr, req)
-	assert.Equal(t, http.StatusConflict, rr.Code, rr.Body.String())
-
-	assert.True(t, common.ActiveMetadataChecks.Remove(username))
-	assert.Len(t, common.ActiveMetadataChecks.Get(""), 0)
-	err = dataprovider.DeleteUser(username, "", "", "")
-	assert.NoError(t, err)
-
-	user.FsConfig.Provider = sdk.AzureBlobFilesystemProvider
-	err = doMetadataCheck(user)
-	assert.Error(t, err)
-}
-
func TestBrowsableSharePaths(t *testing.T) {
	share := dataprovider.Share{
		Paths: []string{"/"},
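Note: the deleted TestMetadataAPI above exercised common.ActiveMetadataChecks, whose Add reported false while a check was already running for the same user; the handler turned that into the 409 Conflict the test asserts. A self-contained sketch of that add-if-absent set, under hypothetical names rather than the SFTPGo API:

package main

import (
	"fmt"
	"sync"
)

// activeChecks is a hypothetical stand-in for common.ActiveMetadataChecks:
// Add succeeds only if no check is running for the user, which is what
// let the removed handler answer 409 Conflict on double starts.
type activeChecks struct {
	mu     sync.Mutex
	checks map[string]struct{}
}

func (a *activeChecks) Add(username string) bool {
	a.mu.Lock()
	defer a.mu.Unlock()
	if _, ok := a.checks[username]; ok {
		return false // a check is already active for this user
	}
	a.checks[username] = struct{}{}
	return true
}

func (a *activeChecks) Remove(username string) bool {
	a.mu.Lock()
	defer a.mu.Unlock()
	if _, ok := a.checks[username]; ok {
		delete(a.checks, username)
		return true
	}
	return false
}

func main() {
	a := &activeChecks{checks: make(map[string]struct{})}
	fmt.Println(a.Add("metadatauser"))    // true: check starts
	fmt.Println(a.Add("metadatauser"))    // false: maps to HTTP 409
	fmt.Println(a.Remove("metadatauser")) // true: check finished
}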
@@ -1378,9 +1378,6 @@ func (s *httpdServer) initializeRouter() {
		router.With(s.checkPerm(dataprovider.PermAdminRetentionChecks)).Get(retentionChecksPath, getRetentionChecks)
		router.With(s.checkPerm(dataprovider.PermAdminRetentionChecks)).Post(retentionBasePath+"/{username}/check",
			startRetentionCheck)
-		router.With(s.checkPerm(dataprovider.PermAdminMetadataChecks)).Get(metadataChecksPath, getMetadataChecks)
-		router.With(s.checkPerm(dataprovider.PermAdminMetadataChecks)).Post(metadataBasePath+"/{username}/check",
-			startMetadataCheck)
		router.With(s.checkPerm(dataprovider.PermAdminViewEvents), compressor.Handler).
			Get(fsEventsPath, searchFsEvents)
		router.With(s.checkPerm(dataprovider.PermAdminViewEvents), compressor.Handler).
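Note: once these routes are unregistered the endpoints simply vanish from the API surface. A minimal client-side probe a deployment script could use to confirm it is running a build without the metadata API; the base URL and token are placeholder assumptions, not values from this commit:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Placeholder assumptions for the sketch.
	const baseURL = "http://127.0.0.1:8080"
	const adminToken = "REPLACE_WITH_ADMIN_JWT"

	req, err := http.NewRequest(http.MethodGet, baseURL+"/api/v2/metadata/users/checks", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+adminToken)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// With the route removed the server can no longer answer 200 here;
	// a 404 (unknown route) is the expected response from builds that
	// include this commit.
	fmt.Println("status:", resp.StatusCode)
}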
@@ -1,94 +0,0 @@
-// Copyright (C) 2019 Nicola Murino
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published
-// by the Free Software Foundation, version 3.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-package plugin
-
-import (
-	"fmt"
-
-	"github.com/hashicorp/go-hclog"
-	"github.com/hashicorp/go-plugin"
-	"github.com/sftpgo/sdk/plugin/metadata"
-
-	"github.com/drakkan/sftpgo/v2/internal/logger"
-)
-
-type metadataPlugin struct {
-	config    Config
-	metadater metadata.Metadater
-	client    *plugin.Client
-}
-
-func newMetadaterPlugin(config Config) (*metadataPlugin, error) {
-	p := &metadataPlugin{
-		config: config,
-	}
-	if err := p.initialize(); err != nil {
-		logger.Warn(logSender, "", "unable to create metadata plugin: %v, config %+v", err, config)
-		return nil, err
-	}
-	return p, nil
-}
-
-func (p *metadataPlugin) exited() bool {
-	return p.client.Exited()
-}
-
-func (p *metadataPlugin) cleanup() {
-	p.client.Kill()
-}
-
-func (p *metadataPlugin) initialize() error {
-	killProcess(p.config.Cmd)
-	logger.Debug(logSender, "", "create new metadata plugin %q", p.config.Cmd)
-	secureConfig, err := p.config.getSecureConfig()
-	if err != nil {
-		return err
-	}
-	client := plugin.NewClient(&plugin.ClientConfig{
-		HandshakeConfig: metadata.Handshake,
-		Plugins:         metadata.PluginMap,
-		Cmd:             p.config.getCommand(),
-		SkipHostEnv:     true,
-		AllowedProtocols: []plugin.Protocol{
-			plugin.ProtocolGRPC,
-		},
-		Managed:      false,
-		AutoMTLS:     p.config.AutoMTLS,
-		SecureConfig: secureConfig,
-		Logger: &logger.HCLogAdapter{
-			Logger: hclog.New(&hclog.LoggerOptions{
-				Name:        fmt.Sprintf("%v.%v", logSender, metadata.PluginName),
-				Level:       pluginsLogLevel,
-				DisableTime: true,
-			}),
-		},
-	})
-	rpcClient, err := client.Client()
-	if err != nil {
-		logger.Debug(logSender, "", "unable to get rpc client for plugin %q: %v", p.config.Cmd, err)
-		return err
-	}
-	raw, err := rpcClient.Dispense(metadata.PluginName)
-	if err != nil {
-		logger.Debug(logSender, "", "unable to get plugin %v from rpc client for command %q: %v",
-			metadata.PluginName, p.config.Cmd, err)
-		return err
-	}
-
-	p.client = client
-	p.metadater = raw.(metadata.Metadater)
-
-	return nil
-}
@@ -35,7 +35,6 @@ import (
	"github.com/sftpgo/sdk/plugin/eventsearcher"
	"github.com/sftpgo/sdk/plugin/ipfilter"
	kmsplugin "github.com/sftpgo/sdk/plugin/kms"
-	"github.com/sftpgo/sdk/plugin/metadata"
	"github.com/sftpgo/sdk/plugin/notifier"

	"github.com/drakkan/sftpgo/v2/internal/kms"

@@ -53,8 +52,6 @@ var (
	pluginsLogLevel = hclog.Debug
	// ErrNoSearcher defines the error to return for events searches if no plugin is configured
	ErrNoSearcher = errors.New("no events searcher plugin defined")
-	// ErrNoMetadater returns the error to return for metadata methods if no plugin is configured
-	ErrNoMetadater = errors.New("no metadata plugin defined")
)

// Renderer defines the interface for generic objects rendering

@@ -172,13 +169,10 @@ type Manager struct {
	auths         []*authPlugin
	searcherLock  sync.RWMutex
	searcher      *searcherPlugin
-	metadaterLock sync.RWMutex
-	metadater     *metadataPlugin
	ipFilterLock  sync.RWMutex
	filter        *ipFilterPlugin
	authScopes    int
	hasSearcher   bool
-	hasMetadater  bool
	hasNotifiers  bool
	hasAuths      bool
	hasIPFilter   bool

@@ -250,12 +244,6 @@ func initializePlugins() error {
			return err
		}
		Handler.searcher = plugin
-	case metadata.PluginName:
-		plugin, err := newMetadaterPlugin(config)
-		if err != nil {
-			return err
-		}
-		Handler.metadater = plugin
	case ipfilter.PluginName:
		plugin, err := newIPFilterPlugin(config)
		if err != nil {

@@ -274,7 +262,6 @@ func (m *Manager) validateConfigs() error {
	kmsSchemes := make(map[string]bool)
	kmsEncryptions := make(map[string]bool)
	m.hasSearcher = false
-	m.hasMetadater = false
	m.hasNotifiers = false
	m.hasAuths = false
	m.hasIPFilter = false

@@ -295,11 +282,6 @@ func (m *Manager) validateConfigs() error {
			return errors.New("only one eventsearcher plugin can be defined")
		}
		m.hasSearcher = true
-	case metadata.PluginName:
-		if m.hasMetadater {
-			return errors.New("only one metadata plugin can be defined")
-		}
-		m.hasMetadater = true
	case notifier.PluginName:
		m.hasNotifiers = true
	case auth.PluginName:

@@ -405,71 +387,6 @@ func (m *Manager) SearchLogEvents(searchFilters *eventsearcher.LogEventSearch) (
	return plugin.searchear.SearchLogEvents(searchFilters)
}

-// HasMetadater returns true if a metadata plugin is defined
-func (m *Manager) HasMetadater() bool {
-	return m.hasMetadater
-}
-
-// SetModificationTime sets the modification time for the specified object
-func (m *Manager) SetModificationTime(storageID, objectPath string, mTime int64) error {
-	if !m.hasMetadater {
-		return ErrNoMetadater
-	}
-	m.metadaterLock.RLock()
-	plugin := m.metadater
-	m.metadaterLock.RUnlock()
-
-	return plugin.metadater.SetModificationTime(storageID, objectPath, mTime)
-}
-
-// GetModificationTime returns the modification time for the specified path
-func (m *Manager) GetModificationTime(storageID, objectPath string, _ bool) (int64, error) {
-	if !m.hasMetadater {
-		return 0, ErrNoMetadater
-	}
-	m.metadaterLock.RLock()
-	plugin := m.metadater
-	m.metadaterLock.RUnlock()
-
-	return plugin.metadater.GetModificationTime(storageID, objectPath)
-}
-
-// GetModificationTimes returns the modification times for all the files within the specified folder
-func (m *Manager) GetModificationTimes(storageID, objectPath string) (map[string]int64, error) {
-	if !m.hasMetadater {
-		return nil, ErrNoMetadater
-	}
-	m.metadaterLock.RLock()
-	plugin := m.metadater
-	m.metadaterLock.RUnlock()
-
-	return plugin.metadater.GetModificationTimes(storageID, objectPath)
-}
-
-// RemoveMetadata deletes the metadata stored for the specified object
-func (m *Manager) RemoveMetadata(storageID, objectPath string) error {
-	if !m.hasMetadater {
-		return ErrNoMetadater
-	}
-	m.metadaterLock.RLock()
-	plugin := m.metadater
-	m.metadaterLock.RUnlock()
-
-	return plugin.metadater.RemoveMetadata(storageID, objectPath)
-}
-
-// GetMetadataFolders returns the folders that metadata is associated with
-func (m *Manager) GetMetadataFolders(storageID, from string, limit int) ([]string, error) {
-	if !m.hasMetadater {
-		return nil, ErrNoMetadater
-	}
-	m.metadaterLock.RLock()
-	plugin := m.metadater
-	m.metadaterLock.RUnlock()
-
-	return plugin.metadater.GetFolders(storageID, limit, from)
-}
-
// IsIPBanned returns true if the IP filter plugin does not allow the specified ip.
// If no IP filter plugin is defined this method returns false
func (m *Manager) IsIPBanned(ip, protocol string) bool {

@@ -689,16 +606,6 @@ func (m *Manager) checkCrashedPlugins() {
		m.searcherLock.RUnlock()
	}

-	if m.hasMetadater {
-		m.metadaterLock.RLock()
-		if m.metadater.exited() {
-			defer func(cfg Config) {
-				Handler.restartMetadaterPlugin(cfg)
-			}(m.metadater.config)
-		}
-		m.metadaterLock.RUnlock()
-	}
-
	if m.hasIPFilter {
		m.ipFilterLock.RLock()
		if m.filter.exited() {

@@ -776,22 +683,6 @@ func (m *Manager) restartSearcherPlugin(config Config) {
	m.searcherLock.Unlock()
}

-func (m *Manager) restartMetadaterPlugin(config Config) {
-	if m.closed.Load() {
-		return
-	}
-	logger.Info(logSender, "", "try to restart crashed metadater plugin %q", config.Cmd)
-	plugin, err := newMetadaterPlugin(config)
-	if err != nil {
-		logger.Error(logSender, "", "unable to restart metadater plugin %q, err: %v", config.Cmd, err)
-		return
-	}
-
-	m.metadaterLock.Lock()
-	m.metadater = plugin
-	m.metadaterLock.Unlock()
-}
-
func (m *Manager) restartIPFilterPlugin(config Config) {
	if m.closed.Load() {
		return

@@ -851,13 +742,6 @@ func (m *Manager) Cleanup() {
		m.searcherLock.Unlock()
	}

-	if m.hasMetadater {
-		m.metadaterLock.Lock()
-		logger.Debug(logSender, "", "cleanup metadater plugin %v", m.metadater.config.Cmd)
-		m.metadater.cleanup()
-		m.metadaterLock.Unlock()
-	}
-
	if m.hasIPFilter {
		m.ipFilterLock.Lock()
		logger.Debug(logSender, "", "cleanup IP filter plugin %v", m.filter.config.Cmd)
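Note: every removed Manager accessor followed the same idiom that the surviving searcher and IP-filter plugins still use: check the has* flag, take the read lock just long enough to snapshot the plugin pointer, then call the plugin unlocked so a crashed instance can be swapped by the restart path. A generic, self-contained sketch of that pattern with illustrative names only (not the SFTPGo API):

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errNoPlugin = errors.New("no plugin defined")

type fakePlugin struct{ name string }

func (p *fakePlugin) do() string { return "handled by " + p.name }

// manager sketches the lock/snapshot idiom: hold the read lock only to
// copy the pointer, never across the plugin call, so the restart logic
// can replace a crashed plugin while calls are in flight.
type manager struct {
	mu        sync.RWMutex
	plugin    *fakePlugin
	hasPlugin bool
}

func (m *manager) call() (string, error) {
	if !m.hasPlugin {
		return "", errNoPlugin
	}
	m.mu.RLock()
	p := m.plugin // snapshot under RLock
	m.mu.RUnlock()
	return p.do(), nil
}

func main() {
	m := &manager{plugin: &fakePlugin{name: "searcher"}, hasPlugin: true}
	fmt.Println(m.call())
}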
@@ -269,7 +269,6 @@ const (
	I18nActionTypeFolderQuotaReset    = "actions.types.folder_quota_reset"
	I18nActionTypeTransferQuotaReset  = "actions.types.transfer_quota_reset"
	I18nActionTypeDataRetentionCheck  = "actions.types.data_retention_check"
-	I18nActionTypeMetadataCheck       = "actions.types.metadata_check"
	I18nActionTypeFilesystem          = "actions.types.filesystem"
	I18nActionTypePwdExpirationCheck  = "actions.types.password_expiration_check"
	I18nActionTypeUserExpirationCheck = "actions.types.user_expiration_check"
@@ -46,7 +46,6 @@ import (

	"github.com/drakkan/sftpgo/v2/internal/logger"
	"github.com/drakkan/sftpgo/v2/internal/metric"
-	"github.com/drakkan/sftpgo/v2/internal/plugin"
	"github.com/drakkan/sftpgo/v2/internal/util"
	"github.com/drakkan/sftpgo/v2/internal/version"
)

@@ -171,10 +170,10 @@ func (fs *AzureBlobFs) ConnectionID() string {
// Stat returns a FileInfo describing the named file
func (fs *AzureBlobFs) Stat(name string) (os.FileInfo, error) {
	if name == "" || name == "/" || name == "." {
-		return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, true, 0, time.Unix(0, 0), false))
+		return NewFileInfo(name, true, 0, time.Unix(0, 0), false), nil
	}
	if fs.config.KeyPrefix == name+"/" {
-		return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, true, 0, time.Unix(0, 0), false))
+		return NewFileInfo(name, true, 0, time.Unix(0, 0), false), nil
	}

	attrs, err := fs.headObject(name)

@@ -182,9 +181,7 @@ func (fs *AzureBlobFs) Stat(name string) (os.FileInfo, error) {
		contentType := util.GetStringFromPointer(attrs.ContentType)
		isDir := checkDirectoryMarkers(contentType, attrs.Metadata)
		metric.AZListObjectsCompleted(nil)
-		return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, isDir,
-			util.GetIntFromPointer(attrs.ContentLength),
-			util.GetTimeFromPointer(attrs.LastModified), false))
+		return NewFileInfo(name, isDir, util.GetIntFromPointer(attrs.ContentLength), util.GetTimeFromPointer(attrs.LastModified), false), nil
	}
	if !fs.IsNotExist(err) {
		return nil, err

@@ -195,7 +192,7 @@ func (fs *AzureBlobFs) Stat(name string) (os.FileInfo, error) {
		return nil, err
	}
	if hasContents {
-		return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, true, 0, time.Unix(0, 0), false))
+		return NewFileInfo(name, true, 0, time.Unix(0, 0), false), nil
	}
	return nil, os.ErrNotExist
}

@@ -347,11 +344,6 @@ func (fs *AzureBlobFs) Remove(name string, isDir bool) error {
		}
	}
	metric.AZDeleteObjectCompleted(err)
-	if plugin.Handler.HasMetadater() && err == nil && !isDir {
-		if errMetadata := plugin.Handler.RemoveMetadata(fs.getStorageID(), ensureAbsPath(name)); errMetadata != nil {
-			fsLog(fs, logger.LevelWarn, "unable to remove metadata for path %q: %+v", name, errMetadata)
-		}
-	}
	return err
}

@@ -385,22 +377,8 @@ func (*AzureBlobFs) Chmod(_ string, _ os.FileMode) error {
}

// Chtimes changes the access and modification times of the named file.
-func (fs *AzureBlobFs) Chtimes(name string, _, mtime time.Time, isUploading bool) error {
-	if !plugin.Handler.HasMetadater() {
-		return ErrVfsUnsupported
-	}
-	if !isUploading {
-		info, err := fs.Stat(name)
-		if err != nil {
-			return err
-		}
-		if info.IsDir() {
-			return ErrVfsUnsupported
-		}
-	}
-	return plugin.Handler.SetModificationTime(fs.getStorageID(), ensureAbsPath(name),
-		util.GetTimeAsMsSinceEpoch(mtime))
+func (fs *AzureBlobFs) Chtimes(_ string, _, _ time.Time, _ bool) error {
+	return ErrVfsUnsupported
}

// Truncate changes the size of the named file.

@@ -509,53 +487,6 @@ func (fs *AzureBlobFs) ScanRootDirContents() (int, int64, error) {
	return fs.GetDirSize(fs.config.KeyPrefix)
}

-func (fs *AzureBlobFs) getFileNamesInPrefix(fsPrefix string) (map[string]bool, error) {
-	fileNames := make(map[string]bool)
-	prefix := ""
-	if fsPrefix != "/" {
-		prefix = strings.TrimPrefix(fsPrefix, "/")
-	}
-
-	pager := fs.containerClient.NewListBlobsHierarchyPager("/", &container.ListBlobsHierarchyOptions{
-		Include: container.ListBlobsInclude{
-			//Metadata: true,
-		},
-		Prefix:     &prefix,
-		MaxResults: &azureBlobDefaultPageSize,
-	})
-
-	for pager.More() {
-		ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
-		defer cancelFn()
-
-		resp, err := pager.NextPage(ctx)
-		if err != nil {
-			metric.AZListObjectsCompleted(err)
-			return fileNames, err
-		}
-		for _, blobItem := range resp.ListBlobsHierarchySegmentResponse.Segment.BlobItems {
-			name := util.GetStringFromPointer(blobItem.Name)
-			name = strings.TrimPrefix(name, prefix)
-			if blobItem.Properties != nil {
-				contentType := util.GetStringFromPointer(blobItem.Properties.ContentType)
-				isDir := checkDirectoryMarkers(contentType, blobItem.Metadata)
-				if isDir {
-					continue
-				}
-				fileNames[name] = true
-			}
-		}
-	}
-	metric.AZListObjectsCompleted(nil)
-
-	return fileNames, nil
-}
-
-// CheckMetadata checks the metadata consistency
-func (fs *AzureBlobFs) CheckMetadata() error {
-	return fsMetadataCheck(fs, fs.getStorageID(), fs.config.KeyPrefix)
-}
-
// GetDirSize returns the number of files and the size for a folder
// including any subfolders
func (fs *AzureBlobFs) GetDirSize(dirname string) (int, int64, error) {

@@ -848,16 +779,6 @@ func (fs *AzureBlobFs) renameInternal(source, target string, fi os.FileInfo, rec
		}
		numFiles++
		filesSize += fi.Size()
-		if plugin.Handler.HasMetadater() {
-			if !fi.IsDir() {
-				err := plugin.Handler.SetModificationTime(fs.getStorageID(), ensureAbsPath(target),
-					util.GetTimeAsMsSinceEpoch(fi.ModTime()))
-				if err != nil {
-					fsLog(fs, logger.LevelWarn, "unable to preserve modification time after renaming %q -> %q: %+v",
-						source, target, err)
-				}
-			}
-		}
	}
	err := fs.skipNotExistErr(fs.Remove(source, fi.IsDir()))
	return numFiles, filesSize, err

@@ -1162,16 +1083,6 @@ func (fs *AzureBlobFs) downloadToWriter(name string, w PipeWriter) (int64, error
	return n, err
}

-func (fs *AzureBlobFs) getStorageID() string {
-	if fs.config.Endpoint != "" {
-		if !strings.HasSuffix(fs.config.Endpoint, "/") {
-			return fmt.Sprintf("azblob://%v/%v", fs.config.Endpoint, fs.config.Container)
-		}
-		return fmt.Sprintf("azblob://%v%v", fs.config.Endpoint, fs.config.Container)
-	}
-	return fmt.Sprintf("azblob://%v", fs.config.Container)
-}
-
func checkDirectoryMarkers(contentType string, metadata map[string]*string) bool {
	if contentType == dirMimeType {
		return true
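Note: after this change Chtimes on the Azure Blob backend (and, below, GCS and S3) fails unconditionally instead of persisting times through the plugin. A hedged sketch of the caller-side handling, using a local stand-in for vfs.ErrVfsUnsupported rather than the real package:

package main

import (
	"errors"
	"fmt"
	"time"
)

// errVfsUnsupported stands in for vfs.ErrVfsUnsupported, which the
// cloud backends now return from every Chtimes call.
var errVfsUnsupported = errors.New("vfs: no such operation supported")

// chtimes mirrors the new object-storage behavior: without a metadata
// plugin there is nowhere to persist a modification time.
func chtimes(_ string, _, _ time.Time, _ bool) error {
	return errVfsUnsupported
}

func main() {
	if err := chtimes("afile.txt", time.Now(), time.Now(), false); err != nil {
		if errors.Is(err, errVfsUnsupported) {
			fmt.Println("mtime not persisted on object storage: safe to ignore")
			return
		}
		panic(err)
	}
}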
@@ -211,18 +211,6 @@ func (v *VirtualFolder) GetFilesystem(connectionID string, forbiddenSelfUsers []
	}
}

-// CheckMetadataConsistency checks the consistency between the metadata stored
-// in the configured metadata plugin and the filesystem
-func (v *VirtualFolder) CheckMetadataConsistency() error {
-	fs, err := v.GetFilesystem(xid.New().String(), nil)
-	if err != nil {
-		return err
-	}
-	defer fs.Close()
-
-	return fs.CheckMetadata()
-}
-
// ScanQuota scans the folder and returns the number of files and their size
func (v *VirtualFolder) ScanQuota() (int, int64, error) {
	if v.hasPathPlaceholder() {
@@ -39,8 +39,6 @@ import (

	"github.com/drakkan/sftpgo/v2/internal/logger"
	"github.com/drakkan/sftpgo/v2/internal/metric"
-	"github.com/drakkan/sftpgo/v2/internal/plugin"
-	"github.com/drakkan/sftpgo/v2/internal/util"
	"github.com/drakkan/sftpgo/v2/internal/version"
)

@@ -112,10 +110,10 @@ func (fs *GCSFs) ConnectionID() string {
// Stat returns a FileInfo describing the named file
func (fs *GCSFs) Stat(name string) (os.FileInfo, error) {
	if name == "" || name == "/" || name == "." {
-		return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, true, 0, time.Unix(0, 0), false))
+		return NewFileInfo(name, true, 0, time.Unix(0, 0), false), nil
	}
	if fs.config.KeyPrefix == name+"/" {
-		return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, true, 0, time.Unix(0, 0), false))
+		return NewFileInfo(name, true, 0, time.Unix(0, 0), false), nil
	}
	return fs.getObjectStat(name)
}

@@ -304,11 +302,6 @@ func (fs *GCSFs) Remove(name string, isDir bool) error {
		err = fs.svc.Bucket(fs.config.Bucket).Object(strings.TrimSuffix(name, "/")).Delete(ctx)
	}
	metric.GCSDeleteObjectCompleted(err)
-	if plugin.Handler.HasMetadater() && err == nil && !isDir {
-		if errMetadata := plugin.Handler.RemoveMetadata(fs.getStorageID(), ensureAbsPath(name)); errMetadata != nil {
-			fsLog(fs, logger.LevelWarn, "unable to remove metadata for path %q: %+v", name, errMetadata)
-		}
-	}
	return err
}

@@ -342,22 +335,8 @@ func (*GCSFs) Chmod(_ string, _ os.FileMode) error {
}

// Chtimes changes the access and modification times of the named file.
-func (fs *GCSFs) Chtimes(name string, _, mtime time.Time, isUploading bool) error {
-	if !plugin.Handler.HasMetadater() {
-		return ErrVfsUnsupported
-	}
-	if !isUploading {
-		info, err := fs.Stat(name)
-		if err != nil {
-			return err
-		}
-		if info.IsDir() {
-			return ErrVfsUnsupported
-		}
-	}
-	return plugin.Handler.SetModificationTime(fs.getStorageID(), ensureAbsPath(name),
-		util.GetTimeAsMsSinceEpoch(mtime))
+func (fs *GCSFs) Chtimes(_ string, _, _ time.Time, _ bool) error {
+	return ErrVfsUnsupported
}

// Truncate changes the size of the named file.

@@ -459,67 +438,6 @@ func (fs *GCSFs) ScanRootDirContents() (int, int64, error) {
	return fs.GetDirSize(fs.config.KeyPrefix)
}

-func (fs *GCSFs) getFileNamesInPrefix(fsPrefix string) (map[string]bool, error) {
-	fileNames := make(map[string]bool)
-	prefix := ""
-	if fsPrefix != "/" {
-		prefix = strings.TrimPrefix(fsPrefix, "/")
-	}
-
-	query := &storage.Query{
-		Prefix:    prefix,
-		Delimiter: "/",
-	}
-	err := query.SetAttrSelection(gcsDefaultFieldsSelection)
-	if err != nil {
-		return fileNames, err
-	}
-	ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxLongTimeout))
-	defer cancelFn()
-
-	bkt := fs.svc.Bucket(fs.config.Bucket)
-	it := bkt.Objects(ctx, query)
-	pager := iterator.NewPager(it, defaultGCSPageSize, "")
-
-	for {
-		var objects []*storage.ObjectAttrs
-		pageToken, err := pager.NextPage(&objects)
-		if err != nil {
-			metric.GCSListObjectsCompleted(err)
-			return fileNames, err
-		}
-
-		for _, attrs := range objects {
-			if !attrs.Deleted.IsZero() {
-				continue
-			}
-			if attrs.Prefix == "" {
-				name, isDir := fs.resolve(attrs.Name, prefix, attrs.ContentType)
-				if name == "" {
-					continue
-				}
-				if isDir {
-					continue
-				}
-				fileNames[name] = true
-			}
-		}
-
-		objects = nil
-		if pageToken == "" {
-			break
-		}
-	}
-
-	metric.GCSListObjectsCompleted(nil)
-	return fileNames, nil
-}
-
-// CheckMetadata checks the metadata consistency
-func (fs *GCSFs) CheckMetadata() error {
-	return fsMetadataCheck(fs, fs.getStorageID(), fs.config.KeyPrefix)
-}
-
// GetDirSize returns the number of files and the size for a folder
// including any subfolders
func (fs *GCSFs) GetDirSize(dirname string) (int, int64, error) {

@@ -698,7 +616,7 @@ func (fs *GCSFs) getObjectStat(name string) (os.FileInfo, error) {
		objSize := attrs.Size
		objectModTime := attrs.Updated
		isDir := attrs.ContentType == dirMimeType || strings.HasSuffix(attrs.Name, "/")
-		return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, isDir, objSize, objectModTime, false))
+		return NewFileInfo(name, isDir, objSize, objectModTime, false), nil
	}
	if !fs.IsNotExist(err) {
		return nil, err

@@ -709,14 +627,14 @@ func (fs *GCSFs) getObjectStat(name string) (os.FileInfo, error) {
		return nil, err
	}
	if hasContents {
-		return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, true, 0, time.Unix(0, 0), false))
+		return NewFileInfo(name, true, 0, time.Unix(0, 0), false), nil
	}
	// finally check if this is an object with a trailing /
	attrs, err = fs.headObject(name + "/")
	if err != nil {
		return nil, err
	}
-	return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, true, attrs.Size, attrs.Updated, false))
+	return NewFileInfo(name, true, attrs.Size, attrs.Updated, false), nil
}

func (fs *GCSFs) setWriterAttrs(objectWriter *storage.Writer, flag int, name string) {

@@ -827,14 +745,6 @@ func (fs *GCSFs) renameInternal(source, target string, fi os.FileInfo, recursion
		}
		numFiles++
		filesSize += fi.Size()
-		if plugin.Handler.HasMetadater() {
-			err := plugin.Handler.SetModificationTime(fs.getStorageID(), ensureAbsPath(target),
-				util.GetTimeAsMsSinceEpoch(fi.ModTime()))
-			if err != nil {
-				fsLog(fs, logger.LevelWarn, "unable to preserve modification time after renaming %q -> %q: %+v",
-					source, target, err)
-			}
-		}
	}
	err := fs.Remove(source, fi.IsDir())
	if fs.IsNotExist(err) {

@@ -938,10 +848,6 @@ func (*GCSFs) getTempObject(name string) string {
	return filepath.Join(dir, ".sftpgo-partial."+guid+"."+filepath.Base(name))
}

-func (fs *GCSFs) getStorageID() string {
-	return fmt.Sprintf("gs://%v", fs.config.Bucket)
-}
-
type gcsDirLister struct {
	baseDirLister
	bucket *storage.BucketHandle
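Note: every Stat rewrite in these backends makes the same substitution: the FileInfo built from the backend's own attributes is the final answer, with no second lookup that could override the modification time. A tiny sketch of the resulting contract, using hypothetical stand-in types rather than the vfs package:

package main

import (
	"fmt"
	"time"
)

// objectAttrs models the only inputs Stat still consults: the object
// store's size and LastModified/Updated values.
type objectAttrs struct {
	size     int64
	modified time.Time
}

// stat mirrors the new code path: build the result from backend
// attributes and return it directly, with no plugin consultation.
func stat(name string, attrs objectAttrs) (string, int64, time.Time) {
	return name, attrs.size, attrs.modified
}

func main() {
	name, size, mod := stat("docs/readme.md", objectAttrs{
		size:     1024,
		modified: time.Date(2024, 5, 1, 12, 0, 0, 0, time.UTC),
	})
	// Listings now always show the store's own timestamp, verbatim.
	fmt.Println(name, size, mod)
}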
@@ -50,7 +50,6 @@ import (

	"github.com/drakkan/sftpgo/v2/internal/logger"
	"github.com/drakkan/sftpgo/v2/internal/metric"
-	"github.com/drakkan/sftpgo/v2/internal/plugin"
	"github.com/drakkan/sftpgo/v2/internal/util"
	"github.com/drakkan/sftpgo/v2/internal/version"
)

@@ -148,10 +147,10 @@ func (fs *S3Fs) ConnectionID() string {
func (fs *S3Fs) Stat(name string) (os.FileInfo, error) {
	var result *FileInfo
	if name == "" || name == "/" || name == "." {
-		return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, true, 0, time.Unix(0, 0), false))
+		return NewFileInfo(name, true, 0, time.Unix(0, 0), false), nil
	}
	if fs.config.KeyPrefix == name+"/" {
-		return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, true, 0, time.Unix(0, 0), false))
+		return NewFileInfo(name, true, 0, time.Unix(0, 0), false), nil
	}
	obj, err := fs.headObject(name)
	if err == nil {

@@ -162,8 +161,7 @@ func (fs *S3Fs) Stat(name string) (os.FileInfo, error) {
			_, err = fs.headObject(name + "/")
			isDir = err == nil
		}
-		return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, isDir, util.GetIntFromPointer(obj.ContentLength),
-			util.GetTimeFromPointer(obj.LastModified), false))
+		return NewFileInfo(name, isDir, util.GetIntFromPointer(obj.ContentLength), util.GetTimeFromPointer(obj.LastModified), false), nil
	}
	if !fs.IsNotExist(err) {
		return result, err

@@ -171,7 +169,7 @@ func (fs *S3Fs) Stat(name string) (os.FileInfo, error) {
	// now check if this is a prefix (virtual directory)
	hasContents, err := fs.hasContents(name)
	if err == nil && hasContents {
-		return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, true, 0, time.Unix(0, 0), false))
+		return NewFileInfo(name, true, 0, time.Unix(0, 0), false), nil
	} else if err != nil {
		return nil, err
	}

@@ -188,8 +186,7 @@ func (fs *S3Fs) getStatForDir(name string) (os.FileInfo, error) {
	if err != nil {
		return result, err
	}
-	return updateFileInfoModTime(fs.getStorageID(), name, NewFileInfo(name, true, util.GetIntFromPointer(obj.ContentLength),
-		util.GetTimeFromPointer(obj.LastModified), false))
+	return NewFileInfo(name, true, util.GetIntFromPointer(obj.ContentLength), util.GetTimeFromPointer(obj.LastModified), false), nil
}

// Lstat returns a FileInfo describing the named file

@@ -364,11 +361,6 @@ func (fs *S3Fs) Remove(name string, isDir bool) error {
		Key:    aws.String(name),
	})
	metric.S3DeleteObjectCompleted(err)
-	if plugin.Handler.HasMetadater() && err == nil && !isDir {
-		if errMetadata := plugin.Handler.RemoveMetadata(fs.getStorageID(), ensureAbsPath(name)); errMetadata != nil {
-			fsLog(fs, logger.LevelWarn, "unable to remove metadata for path %q: %+v", name, errMetadata)
-		}
-	}
	return err
}

@@ -402,21 +394,8 @@ func (*S3Fs) Chmod(_ string, _ os.FileMode) error {
}

// Chtimes changes the access and modification times of the named file.
-func (fs *S3Fs) Chtimes(name string, _, mtime time.Time, isUploading bool) error {
-	if !plugin.Handler.HasMetadater() {
-		return ErrVfsUnsupported
-	}
-	if !isUploading {
-		info, err := fs.Stat(name)
-		if err != nil {
-			return err
-		}
-		if info.IsDir() {
-			return ErrVfsUnsupported
-		}
-	}
-	return plugin.Handler.SetModificationTime(fs.getStorageID(), ensureAbsPath(name),
-		util.GetTimeAsMsSinceEpoch(mtime))
+func (fs *S3Fs) Chtimes(_ string, _, _ time.Time, _ bool) error {
+	return ErrVfsUnsupported
}

// Truncate changes the size of the named file.

@@ -519,50 +498,6 @@ func (fs *S3Fs) ScanRootDirContents() (int, int64, error) {
	return fs.GetDirSize(fs.config.KeyPrefix)
}

-func (fs *S3Fs) getFileNamesInPrefix(fsPrefix string) (map[string]bool, error) {
-	fileNames := make(map[string]bool)
-	prefix := ""
-	if fsPrefix != "/" {
-		prefix = strings.TrimPrefix(fsPrefix, "/")
-	}
-
-	paginator := s3.NewListObjectsV2Paginator(fs.svc, &s3.ListObjectsV2Input{
-		Bucket:    aws.String(fs.config.Bucket),
-		Prefix:    aws.String(prefix),
-		Delimiter: aws.String("/"),
-		MaxKeys:   &s3DefaultPageSize,
-	})
-
-	for paginator.HasMorePages() {
-		ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(fs.ctxTimeout))
-		defer cancelFn()
-
-		page, err := paginator.NextPage(ctx)
-		if err != nil {
-			metric.S3ListObjectsCompleted(err)
-			if err != nil {
-				fsLog(fs, logger.LevelError, "unable to get content for prefix %q: %+v", prefix, err)
-				return nil, err
-			}
-			return fileNames, err
-		}
-		for _, fileObject := range page.Contents {
-			name, isDir := fs.resolve(fileObject.Key, prefix)
-			if name != "" && !isDir {
-				fileNames[name] = true
-			}
-		}
-	}
-
-	metric.S3ListObjectsCompleted(nil)
-	return fileNames, nil
-}
-
-// CheckMetadata checks the metadata consistency
-func (fs *S3Fs) CheckMetadata() error {
-	return fsMetadataCheck(fs, fs.getStorageID(), fs.config.KeyPrefix)
-}
-
// GetDirSize returns the number of files and the size for a folder
// including any subfolders
func (fs *S3Fs) GetDirSize(dirname string) (int, int64, error) {

@@ -787,14 +722,6 @@ func (fs *S3Fs) renameInternal(source, target string, fi os.FileInfo, recursion
		}
		numFiles++
		filesSize += fi.Size()
-		if plugin.Handler.HasMetadater() {
-			err := plugin.Handler.SetModificationTime(fs.getStorageID(), ensureAbsPath(target),
-				util.GetTimeAsMsSinceEpoch(fi.ModTime()))
-			if err != nil {
-				fsLog(fs, logger.LevelWarn, "unable to preserve modification time after renaming %q -> %q: %+v",
-					source, target, err)
-			}
-		}
	}
	err := fs.Remove(source, fi.IsDir())
	if fs.IsNotExist(err) {

@@ -1051,16 +978,6 @@ func (fs *S3Fs) downloadToWriter(name string, w PipeWriter) (int64, error) {
	return n, err
}

-func (fs *S3Fs) getStorageID() string {
-	if fs.config.Endpoint != "" {
-		if !strings.HasSuffix(fs.config.Endpoint, "/") {
-			return fmt.Sprintf("s3://%v/%v", fs.config.Endpoint, fs.config.Bucket)
-		}
-		return fmt.Sprintf("s3://%v%v", fs.config.Endpoint, fs.config.Bucket)
-	}
-	return fmt.Sprintf("s3://%v", fs.config.Bucket)
-}
-
type s3DirLister struct {
	baseDirLister
	paginator *s3.ListObjectsV2Paginator
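Note: the getStorageID helpers deleted from the three backends existed only to key plugin metadata by storage location. The endpoint normalization they performed is preserved below as a reference sketch; the function no longer exists in the tree:

package main

import (
	"fmt"
	"strings"
)

// storageID reproduces the removed S3 helper's normalization so the
// behavior stays easy to see: with a custom endpoint the ID embeds it,
// taking care not to double the slash. Reference sketch only.
func storageID(endpoint, bucket string) string {
	if endpoint != "" {
		if !strings.HasSuffix(endpoint, "/") {
			return fmt.Sprintf("s3://%v/%v", endpoint, bucket)
		}
		return fmt.Sprintf("s3://%v%v", endpoint, bucket)
	}
	return fmt.Sprintf("s3://%v", bucket)
}

func main() {
	fmt.Println(storageID("", "mybucket"))                  // s3://mybucket
	fmt.Println(storageID("minio.local:9000", "mybucket"))  // s3://minio.local:9000/mybucket
	fmt.Println(storageID("minio.local:9000/", "mybucket")) // s3://minio.local:9000/mybucket
}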
@@ -31,11 +31,9 @@ import (
	"github.com/eikenb/pipeat"
	"github.com/pkg/sftp"
	"github.com/sftpgo/sdk"
-	"github.com/sftpgo/sdk/plugin/metadata"

	"github.com/drakkan/sftpgo/v2/internal/kms"
	"github.com/drakkan/sftpgo/v2/internal/logger"
-	"github.com/drakkan/sftpgo/v2/internal/plugin"
	"github.com/drakkan/sftpgo/v2/internal/util"
)

@@ -147,7 +145,6 @@ type Fs interface {
	HasVirtualFolders() bool
	GetMimeType(name string) (string, error)
	GetAvailableDiskSize(dirName string) (*sftp.StatVFS, error)
-	CheckMetadata() error
	Close() error
}

@@ -157,13 +154,6 @@ type FsRealPather interface {
	RealPath(p string) (string, error)
}

-// fsMetadataChecker is a Fs that implements the getFileNamesInPrefix method.
-// This interface is used to abstract metadata consistency checks
-type fsMetadataChecker interface {
-	Fs
-	getFileNamesInPrefix(fsPrefix string) (map[string]bool, error)
-}
-
// FsFileCopier is a Fs that implements the CopyFile method.
type FsFileCopier interface {
	Fs

@@ -1090,90 +1080,6 @@ func IsUploadResumeSupported(fs Fs, size int64) bool {
	return fs.IsConditionalUploadResumeSupported(size)
}

-func updateFileInfoModTime(storageID, objectPath string, info *FileInfo) (*FileInfo, error) {
-	if !plugin.Handler.HasMetadater() {
-		return info, nil
-	}
-	if info.IsDir() {
-		return info, nil
-	}
-	mTime, err := plugin.Handler.GetModificationTime(storageID, ensureAbsPath(objectPath), info.IsDir())
-	if errors.Is(err, metadata.ErrNoSuchObject) {
-		return info, nil
-	}
-	if err != nil {
-		return info, err
-	}
-	info.modTime = util.GetTimeFromMsecSinceEpoch(mTime)
-	return info, nil
-}
-
-func ensureAbsPath(name string) string {
-	if path.IsAbs(name) {
-		return name
-	}
-	return path.Join("/", name)
-}
-
-func fsMetadataCheck(fs fsMetadataChecker, storageID, keyPrefix string) error {
-	if !plugin.Handler.HasMetadater() {
-		return nil
-	}
-	limit := 100
-	from := ""
-	for {
-		metadataFolders, err := plugin.Handler.GetMetadataFolders(storageID, from, limit)
-		if err != nil {
-			fsLog(fs, logger.LevelError, "unable to get folders: %v", err)
-			return err
-		}
-		for _, folder := range metadataFolders {
-			from = folder
-			fsPrefix := folder
-			if !strings.HasSuffix(folder, "/") {
-				fsPrefix += "/"
-			}
-			if keyPrefix != "" {
-				if !strings.HasPrefix(fsPrefix, "/"+keyPrefix) {
-					fsLog(fs, logger.LevelDebug, "skip metadata check for folder %q outside prefix %q",
-						folder, keyPrefix)
-					continue
-				}
-			}
-			fsLog(fs, logger.LevelDebug, "check metadata for folder %q", folder)
-			metadataValues, err := plugin.Handler.GetModificationTimes(storageID, folder)
-			if err != nil {
-				fsLog(fs, logger.LevelError, "unable to get modification times for folder %q: %v", folder, err)
-				return err
-			}
-			if len(metadataValues) == 0 {
-				fsLog(fs, logger.LevelDebug, "no metadata for folder %q", folder)
-				continue
-			}
-			fileNames, err := fs.getFileNamesInPrefix(fsPrefix)
-			if err != nil {
-				fsLog(fs, logger.LevelError, "unable to get content for prefix %q: %v", fsPrefix, err)
-				return err
-			}
-			// now check if we have metadata for a missing object
-			for k := range metadataValues {
-				if _, ok := fileNames[k]; !ok {
-					filePath := ensureAbsPath(path.Join(folder, k))
-					if err = plugin.Handler.RemoveMetadata(storageID, filePath); err != nil {
-						fsLog(fs, logger.LevelError, "unable to remove metadata for missing file %q: %v", filePath, err)
-					} else {
-						fsLog(fs, logger.LevelDebug, "metadata removed for missing file %q", filePath)
-					}
-				}
-			}
-		}
-
-		if len(metadataFolders) < limit {
-			return nil
-		}
-	}
-}
-
func validateOSFsConfig(config *sdk.OSFsConfig) error {
	if config.ReadBufferSize < 0 || config.ReadBufferSize > 10 {
		return fmt.Errorf("invalid read buffer size must be between 0 and 10 MB")
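Note: the removed fsMetadataCheck drove its folder scan with a cursor-based pagination loop: advance `from` to the last item seen and stop when a page comes back shorter than the limit. The idiom is generic and worth keeping in mind. A sketch over an in-memory, sorted slice, with hypothetical names in place of the plugin call:

package main

import "fmt"

// fetchPage stands in for plugin.Handler.GetMetadataFolders: it returns
// up to limit items strictly after the "from" cursor (items are sorted).
func fetchPage(items []string, from string, limit int) []string {
	start := 0
	for i, it := range items {
		if from != "" && it <= from {
			start = i + 1
		}
	}
	end := start + limit
	if end > len(items) {
		end = len(items)
	}
	return items[start:end]
}

func main() {
	folders := []string{"a", "b", "c", "d", "e"}
	const limit = 2
	from := ""
	for {
		page := fetchPage(folders, from, limit)
		for _, folder := range page {
			from = folder // advance the cursor, as the removed loop did
			fmt.Println("checking", folder)
		}
		// A short page means the listing is exhausted: the same exit
		// condition the removed fsMetadataCheck used.
		if len(page) < limit {
			break
		}
	}
}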
@ -1064,67 +1064,6 @@ paths:
|
||||||
$ref: '#/components/responses/InternalServerError'
|
$ref: '#/components/responses/InternalServerError'
|
||||||
default:
|
default:
|
||||||
$ref: '#/components/responses/DefaultResponse'
|
$ref: '#/components/responses/DefaultResponse'
|
||||||
/metadata/users/checks:
|
|
||||||
get:
|
|
||||||
tags:
|
|
||||||
- metadata
|
|
||||||
summary: Get metadata checks
|
|
||||||
description: Returns the active metadata checks
|
|
||||||
operationId: get_users_metadata_checks
|
|
||||||
responses:
|
|
||||||
'200':
|
|
||||||
description: successful operation
|
|
||||||
content:
|
|
||||||
application/json:
|
|
||||||
schema:
|
|
||||||
type: array
|
|
||||||
items:
|
|
||||||
$ref: '#/components/schemas/MetadataCheck'
|
|
||||||
'401':
|
|
||||||
$ref: '#/components/responses/Unauthorized'
|
|
||||||
'403':
|
|
||||||
$ref: '#/components/responses/Forbidden'
|
|
||||||
'500':
|
|
||||||
$ref: '#/components/responses/InternalServerError'
|
|
||||||
default:
|
|
||||||
$ref: '#/components/responses/DefaultResponse'
|
|
||||||
/metadata/users/{username}/check:
|
|
||||||
parameters:
|
|
||||||
- name: username
|
|
||||||
in: path
|
|
||||||
description: the username
|
|
||||||
required: true
|
|
||||||
schema:
|
|
||||||
type: string
|
|
||||||
post:
|
|
||||||
tags:
|
|
||||||
- metadata
|
|
||||||
summary: Start a metadata check
|
|
||||||
description: 'Starts a new metadata check for the given user. A metadata check requires a metadata plugin and removes the metadata associated to missing items (for example objects deleted outside SFTPGo). If a metadata check for this user is already active a 409 status code is returned. Metadata are stored for cloud storage backends. This API does nothing for other backends or if no metadata plugin is configured'
|
|
||||||
operationId: start_user_metadata_check
|
|
||||||
responses:
|
|
||||||
'202':
|
|
||||||
description: successful operation
|
|
||||||
content:
|
|
||||||
application/json:
|
|
||||||
schema:
|
|
||||||
$ref: '#/components/schemas/ApiResponse'
|
|
||||||
example:
|
|
||||||
message: Check started
|
|
||||||
'400':
|
|
||||||
$ref: '#/components/responses/BadRequest'
|
|
||||||
'401':
|
|
||||||
$ref: '#/components/responses/Unauthorized'
|
|
||||||
'403':
|
|
||||||
$ref: '#/components/responses/Forbidden'
|
|
||||||
'404':
|
|
||||||
$ref: '#/components/responses/NotFound'
|
|
||||||
'409':
|
|
||||||
$ref: '#/components/responses/Conflict'
|
|
||||||
'500':
|
|
||||||
$ref: '#/components/responses/InternalServerError'
|
|
||||||
default:
|
|
||||||
$ref: '#/components/responses/DefaultResponse'
|
|
||||||
/retention/users/checks:
|
/retention/users/checks:
|
||||||
get:
|
get:
|
||||||
tags:
|
tags:
|
||||||
|
@ -5003,7 +4942,6 @@ components:
|
||||||
- manage_defender
|
- manage_defender
|
||||||
- view_defender
|
- view_defender
|
||||||
- retention_checks
|
- retention_checks
|
||||||
- metadata_checks
|
|
||||||
- view_events
|
- view_events
|
||||||
- manage_event_rules
|
- manage_event_rules
|
||||||
- manage_roles
|
- manage_roles
|
||||||
|
@ -5027,7 +4965,6 @@ components:
|
||||||
* `manage_defender` - remove ip from the dynamic blocklist is allowed
|
* `manage_defender` - remove ip from the dynamic blocklist is allowed
|
||||||
* `view_defender` - list the dynamic blocklist is allowed
|
* `view_defender` - list the dynamic blocklist is allowed
|
||||||
* `retention_checks` - view and start retention checks is allowed
|
* `retention_checks` - view and start retention checks is allowed
|
||||||
* `metadata_checks` - view and start metadata checks is allowed
|
|
||||||
* `view_events` - view and search filesystem and provider events is allowed
|
* `view_events` - view and search filesystem and provider events is allowed
|
||||||
* `manage_event_rules` - manage event actions and rules is allowed
|
* `manage_event_rules` - manage event actions and rules is allowed
|
||||||
* `manage_roles` - manage roles is allowed
|
* `manage_roles` - manage roles is allowed
|
||||||
|
@ -5078,7 +5015,6 @@ components:
|
||||||
* `7` - Transfer quota reset
|
* `7` - Transfer quota reset
|
||||||
* `8` - Data retention check
|
* `8` - Data retention check
|
||||||
* `9` - Filesystem
|
* `9` - Filesystem
|
||||||
* `10` - Metadata check
|
|
||||||
* `11` - Password expiration check
|
* `11` - Password expiration check
|
||||||
* `12` - User expiration check
|
* `12` - User expiration check
|
||||||
* `13` - Identity Provider account check
|
* `13` - Identity Provider account check
|
||||||
|
@@ -6322,16 +6258,6 @@ components:
           type: string
           format: email
           description: 'if the notification method is set to "Email", this is the e-mail address that receives the retention check report. This field is automatically set to the email address associated with the administrator starting the check'
-    MetadataCheck:
-      type: object
-      properties:
-        username:
-          type: string
-          description: username to which the check refers
-        start_time:
-          type: integer
-          format: int64
-          description: check start time as unix timestamp in milliseconds
     QuotaScan:
       type: object
       properties:
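Clients with hand-written or generated models for the `MetadataCheck` schema can delete them. For reference, a Go rendering of the removed schema (field names and types come from the spec above; the struct itself is a hypothetical client-side model):

```go
package models

// MetadataCheck mirrors the schema removed above. Keep it only if you must
// talk to pre-removal servers; otherwise delete this type from your client.
type MetadataCheck struct {
	// Username the check refers to.
	Username string `json:"username,omitempty"`
	// StartTime is the check start time as a Unix timestamp in milliseconds.
	StartTime int64 `json:"start_time,omitempty"`
}
```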
@@ -260,7 +260,8 @@
     "day_of_month": "Day of month",
     "month": "Month",
     "options": "Options",
-    "expired": "Expired"
+    "expired": "Expired",
+    "unsupported": "Feature no longer supported"
   },
   "fs": {
     "view_file": "View file \"{{- path}}\"",
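The new `unsupported` key gives the WebUI a generic "feature no longer supported" label for retired functionality; the Italian catalog below gains the same key. Where exactly the UI consumes it is not shown in this excerpt.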
@@ -968,7 +969,6 @@
     "folder_quota_reset": "Folder quota reset",
     "transfer_quota_reset": "Transfer quota reset",
     "data_retention_check": "Data retention check",
-    "metadata_check": "Metadata check",
     "filesystem": "Filesystem",
     "password_expiration_check": "Password expiration check",
     "user_expiration_check": "User expiration check",
@@ -260,7 +260,8 @@
     "day_of_month": "Giorno mese",
     "month": "Mese",
     "options": "Opzioni",
-    "expired": "Scaduto"
+    "expired": "Scaduto",
+    "unsupported": "Funzionalità non più supportata"
   },
   "fs": {
     "view_file": "Visualizza file \"{{- path}}\"",
@@ -968,7 +969,6 @@
     "folder_quota_reset": "Ricalcolo quota cartella virtuale",
     "transfer_quota_reset": "Reimpostazione quota trasferimenti",
     "data_retention_check": "Controllo conservazione dati",
-    "metadata_check": "Controllo metadati",
     "filesystem": "Filesystem",
     "password_expiration_check": "Controllo password scadute",
     "user_expiration_check": "Controllo utenti scaduti",
@@ -182,7 +182,8 @@ explicit grant from the SFTPGo Team (support@sftpgo.com).
             case 9:
                 return $.t('actions.types.filesystem');
             case 10:
-                return $.t('actions.types.metadata_check');
+                // metadata check was removed
+                return "";
             case 11:
                 return $.t('actions.types.password_expiration_check');
             case 12:
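Any other code that maps action codes to display names needs the same treatment as this switch. A hedged Go sketch (function, map, and package names are illustrative) that returns an empty name for the retired code, matching the JavaScript change above:

```go
package actions

// actionTypeNames maps documented event action codes to display names.
// Code 10 (the removed metadata check) is deliberately absent, so lookups
// for it return "", mirroring the WebClient switch above.
var actionTypeNames = map[int]string{
	7:  "Transfer quota reset",
	8:  "Data retention check",
	9:  "Filesystem",
	11: "Password expiration check",
	12: "User expiration check",
	13: "Identity Provider account check",
}

// ActionTypeName returns a human-readable name for a code, or "" when the
// code is unknown or retired.
func ActionTypeName(code int) string {
	return actionTypeNames[code]
}
```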